quick_server.py
|
""" The following code has been adapted from mpld3. Modifications (c) 2014,
Zachary King.
mpld3, http://mpld3.github.io/, A Simple server used to show mpld3 images.
Copyright (c) 2013, Jake Vanderplas
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the name of the {organization} nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys
import threading
import webbrowser
import socket
import itertools
import random
IPYTHON_WARNING = """
Note: You must interrupt the kernel to end this command
"""
try:
# Python 2.x
import BaseHTTPServer as server
except ImportError:
# Python 3.x
from http import server
def generate_handler(html, files=None):
if files is None:
files = {}
class MyHandler(server.BaseHTTPRequestHandler):
def do_GET(self):
"""Respond to a GET request."""
if self.path == '/':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(html.encode('utf-8'))
elif self.path in files:
content_type, content = files[self.path]
self.send_response(200)
self.send_header("Content-type", content_type)
self.end_headers()
self.wfile.write(content)
else:
self.send_error(404)
return MyHandler
def find_open_port(ip, port, n=50):
"""Find an open port near the specified port"""
ports = itertools.chain((port + i for i in range(n)),
(port + random.randint(-2 * n, 2 * n) for _ in range(n)))
for port in ports:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = s.connect_ex((ip, port))
s.close()
if result != 0:
return port
raise ValueError("no open ports found")
def serve_and_open(html, ip='127.0.0.1', port=8888, n_retries=50, files=None,
ipython_warning=True):
"""Start a server serving the given HTML, and open a browser
Parameters
----------
html : string
HTML to serve
ip : string (default = '127.0.0.1')
ip address at which the HTML will be served.
port : int (default = 8888)
the port at which to serve the HTML
n_retries : int (default = 50)
the number of nearby ports to search if the specified port is in use.
files : dictionary (optional)
dictionary of extra content to serve
ipython_warning : bool (optional)
if True (default), then print a warning if this is used within IPython
"""
port = find_open_port(ip, port, n_retries)
Handler = generate_handler(html, files)
srvr = server.HTTPServer((ip, port), Handler)
if ipython_warning:
try:
__IPYTHON__
except:
pass
else:
print(IPYTHON_WARNING)
# Start the server
print(("Serving to http://{0}:{1}/\n".format(ip, port) +
"[Ctrl-C to exit from terminal, or Ctrl-M i i to interrupt notebook kernel]"))
sys.stdout.flush()
# Use a thread to open a web browser pointing to the server
b = lambda: webbrowser.open('http://{0}:{1}'.format(ip, port))
threading.Thread(target=b).start()
try:
srvr.serve_forever()
except (KeyboardInterrupt, SystemExit):
print("\nstopping Server...")
srvr.server_close()
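# A minimal usage sketch (not part of the original module): serve a short HTML
# page plus one extra CSS file. The page and stylesheet contents are illustrative.
if __name__ == '__main__':
    demo_html = ("<html><head><link rel='stylesheet' href='/style.css'></head>"
                 "<body><h1>Hello from quick_server</h1></body></html>")
    demo_files = {'/style.css': ('text/css', b'h1 { color: steelblue; }')}
    serve_and_open(demo_html, port=8888, files=demo_files)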
|
test_statsd_thread_safety.py
|
# stdlib
from collections import deque
from functools import reduce
import threading
import time
import unittest
# 3p
from mock import patch
# datadog
from datadog.dogstatsd.base import DogStatsd
from datadog.util.compat import is_p3k
class FakeSocket(object):
"""
Mocked socket for testing.
"""
def __init__(self):
self.payloads = deque()
def send(self, payload):
if is_p3k():
assert type(payload) == bytes
else:
assert type(payload) == str
self.payloads.append(payload)
def recv(self):
try:
return self.payloads
except IndexError:
return None
def __repr__(self):
return str(self.payloads)
class TestDogStatsdThreadSafety(unittest.TestCase):
"""
DogStatsd thread safety tests.
"""
def setUp(self):
"""
Mock a socket.
"""
self.socket = FakeSocket()
def assertMetrics(self, values):
"""
Helper, assertions on metrics.
"""
count = len(values)
# Split packet per metric (required when buffered) and discard empty packets
packets = map(lambda x: x.split(b"\n"), self.socket.recv())
packets = reduce(lambda prev, ele: prev + ele, packets, [])
packets = list(filter(lambda x: x, packets))
# Count
self.assertEqual(
len(packets), count,
u"Metric size assertion failed: expected={expected}, received={received}".format(
expected=count, received=len(packets)
)
)
# Values
for packet in packets:
metric_value = int(packet.split(b':', 1)[1].split(b'|', 1)[0])
self.assertIn(
metric_value, values,
u"Metric assertion failed: unexpected metric value {metric_value}".format(
metric_value=metric_value
)
)
values.remove(metric_value)
def test_socket_creation(self):
"""
Socket creation plays well with multiple threads.
"""
# Create a DogStatsd client but no socket
statsd = DogStatsd()
# Submit metrics from different threads to create a socket
threads = []
for value in range(10000):
t = threading.Thread(target=statsd.gauge, args=("foo", value))
threads.append(t)
t.start()
for t in threads:
t.join()
@staticmethod
def _submit_with_multiple_threads(statsd, submit_method, values):
"""
Helper, use the given statsd client and method to submit the values
within multiple threads.
"""
threads = []
for value in values:
t = threading.Thread(
target=getattr(statsd, submit_method),
args=("foo", value)
)
threads.append(t)
t.start()
for t in threads:
t.join()
def test_increment(self):
"""
Increments can be submitted from concurrent threads.
"""
# Create a DogStatsd client with a mocked socket
statsd = DogStatsd()
statsd.socket = self.socket
# Samples
values = set(range(10000))
# Submit metrics from different threads
self._submit_with_multiple_threads(statsd, "increment", values)
# All metrics were properly submitted
self.assertMetrics(values)
def test_decrement(self):
"""
Decrements can be submitted from concurrent threads.
"""
# Create a DogStatsd client with a mocked socket
statsd = DogStatsd()
statsd.socket = self.socket
# Samples
values = set(range(10000))
expected_values = set([-value for value in values])
# Submit metrics from different threads
self._submit_with_multiple_threads(statsd, "decrement", values)
# All metrics were properly submitted
self.assertMetrics(expected_values)
def test_gauge(self):
"""
Gauges can be submitted from concurrent threads.
"""
# Create a DogStatsd client with a mocked socket
statsd = DogStatsd()
statsd.socket = self.socket
# Samples
values = set(range(10000))
# Submit metrics from different threads
self._submit_with_multiple_threads(statsd, "gauge", values)
# All metrics were properly submitted
self.assertMetrics(values)
def test_histogram(self):
"""
Histograms can be submitted from concurrent threads.
"""
# Create a DogStatsd client with a mocked socket
statsd = DogStatsd()
statsd.socket = self.socket
# Samples
values = set(range(10000))
# Submit metrics from different threads
self._submit_with_multiple_threads(statsd, "histogram", values)
# All metrics were properly submitted
self.assertMetrics(values)
def test_timing(self):
"""
Timings can be submitted from concurrent threads.
"""
# Create a DogStatsd client with a mocked socket
statsd = DogStatsd()
statsd.socket = self.socket
# Samples
values = set(range(10000))
# Submit metrics from different threads
self._submit_with_multiple_threads(statsd, "timing", values)
# All metrics were properly submitted
self.assertMetrics(values)
def test_send_batch_metrics(self):
"""
Metrics can be buffered, submitted from concurrent threads.
"""
with DogStatsd() as batch_statsd:
# Create a DogStatsd buffer client with a mocked socket
batch_statsd.socket = self.socket
# Samples
values = set(range(10000))
# Submit metrics from different threads
self._submit_with_multiple_threads(batch_statsd, "gauge", values)
# All metrics were properly submitted
self.assertMetrics(values)
@patch('datadog.dogstatsd.context.time')
def test_timed_decorator_threaded(self, mock_time):
"""
`timed` decorator plays well with concurrent threads.
"""
# Create a DogStatsd client with a mocked socket
statsd = DogStatsd()
statsd.socket = self.socket
# Set up the mocked time
mock_time.return_value = 0
# Method to time
@statsd.timed("foo")
def bar():
"""
Wait 2 time units and return.
"""
initial_time = mock_time.return_value
while mock_time.return_value < initial_time + 2:
pass
# Run the method within multiple threads
threads = []
for value in range(10):
t = threading.Thread(target=bar)
threads.append(t)
# Bump time so that previous thread can complete
mock_time.return_value += 1
t.start()
# Sleep to let the threads start
time.sleep(0.1)
# Bump time so that all threads complete
time.sleep(0.1)
mock_time.return_value += 1
time.sleep(0.1)
mock_time.return_value += 1
for t in threads:
t.join()
# All metrics were properly submitted
expected_values = [2 for _ in range(0, 10)]
self.assertMetrics(expected_values)
@patch('datadog.dogstatsd.context.time')
def test_timed_context_manager_threaded(self, mock_time):
"""
`timed` context manager plays well with concurrent threads.
"""
# Create a DogStatsd client with a mocked socket
statsd = DogStatsd()
statsd.socket = self.socket
# Set up the mocked time
mock_time.return_value = 0
# Method to time
def bar():
"""
Wait 2 time units and return.
"""
initial_time = mock_time.return_value
with statsd.timed("foo"):
while mock_time.return_value < initial_time + 2:
pass
# Run the method within multiple threads
threads = []
for value in range(10):
t = threading.Thread(target=bar)
threads.append(t)
# Bump time so that previous thread can complete
mock_time.return_value += 1
t.start()
# Sleep to let the threads start
time.sleep(0.1)
# Bump time so that all threads complete
time.sleep(0.1)
mock_time.return_value += 1
time.sleep(0.1)
mock_time.return_value += 1
for t in threads:
t.join()
# All metrics were properly submitted
expected_values = [2 for _ in range(0, 10)]
self.assertMetrics(expected_values)
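# For reference, the dogstatsd line format that assertMetrics() parses looks like
# b"foo:42|g" (metric:value|type); the value sits between the first ':' and the
# first '|', which is exactly what the split calls above extract, e.g.:
#   int(b"foo:42|g".split(b':', 1)[1].split(b'|', 1)[0]) == 42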
|
workers_manager.py
|
import importlib
import threading
from pip import main as pip_main
from apscheduler.schedulers.background import BackgroundScheduler
from interruptingcow import timeout
from functools import partial
from logger import _LOGGER
from workers_queue import _WORKERS_QUEUE
class WorkersManager:
class Command:
def __init__(self, callback, args=(), options=None):
self._callback = callback
self._args = args
self._options = options if options is not None else {}
def execute(self):
messages = []
with timeout(35):
messages = self._callback(*self._args)
_LOGGER.debug(messages)
return messages
def __init__(self):
self._mqtt_callbacks = []
self._update_commands = []
self._scheduler = BackgroundScheduler()
self._daemons = []
def register_workers(self, config):
for (worker_name, worker_config) in config['workers'].items():
module_obj = importlib.import_module("workers.%s" % worker_name)
klass = getattr(module_obj, "%sWorker" % worker_name.title())
if module_obj.REQUIREMENTS is not None:
self._pip_install_helper(module_obj.REQUIREMENTS)
worker_obj = klass(**worker_config['args'])
if hasattr(worker_obj, 'status_update'):
_LOGGER.debug("Added: %s with %d seconds interval" % (worker_name, worker_config['update_interval']))
command = self.Command(worker_obj.status_update, [])
self._update_commands.append(command)
if 'update_interval' in worker_config:
self._scheduler.add_job(
partial(self._queue_command, command), 'interval',
seconds=worker_config['update_interval'],
)
elif hasattr(worker_obj, 'run'):
_LOGGER.debug("Registered: %s as daemon" % (worker_name))
self._daemons.append(worker_obj)
else:
raise "%s cannot be initialized, it has to define run or status_update method" % worker_name
if 'topic_subscription' in worker_config:
_LOGGER.debug("Subscribing to: %s" % worker_config['topic_subscription'])
self._mqtt_callbacks.append((
worker_config['topic_subscription'],
partial(self._on_command_wrapper, worker_obj)
))
if 'topic_subscription' in config:
for (callback_name, options) in config['topic_subscription'].items():
_LOGGER.debug("Subscribing to: %s with command: %s" % (options['topic'], callback_name))
self._mqtt_callbacks.append((
options['topic'],
lambda client, _ , c: self._queue_if_matching_payload(self.Command(getattr(self, callback_name)), c.payload, options['payload']))
)
return self
def start(self, mqtt):
mqtt.callbacks_subscription(self._mqtt_callbacks)
self._scheduler.start()
self.update_all()
for daemon in self._daemons:
threading.Thread(target=daemon.run, args=[mqtt], daemon=True).start()
def _queue_if_matching_payload(self, command, payload, expected_payload):
if payload.decode('utf-8') == expected_payload:
self._queue_command(command)
def update_all(self):
_LOGGER.debug("Updating all workers")
for command in self._update_commands:
self._queue_command(command)
@staticmethod
def _queue_command(command):
_WORKERS_QUEUE.put(command)
@staticmethod
def _pip_install_helper(package_names):
for package in package_names:
pip_main(['install', '-q', package])
def _on_command_wrapper(self, worker_obj, client, _, c):
_LOGGER.debug("on command wrapper for with %s: %s", c.topic, c.payload)
self._queue_command(self.Command(worker_obj.on_command, [c.topic, c.payload]))
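# A hedged sketch of the config structure register_workers() appears to expect,
# inferred only from the keys accessed above; the worker name, topics and args are
# hypothetical. A 'thermometer' entry would import workers.thermometer and
# instantiate ThermometerWorker(**args).
EXAMPLE_CONFIG = {
    'workers': {
        'thermometer': {
            'args': {'mac': '00:11:22:33:44:55'},
            'update_interval': 60,
            'topic_subscription': 'home/thermometer/set',
        },
    },
    'topic_subscription': {
        'update_all': {'topic': 'home/update', 'payload': 'PRESS'},
    },
}
# Typical wiring (mqtt being the gateway's MQTT wrapper):
#   manager = WorkersManager().register_workers(EXAMPLE_CONFIG)
#   manager.start(mqtt)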
|
rsa_key_pair_signer.py
|
# coding:utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sched
import time
import threading
import json
import logging
import socket
from aliyunsdkcore.auth.signers.signer import Signer
from aliyunsdkcore.acs_exception import error_code
from aliyunsdkcore.acs_exception import error_msg
from aliyunsdkcore.acs_exception import exceptions
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcore.auth.algorithm import sha_hmac256
class RsaKeyPairSigner(Signer):
_MIN_SESSION_PERIOD = 900
_MAX_SESSION_PERIOD = 3600
_RETRY_DELAY_FAST = 3
_PRIORITY = 1
def __init__(self, rsa_key_pair_credential, region_id, debug=False):
if not debug and (rsa_key_pair_credential.session_period < self._MIN_SESSION_PERIOD
or rsa_key_pair_credential.session_period > self._MAX_SESSION_PERIOD):
raise exceptions.ClientException(
error_code.SDK_INVALID_SESSION_EXPIRATION,
error_msg.get_msg('SDK_INVALID_SESSION_EXPIRATION').format(self._MIN_SESSION_PERIOD,
self._MAX_SESSION_PERIOD))
rsa_key_pair_credential.region_id = region_id
self._public_key_id = rsa_key_pair_credential.public_key_id
self._private_key = rsa_key_pair_credential.private_key
self._session_period = rsa_key_pair_credential.session_period
self._schedule_interval = rsa_key_pair_credential.session_period if debug \
else max(rsa_key_pair_credential.session_period * 0.8, 5)
from aliyunsdkcore.client import AcsClient
self._sts_client = AcsClient(self._public_key_id, self._private_key, rsa_key_pair_credential.region_id)
self._session_credential = None
self._get_session_ak_and_sk()
self._scheduler = sched.scheduler(time.time, time.sleep)
self._daemon_thread = threading.Thread(target=self._refresh_session_ak_and_sk, args=[True, 0])
self._daemon_thread.setDaemon(True)
self._daemon_thread.start()
def sign(self, region_id, request):
session_ak, session_sk = self._session_credential
header = request.get_signed_header(region_id, session_ak, session_sk)
url = request.get_url(region_id, session_ak, session_sk)
return header, url
def _get_session_ak_and_sk(self):
request = GetSessionAkRequest()
request.set_method("GET")
request.set_duration_seconds(self._session_period)
try:
response_str = self._sts_client.do_action_with_exception(request)
response = json.loads(response_str.decode('utf-8'))
session_ak = str(response.get("SessionAccessKey").get("SessionAccessKeyId"))
session_sk = str(response.get("SessionAccessKey").get("SessionAccessKeySecret"))
self._session_credential = session_ak, session_sk
except exceptions.ServerException as srv_ex:
if srv_ex.error_code == 'InvalidAccessKeyId.NotFound' or srv_ex.error_code == 'SignatureDoesNotMatch':
raise exceptions.ClientException(error_code.SDK_INVALID_CREDENTIAL,
error_msg.get_msg('SDK_INVALID_CREDENTIAL'))
else:
raise
# no-limit-retry if failed with any conditions.
# fast retry in first 3 times, then the interval becomes incremental.
# the max interval is 10 minutes.
def _refresh_session_ak_and_sk(self, is_init, retry_times=0):
delay = self._schedule_interval
next_retry_time = 0
try:
if not is_init:
self._get_session_ak_and_sk()
except (Exception, socket.error) as ex:
if retry_times <= 3:
delay = self._RETRY_DELAY_FAST
else:
delay = 60 * min(10, retry_times)
next_retry_time = retry_times + 1
logging.warning(
'refresh session ak failed, auto retry after {} seconds. message = {}'.format(delay, ex))
finally:
self._scheduler.enter(delay, self._PRIORITY, self._refresh_session_ak_and_sk, [False, next_retry_time])
self._scheduler.run()
class GetSessionAkRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, product='Sts', version='2015-04-01', action_name='GenerateSessionAccessKey',
signer=sha_hmac256)
self.set_protocol_type('https')
def get_duration_seconds(self):
return self.get_query_params().get("DurationSeconds")
def set_duration_seconds(self, duration_seconds):
self.add_query_param('DurationSeconds', duration_seconds)
def get_public_key_id(self):
return self.get_query_params().get('PublicKeyId')
def set_public_key_id(self, public_key_id):
self.add_query_param('PublicKeyId', public_key_id)
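# A hedged usage sketch (all values are placeholders; assumes
# aliyunsdkcore.auth.credentials provides an RsaKeyPairCredential with these fields):
#   from aliyunsdkcore.auth.credentials import RsaKeyPairCredential
#   credential = RsaKeyPairCredential('<public-key-id>', '<pem-private-key>', 3600)
#   signer = RsaKeyPairSigner(credential, region_id='cn-hangzhou')
#   header, url = signer.sign('cn-hangzhou', some_rpc_request)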
|
tmp.py
|
"""
Manages uploading files from encoders to origins.
Also deals with multiple origins, timeouts, retries, and parallelism of uploads
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import threading
import time
from six.moves import queue as Queue
from uplynkcore.logger import exlog, ilog
from uplynkcore.metric.metric_batch import MetricBatcher
import uplynkrepo.constants as constants
from uplynkrepo.storage import Storage
from uplynkrepo.py3_helpers import py3_bytes
from version import version
try:
zone = open('/opt/uplynk/SERVER_ZONE', 'rb').read().decode('utf-8')
except Exception:
zone = 'unknown'
METRIC_BATCHER = MetricBatcher(60)
METRIC_BATCHER.add_dimension('version', version)
METRIC_BATCHER.add_dimension('zone', zone)
class GV:
"""
Place for global vars
"""
encoderID = "na"
if os.path.exists('/opt/uplynk/SERVER_ID'):
GV.encoderID = open('/opt/uplynk/SERVER_ID').read().strip()
MAX_UPLOAD_RETRIES = 10
UPLOAD_THREADS_PER_ORIGIN = 64
class OriginSet(object):
"""
Takes a work token as input. If the work token has multiple upload destinations
(work.dest_info and work.alt_dest_infos), then it will manage uploads to all of them.
UploadFile is called for each file to be uploaded for this work token.
WaitForUploads is called once all files for the work token have been given to UploadFile.
Will return when:
1) All files have been successfully uploaded to all destinations
2) All files have either been successfully uploaded, or have errored out for all destinations
3) All files have either been successfully uploaded, or have errored out for at least one
destination, and
secondary_timeout seconds have elapsed since WaitForUploads was called
Returns a list of destination IDs to which all files were successfully uploaded
"""
def __init__(self, work, LogS3Error, lib, info=None):
self.dests = {}
self.lib = lib
self.info = info
# first, make sure the primary origin is ready to go
if work.dest_info:
self.add_origin(work.dest_info, LogS3Error)
# next, make sure all additional origins are ready
for dest in work.get('alt_dest_infos', []):
self.add_origin(dest, LogS3Error)
if len(self.dests) <= 0:
raise Exception("No destinations available")
# finally, tell all destinations to notify us when they're done
self.notifier = Notifier()
for _, dest in self.dests.items():
dest.set_notifier(self.notifier)
def add_origin(self, dest_info, LogS3Error):
"""
Adds an origin to the origin set to upload to
"""
key = storage_key(dest_info)
if key not in self.dests:
self.dests[key] = Origin(dest_info, LogS3Error, self.lib)
else:
ilog("[OriginSet] Skipping previously added origin: %s", key)
ilog("[OriginSet] Using origins %s", list(self.dests.keys()))
def UploadFile(self, job_type, outFilename, data):
"""
Adds a file to every origin's upload queue
"""
ilog("[OriginSet] Adding File %s", outFilename)
for _, origin in self.dests.items():
origin.upload(job_type, outFilename, data, self.info)
def WaitForUploads(self, secondary_timeout):
"""
Waits for all origins to complete, or any origin to be completed by secondary_timeout
"""
ilog("[OriginSet] Waiting for uploads")
# tell origins that there are no more files coming
for _, origin in self.dests.items():
origin.finish()
# wait for at least one to finish
start = time.time()
self.notifier.wait()
# give the others a chance to finish
while self.notifier.complete < len(self.dests) and time.time() - start < secondary_timeout:
elapsed = time.time() - start
self.notifier.wait(max(0.001, secondary_timeout - elapsed))
# tell origins that haven't finished to give up
for k, origin in self.dests.items():
if not origin.finished_ok:
origin.give_up()
origin.cleanup()
completed_origins = tuple(origin.dest_info['storage_id'] \
for origin in self.dests.values() if origin.finished_ok)
ilog("[OriginSet] Done %s", completed_origins)
# tell the caller which origins completed on time
return completed_origins
def GetInitsSeen(self):
inits_seen = 0
for _, origin in self.dests.items():
inits_seen += origin.get_inits_seen()
return inits_seen
class Origin(object): # TODO: auto cleanup after so much time of inactivity
"""
Negotiates uploading N files to 1 origin.
Keeps track of success/retry/failure
Notifies a notifier when completed
Can be told to give up
"""
def __init__(self, dest_info, LogS3Error, lib):
self.LogS3Error = LogS3Error
self.lib = lib
self.dest_info = dest_info
self.keep_running = True
self.input_queue = Queue.Queue()
self.lock = threading.Lock()
self.set_notifier(None) # initialize notifier and other flags
self.give_up_trying = False
self.finished = False
self.all_uploaded_ok = True
self.finished_ok = False
self.init_files_seen = 0
# start threads
self.threads = []
for _ in range(UPLOAD_THREADS_PER_ORIGIN):
self.threads.append(start_thread(self.upload_worker))
ilog("[Origin-%s] Initialized", self.dest_info['storage_id'])
def check_threads(self):
"""
Periodically check that our threads are alive and restart them if they die
"""
for thread in self.threads[:]:
if not thread.is_alive():
self.threads.remove(thread)
ilog('[Origin-%s] Restarting upload thread', self.dest_info['storage_id'])
self.threads.append(start_thread(self.upload_worker))
def upload(self, job_type, outFilename, data, info=None):
"""
Adds a file to our upload queue
"""
self.check_threads()
# TODO: not sure if we really need to lock here,
# since this /should/ always be called on the same thread as self.finish
self.lock.acquire()
self.input_queue.put((job_type, outFilename, data, info))
self.file_count += 1
self.lock.release()
def set_notifier(self, notifier):
"""
Sets the notifier to tell when we are complete
"""
self.notifier = notifier
self.file_count = 0
self.finished_ok = False
self.finished = False
self.all_uploaded_ok = True
self.give_up_trying = False
def finish(self):
"""
Sets our final state (ok / all uploaded ok)
Then notifies our notifier
"""
self.lock.acquire()
self.finished = True
if self.file_count == 0:
self.finished_ok = self.all_uploaded_ok
self.notifier.notify()
self.lock.release()
def give_up(self):
'''
Clean out upload queues and signal upload workers to exit
'''
ilog('[Origin-%s] Giving up', self.dest_info['storage_id'])
self.give_up_trying = True
while self.input_queue.qsize() > 0:
try:
self.input_queue.get_nowait()
except Queue.Empty:
pass
def get_inits_seen(self):
return self.init_files_seen
def cleanup(self):
'''
Signal workers to shutdown
Join all threads
'''
ilog('[Origin-%s] Cleaning up', self.dest_info['storage_id'])
# tell worker threads to shut down
self.keep_running = False
self.give_up_trying = True
for _ in range(len(self.threads)):
self.input_queue.put(None)
# wait for threads to shut down
for thread in self.threads:
thread.join()
self.threads = []
ilog('[Origin-%s] Cleaned', self.dest_info['storage_id'])
def upload_worker(self):
'''
Read from upload queues and send to uploader
'''
uploader = None
try:
uploader = Storage.FromStorageInfo('push', self.dest_info, useCS3=True, lib=self.lib)
uploader.SetPublicRead(True)
if callable(getattr(uploader, 'SetUserAgent', None)):
uploader.SetUserAgent("uplynk encoder 1.0")
except Exception:
exlog('[Origin-%s-W] Error initializing storage', self.dest_info['storage_id'])
try:
while self.keep_running:
# wait for upload work
msg = self.input_queue.get()
if msg is None:
break
job_type, outFilename, data, info = msg
try:
cloud_type_name = ("" if getattr(uploader, 'cloud', None) is None else "/" + uploader.cloud)
ilog('[Origin-%s%s-W] File %s: Starting',
self.dest_info['storage_id'], cloud_type_name, outFilename)
#perform upload
if not self.perform_upload(uploader, job_type, outFilename, data, info):
ilog('[Origin-%s%s-W] File %s: Failed upload',
self.dest_info['storage_id'], cloud_type_name, outFilename)
self.all_uploaded_ok = False
else:
ilog('[Origin-%s%s-W] File %s: Uploaded',
self.dest_info['storage_id'], cloud_type_name, outFilename)
# notify about completion
self.lock.acquire()
self.file_count -= 1
if self.file_count == 0 and self.finished:
self.finished_ok = self.all_uploaded_ok
self.notifier.notify()
self.lock.release()
except Exception as e:
exlog('[Origin-%s-W] File %s: Error: %s',
self.dest_info['storage_id'],
outFilename,
str(e))
except Exception:
exlog('[Origin-%s-W] Error performing upload', self.dest_info['storage_id'])
finally:
# clean up uploader handle
if uploader:
# If the uploader did not initialize properly, it won't be available, and this will
# throw an exception.
uploader.Close()
ilog('[Origin-%s-W] Upload worker exiting', self.dest_info['storage_id'])
def perform_upload(self, uploader, job_type, outFilename, data, info=None):
'''
Upload a single file, try to do it well (use timeouts and retries)
'''
if not uploader:
ilog('[Origin-%s-W] No uploader', self.dest_info['storage_id'])
return False
if "_init." in outFilename:
self.init_files_seen += 1
total_retries = 0
try:
if os.getenv('UPLYNK_IGNOREUPLOADTIMEOUTS', None) is None:
cbP2C = getattr(info, 'callback', None)
if job_type == 'live' and cbP2C is None:
if len(data) < 1048576: # if < 1MB
timeout = 1000
else:
timeout = 2000
else:
timeout = 30000
if hasattr(uploader, 'SetBandwidthTimeout'):
# time out if the upload bandwidth is lower than minKbps for longer than maxMS
uploader.SetBandwidthTimeout(minKbps=700, maxMS=4000)
else:
timeout = 30000
for retry in range(MAX_UPLOAD_RETRIES):
if self.give_up_trying:
break
try:
beamID = self.dest_info['beamID']
ilog('[Origin-%s-U] File %s: %s upload with timeout %s and size %d and beam %s',
self.dest_info['storage_id'], outFilename,
"Starting" if retry == 0 else "Retrying", timeout, len(data), beamID)
contentType = 'application/octet-stream'
if outFilename.endswith('.jpg'):
contentType = 'image/jpeg'
elif outFilename.endswith('.m4s'): # segment type used for subtitles in fmp4
contentType = 'application/mp4'
elif outFilename.endswith('.vtt'):
contentType = 'text/vtt'
uploader.Upload(outFilename,
py3_bytes(data),
timeout=timeout,
dnsTimeout=-1 if retry == 0 else 0,
contentType=contentType,
info=info)
ilog('[Origin-%s-U] File %s: Finished upload, Beam: %s',
self.dest_info['storage_id'], outFilename, beamID)
size_dim = "size:%s" % ("LT_1_MB" if len(data) < 1048576 else "GT_1_MB")
METRIC_BATCHER.inc('enc_ul_upload_retries',
dimensions=["retries:%s" % retry,
size_dim,
"uploadStatus:%s" % "SUCCESS"])
return True
except Exception as e:
if uploader.cloud == constants.AZURE:
ilog('exception: {}'.format(str(e)))
continue
METRIC_BATCHER.inc('enc_ul_timeout')
exlog.local(str(e))
self.LogS3Error('upload', outFilename, uploader)
ilog('[Origin-%s-U] File %s: headers in: %s',
self.dest_info['storage_id'], outFilename, uploader.headersIn)
ilog('[Origin-%s-U] File %s: headers out: %s',
self.dest_info['storage_id'], outFilename, uploader.headersOut)
if retry > 1:
# oh dear, we can't seem to upload faster than
# our min bandwidth after several tries
# disable the bandwidth limit for the next retries
ilog('[Origin-%s-U] File %s: Removing bandwidth restriction and ' \
'doubling timeout',
self.dest_info['storage_id'], outFilename)
if hasattr(uploader, 'SetBandwidthTimeout'):
uploader.SetBandwidthTimeout(0, 0)
# retry with 2s, 4s a couple of times before going bigger
timeoutVals = [2, 2, 4, 4, 4, 7, 10, 15, 20, 25]
timeout = timeoutVals[retry] * 1000
total_retries = retry
except Exception as e:
exlog(str(e))
dim_size = "size:%s" % ("LT_1_MB" if len(data) < 1048576 else "GT_1_MB")
METRIC_BATCHER.inc('enc_ul_upload_retries', dimensions=["retries:%s" % total_retries,
dim_size,
"uploadStatus:%s" % "FAIL"])
return False
def start_thread(target, *args, **kwargs):
'''Helper method to start a thread'''
thread = threading.Thread(target=target, args=args, kwargs=kwargs)
thread.setDaemon(True)
thread.start()
return thread
class Notifier(object):
'''Cheesy class to make it so we can wait on all origins simultaneously'''
def __init__(self):
# TODO: I worry about creating a Queue (and the resources like mutexes that go with it)
# for every work token. But the only alternative I can think of is to poll,
# which seems like a bad idea...
self.queue = Queue.Queue()
self.complete = 0
def notify(self):
'''Origin class instances call this when they finish uploading'''
self.queue.put(None)
def wait(self, timeout=None):
'''OriginSet calls this to wait for the next origin to finish'''
try:
self.queue.get(timeout=timeout)
except Queue.Empty:
pass
else:
self.complete += 1
def storage_key(dest_info):
"""
not sure if storage_id is the right way to go,
so made this a function to make it easy to change in the future
"""
return dest_info['beamID'] + '_' + dest_info['storage_id']
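# A hedged driver sketch based on the OriginSet docstring above; the work token,
# LogS3Error callback, lib handle and file iterable all come from the caller and
# are placeholders here:
#   origins = OriginSet(work, LogS3Error, lib)
#   for name, payload in files_for_this_token:
#       origins.UploadFile('live', name, payload)
#   completed = origins.WaitForUploads(secondary_timeout=30)
#   ilog("[OriginSet] Uploaded to %s", completed)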
|
test_interface.py
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from shutil import rmtree
from threading import Thread
from time import sleep
from os.path import exists
import mycroft.audio
from mycroft.util import create_signal, check_for_signal
"""
Tests for public interface for audio interface
"""
done_waiting = False
def wait_while_speaking_thread():
global done_waiting
mycroft.audio.wait_while_speaking()
done_waiting = True
class TestInterface(unittest.TestCase):
def setUp(self):
if exists('/tmp/mycroft'):
rmtree('/tmp/mycroft')
def test_is_speaking(self):
create_signal('isSpeaking')
self.assertTrue(mycroft.audio.is_speaking())
# Check that the signal hasn't been removed
self.assertTrue(check_for_signal('isSpeaking'))
self.assertFalse(mycroft.audio.is_speaking())
def test_wait_while_speaking(self):
# Check that test terminates
create_signal('isSpeaking')
Thread(target=wait_while_speaking_thread).start()
sleep(2)
self.assertFalse(done_waiting)
check_for_signal('isSpeaking')
sleep(2)
self.assertTrue(done_waiting)
if __name__ == "__main__":
unittest.main()
|
net.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
"""Test helpers for networking.
"""
import os
import re
import requests
import socket
import threading
import time
from ptvsd.common import compat, fmt, log
from tests.patterns import some
def get_test_server_port(start, stop):
"""Returns a server port number that can be safely used for listening without
clashing with another test worker process, when running with pytest-xdist.
If multiple test workers invoke this function with the same start value, each of
them will receive a different number that is not lower than start (but may be
higher). If the resulting value is >= stop, it is a fatal error.
Note that if multiple test workers invoke this function with different ranges
that overlap, conflicts are possible!
"""
try:
worker_id = compat.force_ascii(os.environ["PYTEST_XDIST_WORKER"])
except KeyError:
n = 0
else:
assert worker_id == some.bytes.matching(
br"gw(\d+)"
), "Unrecognized PYTEST_XDIST_WORKER format"
n = int(worker_id[2:])
port = start + n
assert port <= stop
return port
def find_http_url(text):
match = re.search(r"https?://[-.0-9A-Za-z]+(:\d+)/?", text)
return match.group() if match else None
def wait_until_port_is_listening(port, interval=1, max_attempts=1000):
"""Blocks until the specified TCP port on localhost is listening, and can be
connected to.
Tries to connect to the port periodically, and repeats until connection succeeds.
Connection is immediately closed before returning.
"""
for i in compat.xrange(1, max_attempts + 1):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
log.info("Probing localhost:{0} (attempt {1})...", port, i)
sock.connect(("localhost", port))
except socket.error as exc:
# The first attempt will almost always fail, because the port isn't
# open yet. But if it keeps failing after that, we want to know why.
if i > 1:
log.warning("Failed to connect to localhost:{0}:\n{1}", port, exc)
time.sleep(interval)
else:
log.info("localhost:{0} is listening - server is up!", port)
return
finally:
sock.close()
class WebRequest(object):
"""An async wrapper around requests.
"""
@staticmethod
def get(*args, **kwargs):
return WebRequest("get", *args, **kwargs)
@staticmethod
def post(*args, **kwargs):
return WebRequest("post", *args, **kwargs)
def __init__(self, method, url, *args, **kwargs):
"""Invokes requests.method(url, *args, **kwargs) on a background thread,
and immediately returns.
If method() raises an exception, it is logged, unless log_errors=False.
"""
self.method = method
self.url = url
self.log_errors = kwargs.pop("log_errors", True)
self.request = None
"""The underlying requests.Request object.
Not set until wait_for_response() returns.
"""
self.exception = None
"""Exception that occurred while performing the request, if any.
Not set until wait_for_response() returns.
"""
log.info("{0}", self)
func = getattr(requests, method)
self._worker_thread = threading.Thread(
target=lambda: self._worker(func, *args, **kwargs),
name=fmt("WebRequest({0})", self),
)
self._worker_thread.daemon = True
self._worker_thread.start()
def __str__(self):
return fmt("HTTP {0} {1}", self.method.upper(), self.url)
def _worker(self, func, *args, **kwargs):
try:
self.request = func(self.url, *args, **kwargs)
except Exception as exc:
if self.log_errors:
log.exception("{0} failed:", self)
self.exception = exc
else:
log.info(
"{0} --> {1} {2}", self, self.request.status_code, self.request.reason
)
def wait_for_response(self, timeout=None):
"""Blocks until the request completes, and returns self.request.
"""
if self._worker_thread.is_alive():
log.info("Waiting for response to {0} ...", self)
self._worker_thread.join(timeout)
if self.exception is not None:
raise self.exception
return self.request
def response_text(self):
"""Blocks until the request completes, and returns the response body.
"""
return self.wait_for_response().text
class WebServer(object):
"""Interacts with a web server listening on localhost on the specified port.
"""
def __init__(self, port):
self.port = port
self.url = fmt("http://localhost:{0}", port)
def __enter__(self):
"""Blocks until the server starts listening on self.port.
"""
log.info("Web server expected on {0}", self.url)
wait_until_port_is_listening(self.port, interval=3)
return self
def __exit__(self, exc_type, exc_value, exc_tb):
"""Sends an HTTP /exit GET request to the server.
The server is expected to terminate its process while handling that request.
"""
self.get("/exit", log_errors=False)
def get(self, path, *args, **kwargs):
return WebRequest.get(self.url + path, *args, **kwargs)
def post(self, path, *args, **kwargs):
return WebRequest.post(self.url + path, *args, **kwargs)
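# A hedged usage sketch of the helpers above (port range and paths are illustrative):
#   port = get_test_server_port(8000, 8100)
#   with WebServer(port) as server:      # blocks until something listens on the port
#       body = server.get("/status").response_text()
#       server.post("/echo", json={"ping": 1}).wait_for_response()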
|
_mDNS.py
|
import mdns
import socket
import struct
import threading
HANDLERS = []
def handler(func):
if func not in HANDLERS:
HANDLERS.append(func)
return func
class dnssocket:
def __init__(self, proto='ipv4', address=None, broadcast_ip=None) -> None:
if proto not in ('ipv4', 'ipv6'):
raise ValueError("Invalid proto - expected one of {}".format(('ipv4', 'ipv6')))
self._af_type = socket.AF_INET if proto == 'ipv4' else socket.AF_INET6
self.sock = socket.socket(self._af_type, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if proto == 'ipv4':
self._broadcast_ip = mdns.types.MDNS_IPV4_MCAST_IP if not broadcast_ip else broadcast_ip
self._address = (self._broadcast_ip, 5353)
bind_address = "0.0.0.0"
mreq = socket.inet_aton(self._broadcast_ip)
if address is not None:
mreq += socket.inet_aton(address)
else:
mreq += struct.pack(b"@I", socket.INADDR_ANY)
self.sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq,)
self.sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1)
elif proto == "ipv6":
self._broadcast_ip = mdns.types.MDNS_IPV6_MCAST_IP if not broadcast_ip else broadcast_ip
self._address = (self._broadcast_ip, 5353, 0, 0)
mreq = socket.inet_pton(socket.AF_INET6, self._broadcast_ip)
mreq += socket.inet_pton(socket.AF_INET6, "::")
self.sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq,)
self.sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_LOOP, 1)
bind_address = "::"
self.sock.bind((bind_address, 5353))
MDNS_SOCKET = dnssocket()
def sendN(data, s=MDNS_SOCKET):
return s.sock.sendto(data, s._address)
def startup(s=MDNS_SOCKET, count=-1):
th = threading.Thread(target=__thread_delegate, args=(s, count))
th.start()
def __thread_delegate(s=MDNS_SOCKET, count=-1):
try:
c = 0
while count < 0 or c < count:
data, address = s.sock.recvfrom(1024)
packet = mdns.loadm(data)
__delegate_exec(packet, address)
c += 1
except Exception as e:
print('Stopped at <Exception e="%s" |>' % (e))
def __delegate_exec(packet, addr):
for _h in HANDLERS:
try:
_h(packet, addr)
except Exception:
pass
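# A hedged usage sketch (assumes a multicast-capable interface; run as a script
# to print the next ten mDNS packets seen on the network):
if __name__ == '__main__':
    @handler
    def print_packet(packet, addr):
        # packet is whatever mdns.loadm() returned for the raw datagram
        print(addr, packet)

    startup(count=10)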
|
Win32Service.py
|
#encoding=utf-8
import win32serviceutil
import win32service
import win32event
import os
import logging
import inspect
import servicemanager
import sys
import win32timezone
import threading
from main import Bing
bing = Bing()
class PythonService(win32serviceutil.ServiceFramework):
_svc_name_ = "BingWallpaper" #服务名
_svc_display_name_ = "BingWallpaper Service" #服务在windows系统中显示的名称
_svc_description_ = "BingWallpaper Service Collector " #服务的描述
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
self.logger = self._getLogger()
self.run = True
def _getLogger(self):
logger = logging.getLogger('[PythonService]')
this_file = inspect.getfile(inspect.currentframe())
dirpath = os.path.abspath("D:\\Repositories\\BingDailyWallpaper\\logs")
handler = logging.FileHandler(os.path.join(dirpath, "service.log"))
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
return logger
def SvcDoRun(self):
import time
# try:
# from main import Bing
# bing = Bing()
# thread = threading.Thread(target=bing.run, kwargs={"waiting":10})
# thread.start()
# except Exception as err:
# self.logger.info(str(err))
# self.logger.info("service is run....")
# while self.run:
# # self.logger.info("I am runing....")
# time.sleep(2)
bing.run()
def SvcStop(self):
self.logger.info("service is stop....")
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
win32event.SetEvent(self.hWaitStop)
os.system("taskkill /IM Win32Service.exe /F")
self.run = False
if __name__=='__main__':
if len(sys.argv) == 1:
try:
evtsrc_dll = os.path.abspath(servicemanager.__file__)
servicemanager.PrepareToHostSingle(PythonService)
servicemanager.Initialize('PythonService', evtsrc_dll)
servicemanager.StartServiceCtrlDispatcher()
except win32service.error as details:
pass
else:
win32serviceutil.HandleCommandLine(PythonService)
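# Typical service management via win32serviceutil.HandleCommandLine, run from an
# elevated prompt:
#   python Win32Service.py install
#   python Win32Service.py start
#   python Win32Service.py stop
#   python Win32Service.py remove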
|
clamscanner.py
|
#!/usr/bin/env python3
import json
import queue
import sys
import time
import argparse
import threading
from multiprocessing import cpu_count
from pathlib import Path
from subprocess import Popen, PIPE, TimeoutExpired
from threading import Thread, Lock, Event
from typing import List, Tuple, Iterator, Set, Optional, NamedTuple, TextIO
SCAN_COMMAND = Tuple[str, Path]
class AlreadyScannedCache(NamedTuple):
cache: Set[Path]
lock: Lock
def generate_scan_commands(path: Path, already_scanned: Optional[AlreadyScannedCache] = None) -> Iterator[SCAN_COMMAND]:
try:
resolved = path.resolve(strict=True)
if already_scanned is not None:
with already_scanned.lock:
if resolved in already_scanned.cache:
print(f"{path}: SKIP")
return
already_scanned.cache.add(resolved)
except (FileNotFoundError, RuntimeError):
return
if path.is_file():
yield ["FILE_SCAN", path]
return
yield ["DIR_OPEN", path]
try:
for child in sorted(path.iterdir(), key=lambda x: 0 if x.is_dir() else 1):
for sub in generate_scan_commands(child, already_scanned):
yield sub
except (PermissionError, OSError):
pass
yield ["DIR_DONE", path]
def scan(path: Path, log_file: Optional[TextIO], file_cache: Optional[Path] = None) -> None:
skip_files: Set[Path] = set()
if file_cache is not None and file_cache.exists():
with file_cache.open('r') as f:
skip_files = set(filter(
lambda x: x.is_file(),
map(lambda x: Path(x), json.load(f))
))
print(f'Loaded {len(skip_files)} entries from cache')
already_scanned_cache = AlreadyScannedCache(cache=skip_files, lock=Lock())
commands: queue.Queue[Tuple[int, SCAN_COMMAND]] = queue.Queue(maxsize=cpu_count() * 20)
commands_unfinished = Event()
commands_unfinished.set()
log_lines_to_write: queue.Queue[Tuple[TextIO, str]] = queue.Queue(maxsize=cpu_count() * 100)
counter = {
'scanned-files': 0,
'infected-files': 0,
'time-start': time.time()
}
def thread_commands_generator():
for i, cmd in enumerate(generate_scan_commands(path, already_scanned_cache)):
while True:
if not commands_unfinished.is_set():
return
try:
commands.put((i, cmd), timeout=5)
break
except queue.Full:
pass
print("\n[SCANNER] Command generation finished")
commands_unfinished.clear()
def thread_write_log():
last_line_length = 0
while True:
if not commands_unfinished.is_set() and log_lines_to_write.empty():
alive_threads = sum(map(lambda _: 1, filter(lambda x: x.is_alive(), threading.enumerate())))
if alive_threads <= 2:
break
print(f'Waiting for log to finish, threads alive: {alive_threads}')
try:
console, line = log_lines_to_write.get(timeout=1)
is_infected = not line.endswith(': OK')
spaces = " " * (last_line_length - len(line))
print(line + spaces, end='\n' if is_infected else '\r', file=console, flush=True)
if log_file is not None:
log_file.write(line + '\n')
last_line_length = len(line)
except queue.Empty:
pass
def thread_scanning():
while True:
if not commands_unfinished.is_set():
print("\n[SCANNER] Thread finished")
break
try:
command_id, (command, target) = commands.get(timeout=1)
try:
if command == "FILE_SCAN":
p = Popen(["clamdscan", "--fdpass", "--no-summary", target.absolute()],
stdin=PIPE,
stderr=PIPE,
stdout=PIPE,
text=True
)
time_start = time.time()
while True:
try:
stdout, stderr = p.communicate(input=None, timeout=30)
break
except TimeoutExpired:
print(f"\n[SCANNER] "
f"{target} taking longer than it should ({int(time.time() - time_start)}s)")
stdout, stderr = stdout.strip(), stderr.strip()
if stdout:
counter['scanned-files'] += 1
if not stdout.endswith(': OK'):
counter['infected-files'] += 1
log_lines_to_write.put((sys.stdout, stdout))
if stderr:
log_lines_to_write.put((sys.stderr, stderr))
finally:
pass
except queue.Empty:
print("\n[SCANNER] ... nothing to do")
pass
threads: List[Thread] = []
for f in (thread_commands_generator, thread_write_log):
t = Thread(target=f)
t.setDaemon(True)
t.start()
threads.append(t)
print(f"\n[SCANNER] Starting {cpu_count()} scanning threads")
for _ in range(cpu_count()):
t = Thread(target=thread_scanning)
threads.append(t)
t.start()
try:
for t in threads:
t.join()
print()
print('=========================================')
print(f'Scan completed in {int(time.time() - counter["time-start"])} s')
print(f'Scanned files: {counter["scanned-files"]}')
print(f'Infected files: {counter["infected-files"]}')
except KeyboardInterrupt:
print("^C received, ending")
if file_cache is not None:
print(f'Saving cache to {file_cache}')
with already_scanned_cache.lock:
with file_cache.open('w') as f:
json.dump(
list(
map(lambda x: str(x),
filter(
lambda x: x.is_file(),
already_scanned_cache.cache
)
)
),
f
)
print('Cache saved')
commands_unfinished.clear()
print('Waiting for other threads to terminate')
for t in threads:
t.join()
print('Finishing')
def main() -> None:
parser = argparse.ArgumentParser(description='Recursive and fast scan')
parser.add_argument('path', metavar='path', type=str,
help='a path to scan')
parser.add_argument('--log', dest='log', type=str, help='log file to write information to', default=None)
parser.add_argument('--cache', dest='cache', type=str, help='where to store scanned cache info', default=None)
args = parser.parse_args()
log_file: Optional[TextIO] = None
if args.log is not None:
log_file = open(args.log, 'a')
scan(Path(args.path), log_file, Path(args.cache) if args.cache is not None else None)
if log_file is not None:
log_file.flush()
log_file.close()
if __name__ == '__main__':
main()
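# Example invocation (paths are illustrative; requires a running clamd daemon and
# the clamdscan client on PATH):
#   ./clamscanner.py /home --log scan.log --cache scanned.json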
|
test_cpu_sampler.py
|
# (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2020
import time
import unittest
import random
import threading
import sys
import traceback
from instana.autoprofile.profiler import Profiler
from instana.autoprofile.runtime import runtime_info
from instana.autoprofile.samplers.cpu_sampler import CPUSampler
class CPUSamplerTestCase(unittest.TestCase):
def test_cpu_profile(self):
if runtime_info.OS_WIN:
return
profiler = Profiler(None)
profiler.start(disable_timers=True)
sampler = CPUSampler(profiler)
sampler.setup()
sampler.reset()
def record():
sampler.start_sampler()
time.sleep(2)
sampler.stop_sampler()
record_t = threading.Thread(target=record)
record_t.start()
def cpu_work_main_thread():
for i in range(0, 1000000):
text = "text1" + str(i)
text = text + "text2"
cpu_work_main_thread()
record_t.join()
profile = sampler.build_profile(2000, 120000).to_dict()
#print(profile)
self.assertTrue('cpu_work_main_thread' in str(profile))
if __name__ == '__main__':
unittest.main()
|
asyncorereactor.py
|
# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
from collections import deque
from functools import partial
import logging
import os
import socket
import sys
from threading import Lock, Thread
import time
import weakref
from six.moves import range
try:
from weakref import WeakSet
except ImportError:
from cassandra.util import WeakSet # noqa
import asyncore
try:
import ssl
except ImportError:
ssl = None # NOQA
from cassandra.connection import Connection, ConnectionShutdown, NONBLOCKING, Timer, TimerManager
log = logging.getLogger(__name__)
_dispatcher_map = {}
def _cleanup(loop_weakref):
try:
loop = loop_weakref()
except ReferenceError:
return
loop._cleanup()
class _PipeWrapper(object):
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
def close(self):
os.close(self.fd)
def getsockopt(self, level, optname, buflen=None):
# act like an unerrored socket for the asyncore error handling
if level == socket.SOL_SOCKET and optname == socket.SO_ERROR and not buflen:
return 0
raise NotImplementedError()
class _AsyncoreDispatcher(asyncore.dispatcher):
def __init__(self, socket):
asyncore.dispatcher.__init__(self, map=_dispatcher_map)
# inject after to avoid base class validation
self.set_socket(socket)
self._notified = False
def writable(self):
return False
def validate(self):
assert not self._notified
self.notify_loop()
assert self._notified
self.loop(0.1)
assert not self._notified
def loop(self, timeout):
asyncore.loop(timeout=timeout, use_poll=True, map=_dispatcher_map, count=1)
class _AsyncorePipeDispatcher(_AsyncoreDispatcher):
def __init__(self):
self.read_fd, self.write_fd = os.pipe()
_AsyncoreDispatcher.__init__(self, _PipeWrapper(self.read_fd))
def writable(self):
return False
def handle_read(self):
while len(os.read(self.read_fd, 4096)) == 4096:
pass
self._notified = False
def notify_loop(self):
if not self._notified:
self._notified = True
os.write(self.write_fd, b'x')
class _AsyncoreUDPDispatcher(_AsyncoreDispatcher):
"""
Experimental alternate dispatcher for avoiding busy wait in the asyncore loop. It is not used by default because
it relies on local port binding.
Port scanning is not implemented, so multiple clients on one host will collide. This address would need to be set per
instance, or this could be specialized to scan until an address is found.
To use::
from cassandra.io.asyncorereactor import _AsyncoreUDPDispatcher, AsyncoreLoop
AsyncoreLoop._loop_dispatch_class = _AsyncoreUDPDispatcher
"""
bind_address = ('localhost', 10000)
def __init__(self):
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._socket.bind(self.bind_address)
self._socket.setblocking(0)
_AsyncoreDispatcher.__init__(self, self._socket)
def handle_read(self):
try:
d = self._socket.recvfrom(1)
while d and d[1]:
d = self._socket.recvfrom(1)
except socket.error as e:
pass
self._notified = False
def notify_loop(self):
if not self._notified:
self._notified = True
self._socket.sendto(b'', self.bind_address)
def loop(self, timeout):
asyncore.loop(timeout=timeout, use_poll=False, map=_dispatcher_map, count=1)
class _BusyWaitDispatcher(object):
max_write_latency = 0.001
"""
Timeout pushed down to asyncore select/poll. Dictates the amount of time it will sleep before coming back to check
if anything is writable.
"""
def notify_loop(self):
pass
def loop(self, timeout):
if not _dispatcher_map:
time.sleep(0.005)
count = timeout // self.max_write_latency
asyncore.loop(timeout=self.max_write_latency, use_poll=True, map=_dispatcher_map, count=count)
def validate(self):
pass
def close(self):
pass
class AsyncoreLoop(object):
timer_resolution = 0.1 # used as the max interval to be in the io loop before returning to service timeouts
_loop_dispatch_class = _AsyncorePipeDispatcher if os.name != 'nt' else _BusyWaitDispatcher
def __init__(self):
self._pid = os.getpid()
self._loop_lock = Lock()
self._started = False
self._shutdown = False
self._thread = None
self._timers = TimerManager()
dispatcher = None
try:
dispatcher = self._loop_dispatch_class()
dispatcher.validate()
log.debug("Validated loop dispatch with %s", self._loop_dispatch_class)
except Exception:
log.exception("Failed validating loop dispatch with %s. Using busy wait execution instead.", self._loop_dispatch_class)
if dispatcher is not None:
dispatcher.close()
dispatcher = _BusyWaitDispatcher()
self._loop_dispatcher = dispatcher
atexit.register(partial(_cleanup, weakref.ref(self)))
def maybe_start(self):
should_start = False
did_acquire = False
try:
did_acquire = self._loop_lock.acquire(False)
if did_acquire and not self._started:
self._started = True
should_start = True
finally:
if did_acquire:
self._loop_lock.release()
if should_start:
self._thread = Thread(target=self._run_loop, name="cassandra_driver_event_loop")
self._thread.daemon = True
self._thread.start()
def wake_loop(self):
self._loop_dispatcher.notify_loop()
def _run_loop(self):
log.debug("Starting asyncore event loop")
with self._loop_lock:
while not self._shutdown:
try:
self._loop_dispatcher.loop(self.timer_resolution)
self._timers.service_timeouts()
except Exception:
log.debug("Asyncore event loop stopped unexepectedly", exc_info=True)
break
self._started = False
log.debug("Asyncore event loop ended")
def add_timer(self, timer):
self._timers.add_timer(timer)
def _cleanup(self):
self._shutdown = True
if not self._thread:
return
log.debug("Waiting for event loop thread to join...")
self._thread.join(timeout=1.0)
if self._thread.is_alive():
log.warning(
"Event loop thread could not be joined, so shutdown may not be clean. "
"Please call Cluster.shutdown() to avoid this.")
log.debug("Event loop thread was joined")
class AsyncoreConnection(Connection, asyncore.dispatcher):
"""
An implementation of :class:`.Connection` that uses the ``asyncore``
module in the Python standard library for its event loop.
"""
_loop = None
_writable = False
_readable = False
@classmethod
def initialize_reactor(cls):
if not cls._loop:
cls._loop = AsyncoreLoop()
else:
current_pid = os.getpid()
if cls._loop._pid != current_pid:
log.debug("Detected fork, clearing and reinitializing reactor state")
cls.handle_fork()
cls._loop = AsyncoreLoop()
@classmethod
def handle_fork(cls):
global _dispatcher_map
_dispatcher_map = {}
if cls._loop:
cls._loop._cleanup()
cls._loop = None
@classmethod
def create_timer(cls, timeout, callback):
timer = Timer(timeout, callback)
cls._loop.add_timer(timer)
return timer
def __init__(self, *args, **kwargs):
Connection.__init__(self, *args, **kwargs)
self.deque = deque()
self.deque_lock = Lock()
self._connect_socket()
asyncore.dispatcher.__init__(self, self._socket, _dispatcher_map)
self._writable = True
self._readable = True
self._send_options_message()
# start the event loop if needed
self._loop.maybe_start()
def close(self):
with self.lock:
if self.is_closed:
return
self.is_closed = True
log.debug("Closing connection (%s) to %s", id(self), self.host)
self._writable = False
self._readable = False
asyncore.dispatcher.close(self)
log.debug("Closed socket to %s", self.host)
if not self.is_defunct:
self.error_all_requests(
ConnectionShutdown("Connection to %s was closed" % self.host))
#This happens when the connection is shutdown while waiting for the ReadyMessage
if not self.connected_event.is_set():
self.last_error = ConnectionShutdown("Connection to %s was closed" % self.host)
# don't leave in-progress operations hanging
self.connected_event.set()
def handle_error(self):
self.defunct(sys.exc_info()[1])
def handle_close(self):
log.debug("Connection %s closed by server", self)
self.close()
def handle_write(self):
while True:
with self.deque_lock:
try:
next_msg = self.deque.popleft()
except IndexError:
self._writable = False
return
try:
sent = self.send(next_msg)
self._readable = True
except socket.error as err:
if (err.args[0] in NONBLOCKING):
with self.deque_lock:
self.deque.appendleft(next_msg)
else:
self.defunct(err)
return
else:
if sent < len(next_msg):
with self.deque_lock:
self.deque.appendleft(next_msg[sent:])
if sent == 0:
return
def handle_read(self):
try:
while True:
buf = self.recv(self.in_buffer_size)
self._iobuf.write(buf)
if len(buf) < self.in_buffer_size:
break
except socket.error as err:
if ssl and isinstance(err, ssl.SSLError):
if err.args[0] not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
self.defunct(err)
return
elif err.args[0] not in NONBLOCKING:
self.defunct(err)
return
if self._iobuf.tell():
self.process_io_buffer()
if not self._requests and not self.is_control_connection:
self._readable = False
def push(self, data):
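        # Split the outgoing payload into out_buffer_size-sized chunks so that a single
        # send() call never has to handle more than one buffer's worth of data, then wake the loop.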
sabs = self.out_buffer_size
if len(data) > sabs:
chunks = []
for i in range(0, len(data), sabs):
chunks.append(data[i:i + sabs])
else:
chunks = [data]
with self.deque_lock:
self.deque.extend(chunks)
self._writable = True
self._loop.wake_loop()
def writable(self):
return self._writable
def readable(self):
return self._readable or (self.is_control_connection and not (self.is_defunct or self.is_closed))
|
VideoDisplay.py
|
import cv2
import os
import threading
from PyQt5.QtWidgets import QFileDialog, QLCDNumber
from PyQt5.QtGui import QImage, QPixmap
# from test import test as test, cut_picture
from test import cut_picture
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
class Display:
def __init__(self, ui, mainWnd):
self.ui = ui
self.mainWnd = mainWnd
        # Default video source is the camera
self.ui.radioButtonCam.setChecked(True)
self.isCamera = True
self.isOpen = False
        self.Attentiveness()
        # Signal/slot wiring
ui.Open.clicked.connect(self.Open)
ui.Close.clicked.connect(self.Close)
ui.radioButtonCam.clicked.connect(self.radioButtonCam)
ui.radioButtonFile.clicked.connect(self.radioButtonFile)
        # Create a stop event, initially cleared
self.stopEvent = threading.Event()
self.stopEvent.clear()
def radioButtonCam(self):
self.isCamera = True
def radioButtonFile(self):
self.isCamera = False
def Open(self):
if not self.isCamera:
self.fileName, self.fileType = QFileDialog.getOpenFileName(self.mainWnd, 'Choose file', '', '*.mp4')
            # The thread would deadlock here; reason unknown...
self.cap = cv2.VideoCapture(self.fileName)
self.frameRate = self.cap.get(cv2.CAP_PROP_FPS)
# th = threading.Thread(target=self.Display)
# lock = threading.Lock()
# th.start()
# th.run()
# self.Display()
f = 1
i = 1
score = 0.00
time = 1000
while self.cap.isOpened():
self.cap.set(cv2.CAP_PROP_POS_MSEC, int(time / 1000 * 12.333))
time += 1000
success, frame = self.cap.read()
                # Swap channel order (BGR <-> RGB) so the frame displays correctly in QImage
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
img = QImage(frame.data, frame.shape[1], frame.shape[0], QImage.Format_RGB888)
self.ui.DispalyLabel.setPixmap(QPixmap.fromImage(img))
if self.isCamera:
cv2.waitKey(1)
else:
cv2.waitKey(int(1000 / self.frameRate))
                # Check whether the stop event has been triggered
if self.stopEvent.is_set():
                    # Reset the stop event and clear the display label
self.stopEvent.clear()
self.ui.DispalyLabel.clear()
self.ui.Close.setEnabled(False)
self.ui.Open.setEnabled(True)
break
                timeF = 100  # interval (in frames) between saved frames
                if f % timeF == 0:  # save a frame every timeF frames
                    cv2.imwrite('picture/cut0.jpg', frame)  # save the frame as an image
i = i + 1
print("i:" + str(i) + ',frame:' + str(f))
try:
score = cut_picture()
                    except OSError as err:
                        print(err)
finally:
self.ui.Attentiveness.display(score)
f = f + 1
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
            # Both of the following RTSP URL formats are supported
# cap = cv2.VideoCapture("rtsp://admin:Supcon1304@172.20.1.126/main/Channels/1")
# self.cap = cv2.VideoCapture("rtsp://admin:Supcon1304@172.20.1.126:554/h264/ch1/main/av_stream")
self.cap = cv2.VideoCapture(0)
            # Create the video display thread
th = threading.Thread(target=self.Display)
th.start()
# cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
# out = cv2.VideoWriter(out_path + 'out.mp4', fourcc, 0.005, (640, 480))
f = 1
i = 1
score = 0.00
            while self.cap.isOpened():  # check that the capture opened correctly
rval, frame = self.cap.read()
# cv2.imshow("img", frame)
                timeF = 100  # interval (in frames) between saved frames
                if f % timeF == 0:  # save a frame every timeF frames
                    cv2.imwrite('picture/cut0.jpg', frame)  # save the frame as an image
i = i + 1
print("i:" + str(i) + ',frame:' + str(f))
try:
score = cut_picture()
                    except OSError as err:
                        print(err)
finally:
self.ui.Attentiveness.display(score)
f = f + 1
if cv2.waitKey(1) & 0xFF == ord('q'):
break
def Close(self):
        # Trigger the stop event to end video playback
self.stopEvent.set()
self.isOpen = False
    def Attentiveness(self):
show = self.ui.Attentiveness
show.display(0.00)
# if self.isOpen:
# show.display(test(self))
def Display(self):
self.ui.Open.setEnabled(False)
self.ui.Close.setEnabled(True)
while self.cap.isOpened():
success, frame = self.cap.read()
            # Swap channel order (BGR <-> RGB) so the frame displays correctly in QImage
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
img = QImage(frame.data, frame.shape[1], frame.shape[0], QImage.Format_RGB888)
self.ui.DispalyLabel.setPixmap(QPixmap.fromImage(img))
if self.isCamera:
cv2.waitKey(1)
else:
cv2.waitKey(int(1000 / self.frameRate))
            # Check whether the stop event has been triggered
if self.stopEvent.is_set():
                # Reset the stop event and clear the display label
self.stopEvent.clear()
self.ui.DispalyLabel.clear()
self.ui.Close.setEnabled(False)
self.ui.Open.setEnabled(True)
break
|
streaming.py
|
"""
Streaming Parallel Data Processing
===================================================================
Neuraxle steps for streaming data in parallel in the pipeline
..
Copyright 2019, Neuraxio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import warnings
from abc import abstractmethod
from multiprocessing import Queue
from multiprocessing.context import Process
from threading import Thread
from typing import Tuple, List, Union, Iterable
from neuraxle.base import NamedTupleList, ExecutionContext, BaseStep, MetaStepMixin, NonFittableMixin, BaseSaver
from neuraxle.data_container import DataContainer, ListDataContainer
from neuraxle.pipeline import Pipeline, CustomPipelineMixin, MiniBatchSequentialPipeline, Joiner
from neuraxle.steps.numpy import NumpyConcatenateOuterBatch
class ObservableQueueMixin:
"""
    A class to represent a step that can put items in a queue.
    It can also notify other queues that have subscribed to it using ``subscribe``.
.. seealso::
:class:`BaseStep`,
:class:`QueuedPipelineTask`,
:class:`QueueWorker`,
:class:`BaseQueuedPipeline`,
:class:`ParallelQueuedPipeline`,
:class:`SequentialQueuedPipeline`
"""
def __init__(self, queue):
self.queue = queue
self.observers = []
self._ensure_proper_mixin_init_order()
def _ensure_proper_mixin_init_order(self):
if not hasattr(self, 'savers'):
warnings.warn(
                'Please initialize the mixins in the proper order: ObservableQueueMixin should be initialized after '
                'BaseStep so that ObservableQueueStepSaver can be appended to the existing savers. Saving might otherwise fail.'
)
self.savers = [ObservableQueueStepSaver()]
else:
self.savers.append(ObservableQueueStepSaver())
def subscribe(self, observer_queue_worker: 'ObservableQueueMixin') -> 'ObservableQueueMixin':
"""
Subscribe a queue worker.
The subscribed queue workers get notified when :func:`~neuraxle.distributed.streaming.ObservableQueueMixin.notify` is called.
"""
self.observers.append(observer_queue_worker.queue)
return self
def get(self) -> 'QueuedPipelineTask':
"""
        Get the next task from the queue.
"""
return self.queue.get()
def put(self, value: DataContainer):
"""
Put a queued pipeline task in queue.
"""
self.queue.put(QueuedPipelineTask(step_name=self.name, data_container=value.copy()))
def notify(self, value):
"""
Notify all subscribed queue workers
"""
for observer in self.observers:
observer.put(value)
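# Usage sketch (illustrative only; the worker names are assumed): chaining two queue
# workers so that whatever worker_a notifies lands in worker_b's queue:
#
#     worker_a.subscribe(worker_b)   # worker_b.queue now receives worker_a's notifications
#     worker_a.notify(task)          # push a QueuedPipelineTask to every subscribed queue
#     task = worker_b.get()          # worker_b pops the task from its own queue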
class QueuedPipelineTask(object):
"""
Data object to contain the tasks processed by the queued pipeline.
.. seealso::
:class:`QueueWorker`,
:class:`BaseQueuedPipeline`,
:class:`ParallelQueuedPipeline`,
:class:`SequentialQueuedPipeline`
"""
def __init__(self, data_container, step_name=None):
self.step_name = step_name
self.data_container = data_container
class ObservableQueueStepSaver(BaseSaver):
"""
Saver for observable queue steps.
.. seealso::
:class:`QueueWorker`,
:class:`neuraxle.base.BaseSaver`,
:class:`BaseQueuedPipeline`,
:class:`ParallelQueuedPipeline`,
:class:`SequentialQueuedPipeline`
"""
def save_step(self, step: 'BaseStep', context: 'ExecutionContext') -> 'BaseStep':
step.queue = None
step.observers = []
return step
def can_load(self, step: 'BaseStep', context: 'ExecutionContext'):
return True
def load_step(self, step: 'BaseStep', context: 'ExecutionContext') -> 'BaseStep':
step.queue = Queue()
return step
class QueueWorker(ObservableQueueMixin, MetaStepMixin, BaseStep):
"""
    Start multiple Processes or Threads that consume batches from the queue and process them.
    It is both an observable and an observer.
    It notifies its observers with the results of the wrapped step's handle_transform method.
It receives the next data container to process.
.. seealso::
:class:`Observer`,
:class:`Observable`,
:class:`MetaStepMixin`,
:class:`BaseStep`
"""
def __init__(
self,
wrapped: BaseStep,
max_queue_size: int,
n_workers: int,
use_threading: bool,
additional_worker_arguments=None,
use_savers=False
):
if not additional_worker_arguments:
additional_worker_arguments = [[] for _ in range(n_workers)]
BaseStep.__init__(self)
MetaStepMixin.__init__(self, wrapped)
ObservableQueueMixin.__init__(self, Queue(maxsize=max_queue_size))
self.use_threading: bool = use_threading
self.workers: List[Process] = []
self.n_workers: int = n_workers
self.observers: List[Queue] = []
self.additional_worker_arguments = additional_worker_arguments
self.use_savers = use_savers
def start(self, context: ExecutionContext):
"""
Start multiple processes or threads with the worker function as a target.
:param context: execution context
:type context: ExecutionContext
:return:
"""
target_function = worker_function
if self.use_savers:
self.save(context, full_dump=True)
target_function = worker_function
self.workers = []
for _, worker_arguments in zip(range(self.n_workers), self.additional_worker_arguments):
if self.use_threading:
p = Thread(target=target_function, args=(self, context, self.use_savers, worker_arguments))
else:
p = Process(target=target_function, args=(self, context, self.use_savers, worker_arguments))
p.daemon = True
p.start()
self.workers.append(p)
def stop(self):
"""
Stop all of the workers.
:return:
"""
if not self.use_threading:
[w.terminate() for w in self.workers]
self.workers = []
self.observers = []
def worker_function(queue_worker: QueueWorker, context: ExecutionContext, use_savers: bool, additional_worker_arguments):
"""
Worker function that transforms the items inside the queue of items to process.
:param queue_worker: step to transform
:param context: execution context
:param use_savers: use savers
:param additional_worker_arguments: any additional arguments that need to be passed to the workers
:return:
"""
step = queue_worker.get_step()
if use_savers:
saved_queue_worker: QueueWorker = context.load(queue_worker.get_name())
step = saved_queue_worker.get_step()
additional_worker_arguments = tuple(
additional_worker_arguments[i: i + 2] for i in range(0, len(additional_worker_arguments), 2)
)
for argument_name, argument_value in additional_worker_arguments:
step.__dict__.update({argument_name: argument_value})
while True:
task: QueuedPipelineTask = queue_worker.get()
summary_id = task.data_container.summary_id
data_container = step.handle_transform(task.data_container, context)
data_container = data_container.set_summary_id(summary_id)
queue_worker.notify(QueuedPipelineTask(step_name=queue_worker.name, data_container=data_container))
QueuedPipelineStepsTuple = Union[
BaseStep, # step
Tuple[int, BaseStep], # (n_workers, step)
Tuple[str, BaseStep], # (step_name, step)
Tuple[str, int, BaseStep], # (step_name, n_workers, step)
Tuple[str, int, int, BaseStep], # (step_name, n_workers, max_queue_size, step)
Tuple[str, int, List[Tuple], BaseStep], # (step_name, n_workers, additional_worker_arguments, step)
    Tuple[str, int, List[Tuple], int, BaseStep]  # (step_name, n_workers, additional_worker_arguments, max_queue_size, step)
]
class BaseQueuedPipeline(MiniBatchSequentialPipeline):
"""
    Subclass of :class:`Pipeline`.
    Transform data through many pipeline steps at once, in parallel, using multiprocessing Queues.
Example usage :
.. code-block:: python
# step name, step
p = QueuedPipeline([
('step_a', Identity()),
('step_b', Identity()),
], n_workers=1, batch_size=10, max_queue_size=10)
# step name, number of workers, step
p = QueuedPipeline([
('step_a', 1, Identity()),
('step_b', 1, Identity()),
], batch_size=10, max_queue_size=10)
# step name, number of workers, and max size
p = QueuedPipeline([
('step_a', 1, 10, Identity()),
('step_b', 1, 10, Identity()),
], batch_size=10)
        # step name, number of workers for each step, and additional arguments for each worker
        p = QueuedPipeline([
            ('step_a', 1, [('host', 'host1'), ('host', 'host2')], Identity())
        ], batch_size=10)
# step name, number of workers for each step, additional argument for each worker, and max size
p = QueuedPipeline([
('step_a', 1, [('host', 'host1'), ('host', 'host2')], 10, Identity())
], batch_size=10)
.. seealso::
:class:`QueueWorker`,
:class:`QueueJoiner`,
:class:`CustomPipelineMixin`,
:class:`Pipeline`
"""
def __init__(
self,
steps: List[QueuedPipelineStepsTuple],
batch_size,
n_workers_per_step=None,
max_queue_size=None,
data_joiner=None,
use_threading=False,
use_savers=False,
cache_folder=None
):
NonFittableMixin.__init__(self)
CustomPipelineMixin.__init__(self)
if data_joiner is None:
data_joiner = NumpyConcatenateOuterBatch()
self.data_joiner = data_joiner
self.max_queue_size = max_queue_size
self.batch_size = batch_size
self.n_workers_per_step = n_workers_per_step
self.use_threading = use_threading
self.use_savers = use_savers
MiniBatchSequentialPipeline.__init__(self, steps=self._initialize_steps_as_tuple(steps),
cache_folder=cache_folder)
self._refresh_steps()
def _initialize_steps_as_tuple(self, steps):
"""
Wrap each step by a :class:`QueueWorker` to allow data to flow in many pipeline steps at once in parallel.
:param steps: (name, n_workers, step)
:type steps: NameNWorkerStepTupleList
:return: steps as tuple
:rtype: NamedTupleList
"""
steps_as_tuple: NamedTupleList = []
for step in steps:
queue_worker = self._create_queue_worker(step)
steps_as_tuple.append((queue_worker.name, queue_worker))
steps_as_tuple.append(('queue_joiner', QueueJoiner(batch_size=self.batch_size)))
return steps_as_tuple
def _create_queue_worker(self, step: QueuedPipelineStepsTuple):
name, n_workers, additional_worker_arguments, max_queue_size, actual_step = self._get_step_params(step)
return QueueWorker(
actual_step,
n_workers=n_workers,
use_threading=self.use_threading,
max_queue_size=max_queue_size,
additional_worker_arguments=additional_worker_arguments,
use_savers=self.use_savers
).set_name('QueueWorker{}'.format(name))
def _get_step_params(self, step):
"""
Return all params necessary to create the QueuedPipeline for the given step.
:param step: tuple
:type step: QueuedPipelineStepsTupleList
        :return: name, n_workers, additional_worker_arguments, max_queue_size, actual_step
        :rtype: tuple(str, int, list, int, BaseStep)
"""
if isinstance(step, BaseStep):
actual_step = step
name = step.name
max_queue_size = self.max_queue_size
n_workers = self.n_workers_per_step
additional_arguments = []
elif len(step) == 2:
if isinstance(step[0], str):
name, actual_step = step
n_workers = self.n_workers_per_step
else:
n_workers, actual_step = step
name = actual_step.name
max_queue_size = self.max_queue_size
additional_arguments = []
elif len(step) == 3:
name, n_workers, actual_step = step
max_queue_size = self.max_queue_size
additional_arguments = []
elif len(step) == 4:
if isinstance(step[2], Iterable):
name, n_workers, additional_arguments, actual_step = step
max_queue_size = self.max_queue_size
else:
name, n_workers, max_queue_size, actual_step = step
additional_arguments = []
elif len(step) == 5:
name, n_workers, additional_arguments, max_queue_size, actual_step = step
else:
raise Exception('Invalid Queued Pipeline Steps Shape.')
return name, n_workers, additional_arguments, max_queue_size, actual_step
def setup(self) -> 'BaseStep':
"""
Connect the queued workers together so that the data can correctly flow through the pipeline.
:return: step
:rtype: BaseStep
"""
if not self.is_initialized:
self.connect_queued_pipeline()
self.is_initialized = True
return self
def fit_transform_data_container(self, data_container: DataContainer, context: ExecutionContext) -> ('Pipeline', DataContainer):
"""
Fit transform sequentially if any step is fittable. Otherwise transform in parallel.
:param data_container: data container
:type data_container: DataContainer
:param context: execution context
:type context: ExecutionContext
:return:
"""
all_steps_are_not_fittable = True
for _, step in self[:-1]:
if not isinstance(step.get_step(), NonFittableMixin):
all_steps_are_not_fittable = False
if all_steps_are_not_fittable:
data_container = self.transform_data_container(data_container, context)
data_container = self._did_transform(data_container, context)
return self, data_container
self.is_invalidated = True
return super().fit_transform_data_container(data_container, context)
def transform_data_container(self, data_container: DataContainer, context: ExecutionContext) -> DataContainer:
"""
Transform data container
:param data_container: data container to transform.
:type data_container: DataContainer
:param context: execution context
:type context: ExecutionContext
:return: data container
"""
data_container_batches = data_container.convolved_1d(stride=self.batch_size, kernel_size=self.batch_size)
n_batches = self.get_n_batches(data_container)
self[-1].set_n_batches(n_batches)
for name, step in self[:-1]:
step.start(context)
batch_index = 0
for data_container_batch in data_container_batches:
self.send_batch_to_queued_pipeline(batch_index=batch_index, data_container=data_container_batch)
batch_index += 1
data_container = self[-1].join(original_data_container=data_container)
return data_container
def _did_transform(self, data_container: DataContainer, context: ExecutionContext) -> DataContainer:
"""
Stop all of the workers after transform. Also, join the data using self.data_joiner.
:param data_container: data container
:type data_container: DataContainer
:param context: execution context
:type context: ExecutionContext
:return: data container
:rtype: DataContainer
"""
for name, step in self[:-1]:
step.stop()
return self.data_joiner.handle_transform(data_container, context)
@abstractmethod
def get_n_batches(self, data_container) -> int:
"""
Get the total number of batches that the queue joiner is supposed to receive.
:param data_container: data container to transform
:type data_container: DataContainer
:return:
"""
raise NotImplementedError()
@abstractmethod
def connect_queued_pipeline(self):
"""
Connect all the queued workers together so that the data can flow through each step.
:return:
"""
raise NotImplementedError()
@abstractmethod
def send_batch_to_queued_pipeline(self, batch_index: int, data_container: DataContainer):
"""
Send batches to queued pipeline. It is blocking if there is no more space available in the multiprocessing queues.
Workers might return batches in a different order, but the queue joiner will reorder them at the end.
The queue joiner will use the summary ids to reorder all of the received batches.
:param batch_index: batch index
:param data_container: data container batch
:return:
"""
raise NotImplementedError()
class SequentialQueuedPipeline(BaseQueuedPipeline):
"""
Using :class:`QueueWorker`, run all steps sequentially even if they are in separate processes or threads.
.. seealso::
:class:`QueueWorker`,
:class:`BaseQueuedPipeline`,
:class:`ParallelQueuedPipeline`,
:class:`QueueJoiner`,
:class:`Observer`,
:class:`Observable`
"""
def get_n_batches(self, data_container) -> int:
"""
Get the number of batches to process.
:param data_container: data container to transform
:return: number of batches
"""
return data_container.get_n_batches(self.batch_size)
def connect_queued_pipeline(self):
"""
        Sequentially connect the queued workers.
:return:
"""
for i, (name, step) in enumerate(self[1:]):
self[i].subscribe(step)
def send_batch_to_queued_pipeline(self, batch_index: int, data_container: DataContainer):
"""
Send batches to process to the first queued worker.
:param batch_index: batch index
:param data_container: data container batch
:return:
"""
data_container = data_container.set_summary_id(data_container.hash_summary())
self[-1].summary_ids.append(data_container.summary_id)
self[0].put(data_container)
class ParallelQueuedFeatureUnion(BaseQueuedPipeline):
"""
Using :class:`QueueWorker`, run all steps in parallel using QueueWorkers.
.. seealso::
:class:`QueueWorker`,
:class:`BaseQueuedPipeline`,
:class:`SequentialQueuedPipeline`,
:class:`QueueJoiner`,
:class:`Observer`,
:class:`Observable`
"""
def get_n_batches(self, data_container):
"""
Get the number of batches to process by the queue joiner.
:return:
"""
return data_container.get_n_batches(self.batch_size) * (len(self) - 1)
def connect_queued_pipeline(self):
"""
Connect the queue joiner to all of the queued workers to process data in parallel.
:return:
"""
for name, step in self[:-1]:
step.subscribe(self[-1])
def send_batch_to_queued_pipeline(self, batch_index: int, data_container: DataContainer):
"""
Send batches to process to all of the queued workers.
:param batch_index: batch index
:param data_container: data container batch
:return:
"""
for name, step in self[:-1]:
data_container = data_container.set_summary_id(data_container.hash_summary())
self[-1].summary_ids.append(data_container.summary_id)
step.put(data_container)
class QueueJoiner(ObservableQueueMixin, Joiner):
"""
Observe the results of the queue worker of type :class:`QueueWorker`.
Synchronize all of the workers together.
.. seealso::
:class:`QueuedPipeline`,
:class:`Observer`,
:class:`ListDataContainer`,
:class:`DataContainer`
"""
def __init__(self, batch_size, n_batches=None):
self.n_batches_left_to_do = n_batches
self.summary_ids = []
self.result = {}
Joiner.__init__(self, batch_size=batch_size)
ObservableQueueMixin.__init__(self, Queue())
def set_n_batches(self, n_batches):
self.n_batches_left_to_do = n_batches
def join(self, original_data_container: DataContainer) -> DataContainer:
"""
Return the accumulated results received by the on next method of this observer.
:return: transformed data container
:rtype: DataContainer
"""
while self.n_batches_left_to_do > 0:
task: QueuedPipelineTask = self.queue.get()
self.n_batches_left_to_do -= 1
step_name = task.step_name
if step_name not in self.result:
self.result[step_name] = ListDataContainer(
current_ids=[],
data_inputs=[],
expected_outputs=[],
summary_id=task.data_container.summary_id
)
self.result[step_name].append_data_container_in_data_inputs(task.data_container)
data_containers = self._join_all_step_results()
self.result = {}
return original_data_container.set_data_inputs(data_containers)
def _join_all_step_results(self):
"""
Concatenate all resulting data containers together.
:return:
"""
results = []
for step_name, data_containers in self.result.items():
step_results = self._join_step_results(data_containers)
results.append(step_results)
return results
def _join_step_results(self, data_containers):
# reorder results by summary id
data_containers.data_inputs.sort(key=lambda dc: self.summary_ids.index(dc.summary_id))
step_results = ListDataContainer.empty()
for data_container in data_containers.data_inputs:
data_container = data_container.set_summary_id(data_containers.data_inputs[-1].summary_id)
step_results.concat(data_container)
return step_results
|
Demo.py
|
# This short demo was created by Athanasios Raptakis and Viacheslav Honcharenko
# during WS2017 for the Robotics Practical (Lip Articulation with Roboface) at Heidelberg University
import numpy as np
from threading import Thread, Event
import face
from time import sleep, time
import os
from scipy.io import wavfile
from scipy.ndimage.filters import maximum_filter1d,gaussian_filter
import matplotlib.pyplot as plt
from nltk.tokenize import sent_tokenize
import string
#download nltk punkt in order to complete nltk set-up
#nltk.download()
#Create an Instance of Roboface class
roboFace = face.Face(x_weight=0.8, y_weight=0.2)
#The Lip trajectory is generated
def Undersampled_Lip_Trajectory(phrase, Sleep_Time):
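    # Approach (summarised from the code below): synthesise the phrase to test.wav with
    # espeak, take a running maximum plus Gaussian smoothing of the waveform to obtain an
    # amplitude envelope, then sample that envelope every Sleep_Time seconds to produce
    # the lip-opening amplitudes (0..max_Amplitude) and their timestamps.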
A ="espeak -z -s 100 -v female5 -w test.wav "
A=A + "'" + phrase + "'"
#os.system("espeak -z -s 80 -v female5 -w test.wav 'Hey, why no one is looking at me? I feel neglected. I feel it! I am afraid!' ")
os.system(A)
samplerate, data = wavfile.read('test.wav')
dt=1/float(samplerate)
times = np.arange(len(data))/float(samplerate)
N=len(times)
max_data=maximum_filter1d(data,size=1000)
max_data=gaussian_filter(max_data,sigma=100)
max_Amplitude=10
Amplitude=max_Amplitude*(max_data/float(np.max(max_data)))
n=Sleep_Time*samplerate
Amp=[]
T=[]
i=0
while (i*n<N):
Amp.append(Amplitude[int(i*n)])
T.append(times[int(i*n)])
i=i+1
Amp=np.array(Amp)
T=np.array(T)
'''
plt.figure(1)
plt.suptitle(phrase)
plt.subplot(211)
plt.plot(times,data)
plt.plot(times,max_data,'r')
plt.subplot(212)
plt.plot(times,Amplitude)
plt.plot(T,Amp,'r*')
plt.show()
'''
return Amp,T
# Thread that moves Lips
def MoveLips(Sleep_Time, Amplitude, flag):
roboFace.setSpeedLips(127)
i=0
while flag.isSet() and i < len(Amplitude):
roboFace.moveLips(int(Amplitude[i]))
sleep(Sleep_Time)
i = i + 1
    if not flag.isSet():
roboFace.moveLips(0)
sleep(0.05)
#Thread That creates sound
def Talk(phrase, flag):
A = "espeak -z -s 100 -v female5 "
A = A + "'" + phrase + "'"
os.system(A)
flag.clear()
#Say function which starts the two parallel threads
def Say(text):
phrases=sent_tokenize(text)
for phrase in phrases:
phrase=phrase.replace("'"," ")
flag = Event()
flag.set()
Sleep_Time=0.05
        Amplitude, Time = Undersampled_Lip_Trajectory(phrase, Sleep_Time)
thread_movement = Thread(target=MoveLips, args=(Sleep_Time, Amplitude, flag))
thread_talk = Thread(target=Talk, args=(phrase, flag))
thread_talk.start()
thread_movement.start()
thread_talk.join()
thread_movement.join()
#Example - Demo of the Robot
roboFace.setSpeedHead(60)
sleep(1)
Say('Hi!')
roboFace.angry()
sleep(1)
roboFace.neutral()
Say('My name is Roboface! Welcome to the Robotics Lab!')
roboFace.moveHead(500,500)
sleep(1)
roboFace.moveHead(0,500)
sleep(1)
roboFace.neutral()
Phr1=["My purpose is to study Human Robot interaction","I can recognise human emotions and express my fillings though verbal and non verbal comunication","I can express emotions like happiness"]
for phr in Phr1:
Say(phr)
roboFace.angry()
sleep(1)
Say('Anger')
roboFace.sad()
sleep(2)
roboFace.neutral()
Say('and Sadness')
roboFace.unsure()
sleep(1)
roboFace.neutral()
roboFace.moveHead(500,500)
sleep(1)
roboFace.moveHead(0,500)
sleep(1.5)
roboFace.neutral()
Say('I am not a common robot')
roboFace.angry()
roboFace.neutral()
Phr2=['I can think with a neural network and speak with a real human voice, through a text to speech device']
for phr in Phr2:
Say(phr)
roboFace.angry()
roboFace.neutral()
Phr3=['With my Computer Vision System I can distinguish between males and females!','And with my new Voice I say lots of compliments to Humans!']
for phr in Phr3:
Say(phr)
roboFace.moveHead(500,500)
sleep(1)
roboFace.moveHead(0,500)
roboFace.angry()
Phr4=['Also I am a great actor! I think that I should be the next StarWars mascot.']
for phr in Phr4:
Say(phr)
roboFace.unsure()
Say('Why has George Lucas not offered me a contract yet?')
roboFace.angry()
Say("May the force be with you!")
roboFace.neutral()
Say("Good bye!")
|
perfmon.py
|
'''
monitor CPU usage
'''
from __future__ import division
import os, sys
from threading import Thread
from ctypes import WinError, byref, c_ulonglong
from time import clock, sleep
from traceback import print_exc
from cStringIO import StringIO
from datetime import datetime
from common.commandline import where
from util.threads.bgthread import BackgroundThread
from logging import getLogger; log = getLogger('perfmon')
PROCESS_QUERY_INFORMATION = 0x400
THREAD_QUERY_INFORMATION = 0x0040
from ctypes import windll
kernel32 = windll.kernel32
GetProcessTimes = kernel32.GetProcessTimes
GetThreadTimes = kernel32.GetThreadTimes
OpenProcess = kernel32.OpenProcess
CloseHandle = kernel32.CloseHandle
OpenThread = kernel32.OpenThread
# number of consecutive high cpu ticks before prompting the user to send information
if getattr(sys, 'DEV', False):
NUM_CONSECUTIVE_HIGHS = 5
else:
NUM_CONSECUTIVE_HIGHS = 10
TICK_FREQUENCY = 5 # number of seconds: interval to measure CPU usage at
PROFILE_TICKS = 5 # number of ticks to profile for before sending diagnostic information
CPU_THRESHOLD = .95 # CPU usage above which is considered "high"
TICKS_PER_SEC = 1e7 # GetProcessTimes returns 100 nanosecond units
# (see http://msdn2.microsoft.com/en-us/library/ms683223(VS.85).aspx)
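# Worked example (illustrative only): if GetProcessTimes reports that user time advanced
# by 5e7 ticks (i.e. 5 seconds) over a 5 second wall-clock interval, PerfInfo.update()
# yields userPercent = 5e7 / TICKS_PER_SEC / 5 = 1.0, meaning the process kept one core
# fully busy; values above 1.0 are possible on multi-core machines.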
from util.introspect import all_profilers
def enable_profilers():
profilers = all_profilers().values()
for profiler in profilers: profiler.enable()
_log_enabled_profilers(profilers)
def disable_profilers():
profilers = all_profilers().values()
for profiler in profilers: profiler.disable()
_log_enabled_profilers(profilers)
def _log_enabled_profilers(profilers):
log.info('%d/%d profilers enabled' % (len([p for p in profilers if p.enabled]), len(profilers)))
def profilers_enabled():
return all(p.enabled for p in all_profilers().itervalues())
def get_stack_info():
'Returns a string showing the current stack for each running thread.'
io = StringIO()
where(duplicates = True, stream = io)
stack_info = io.getvalue()
return '\n\n'.join([datetime.now().isoformat(), stack_info])
class CPUWatch(object):
# todo: modelling a state machine with dynamic dispatch is ok, but this could
# be clearer.
def usage(self, user, kernel):
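        # Dynamic dispatch on self.state: 'watching' -> watching_usage,
        # 'profiling' -> profiling_usage, 'disabled' -> disabled_usage.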
return getattr(self, self.state + '_usage')(user, kernel)
def watching_usage(self, user, kernel):
self.user = user
self.kernel = kernel
if user + kernel >= self.threshold:
self.count += 1
log.info('cpu usage is high (not profiling yet: %s/%s): %s', self.count, NUM_CONSECUTIVE_HIGHS, (user + kernel))
if self.count > NUM_CONSECUTIVE_HIGHS:
import wx
wx.CallAfter(self.prompt_for_profiling)
else:
self.count = 0
def profiling_usage(self, user, kernel):
# log.info('profiling CPU usage: %s (user: %s, kernel: %s)', user + kernel, user, kernel)
self.user = user
self.kernel = kernel
if user + kernel >= self.threshold:
log.info('cpu usage is high: %s' % (user + kernel))
self.stack_info.append(get_stack_info())
self.count += 1
if self.count > PROFILE_TICKS:
self.disable()
self.send_info()
else:
log.info('cpu usage was low again: %s' % (user + kernel))
log.info('')
self.count = 0
self.state = 'watching'
def disabled_usage(self, user, kernel):
pass
def send_info(self):
log.info('sending diagnostic information...')
from util.diagnostic import Diagnostic
import wx
try:
d = Diagnostic(description = 'CPU usage was too high.')
d.prepare_data()
if d.do_no_thread_post():
return wx.CallAfter(wx.MessageBox, _('A log of the problem has been sent to digsby.com.\n\nThanks for helping!'),
_('Diagnostic Log'))
except Exception:
print_exc()
wx.CallAfter(wx.MessageBox, _('There was an error when submitting the diagnostic log.'))
def prompt_for_profiling(self):
if self.__in: return
self.__in = True
log.info('prompting for profiling info')
dev = getattr(sys, 'DEV', False)
if profilers_enabled():
self.state = 'profiling'
return log.info('profiler is already enabled')
line1 = _('Digsby appears to be running slowly.')
line2 = _('Do you want to capture diagnostic information and send it to digsby.com?')
msg = u'%s\n\n%s' % (line1, line2)
import wx
if dev or wx.YES == wx.MessageBox(msg, _('Digsby CPU Usage'), style = wx.YES_NO | wx.ICON_ERROR):
log.info('enabling profiler')
enable_profilers()
self.count = 0
self.state = 'profiling'
else:
self.disable()
self.__in = False
def disable(self):
self.state = 'disabled'
disable_profilers()
self.cpu_monitor.done = True
def __init__(self, threshold = CPU_THRESHOLD):
if not (0 < threshold <= 1):
raise ValueError('0 < threshold <= 1')
self.state = 'watching'
self.threshold = threshold
self.count = 0
self.ignore = False
self.cpu_monitor = CPUMonitor(self.usage)
self.cpu_monitor.start()
self.__in = False
self.stack_info = []
class CPUMonitor(BackgroundThread):
def __init__(self, usage_cb, update_freq_secs = TICK_FREQUENCY):
BackgroundThread.__init__(self, name = 'CPUMonitor')
self.setDaemon(True)
self.done = False
        self.update_freq_secs = update_freq_secs
self.perfinfo = ProcessPerfInfo()
assert callable(usage_cb)
self.usage_cb = usage_cb
def run(self):
self.BeforeRun()
while not self.done:
setattr(self, 'loopcount', getattr(self, 'loopcount', 0) + 1)
self.usage_cb(*self.perfinfo.update())
sleep(self.update_freq_secs)
self.AfterRun()
class PerfInfo(object):
__slots__ = ['last_update',
'handle',
'creationTime', # <-- all c_ulonglongs corresponding to FILETIME structs
'exitTime',
'kernelTime',
'userTime',
'oldKernel',
'oldUser']
def __init__(self):
self.last_update = clock()
for name in ('creationTime', 'exitTime', 'kernelTime', 'userTime', 'oldKernel', 'oldUser'):
setattr(self, name, c_ulonglong())
self.handle = self.get_handle()
self.update()
def get_handle(self):
raise NotImplementedError
def update(self):
if not self.times_func(self.handle,
byref(self.creationTime),
byref(self.exitTime),
byref(self.kernelTime),
byref(self.userTime)):
raise WinError()
now = clock()
diff = now - self.last_update
userPercent = (self.userTime.value - self.oldUser.value) / TICKS_PER_SEC / diff
kernelPercent = (self.kernelTime.value - self.oldKernel.value) / TICKS_PER_SEC / diff
self.last_update = now
self.oldUser.value = self.userTime.value
self.oldKernel.value = self.kernelTime.value
return userPercent, kernelPercent
def __del__(self):
CloseHandle(self.handle)
class ProcessPerfInfo(PerfInfo):
"For measuring a process's CPU time."
__slots__ = []
def __init__(self):
PerfInfo.__init__(self)
def get_handle(self):
return obtain_process_handle()
@property
def times_func(self):
return GetProcessTimes
class ThreadPerfInfo(PerfInfo):
__slots__ = ['thread_id']
def __init__(self, thread_id):
self.thread_id = thread_id
PerfInfo.__init__(self)
def get_handle(self):
return obtain_thread_handle(self.thread_id)
@property
def times_func(self):
return GetThreadTimes
def num_processors():
# TODO: is there a more reliable way to get this?
    return int(os.environ.get('NUMBER_OF_PROCESSORS', 1))
def obtain_process_handle(pid = None):
'''
Gets a process handle for a process ID.
If pid is not given, uses this process's ID.
Don't forget to CloseHandle it!
'''
handle = OpenProcess(PROCESS_QUERY_INFORMATION, False, os.getpid() if pid is None else pid)
if not handle:
raise WinError()
return handle
def obtain_thread_handle(thread_id):
'Thread ID -> Thread Handle.'
handle = OpenThread(THREAD_QUERY_INFORMATION, False, thread_id)
if not handle:
raise WinError()
return handle
def main():
import wx
a = wx.PySimpleApp()
f = wx.Frame(None)
b = wx.Button(f, -1, 'info')
def foo():
while True:
pass
t = Thread(target = foo)
    # CPUMonitor requires a usage callback; log the measured (user, kernel) CPU fractions.
    cpumonitor = CPUMonitor(usage_cb=lambda user, kernel: log.info('user %.2f kernel %.2f', user, kernel))
cpumonitor.start()
def onbutton(e):
t.start()
b.Bind(wx.EVT_BUTTON, onbutton)
f.Show()
a.MainLoop()
if __name__ == '__main__':
main()
|
main.py
|
#! python3
'''
main method along with argument parsing functions
'''
# ************************************************************
# Imports
# ************************************************************
import sys
# Verify Python version
if sys.version_info < (3, 7):
sys.exit("This script requires at least Python version 3.7.")
import argparse
from pathlib import Path
import os
import logging
import logging.handlers
import multiprocessing as mp
from support.utils import exitfunction, util_logconfig
from db.wsuse_db import construct_tables, construct_post_tables, db_logconfig
from ProcessPools import DBMgr, ExtractMgr, CleanMgr, SymMgr, mgr_logconfig
from post.post_binskim import binskim_logconfig
from post.post_cert import pcert_logconfig
from post.post_banned import pbanned_logconfig
import globs
import BamLogger
def displayhelp(parserh):
'''
displays help prompt
'''
parserh.print_help()
def parsecommandline(parser):
'''
parses arguments given to commandline
'''
parser.add_argument(
"-a", "--allanalysis",
action='store_true',
help="Perform all post-analysis. Requires -pa.")
parser.add_argument(
"-bsk", "--binskim",
action='store_true',
help="Perform BinSkim post-analysis. Requires -pa.")
parser.add_argument(
"-c", "--createdbonly", action='store_true')
parser.add_argument(
"-gp", "--getpatches",
help="Create/Update patches DB information for symbol files " +
"(requires --createdbonly and cannot be used with any other \"get\" option)",
action='store_true')
parser.add_argument(
"-gs", "--getsymbols",
help="Create/Update symbol DB information for extracted PE files " +
"(requires --createdbonly and cannot be used with any other \"get\" option)",
action='store_true')
parser.add_argument(
"-gu", "--getupdates",
help="Create/Update update file DB information for update files " +
"(requires --createdbonly and cannot be used with any other \"get\" option)",
action='store_true')
parser.add_argument(
"-f", "--file", help="Path to single patch file. Must be given -x or --extract as well.")
parser.add_argument(
"-m", "--module",
help="specify module to invoke",
nargs="?",
type=str,
default="updatefilesymbols")
parser.add_argument(
"-p", "--patchpath", help="Path to location where Windows updates " +
"(CAB/MSU) are stored. Must be given -x or --extract as well.")
parser.add_argument(
"-pa", "--postanalysis",
action='store_true',
help="Perform post-analysis")
parser.add_argument(
"-pd", "--patchdest",
help="An optional destination where extracted PE files will be stored",
nargs="?",
type=str,
default="extractedPatches")
parser.add_argument(
"-raf", "--reanalyzeaf",
action='store_true',
help="Reanalyze all files. Requires -pa.")
parser.add_argument(
"-rsf", "--reanalyzesf",
action='store_true',
help="Reanalyze single file. Requires -pa.")
parser.add_argument(
"-s", "--singleanalysis",
nargs="?",
type=str,
help="Perform post-analysis on single file. Requires -pa.")
parser.add_argument(
"-sd", "--singlediranalysis",
nargs="?",
type=str,
help="Perform post-analysis on all files within a directory." +
"single file. Requires -pa.")
parser.add_argument(
"-sl", "--symlocal",
help=("Path to location where local symbols are be stored. "
"Used only to populate the database and move symbols to "
"specified location."),
action='store_true')
parser.add_argument(
"-ss", "--symbolserver",
help="UNC Path to desired Symbol server. Defaults to "
"https://msdl.microsoft.com/download/symbols. If symlocal is"
" specified a local directory is used",
nargs="?",
type=str,
default="https://msdl.microsoft.com/download/symbols"
)
parser.add_argument(
"-sp", "--symdestpath",
help="Path to location where obtained symbols will be stored",
nargs="?",
type=str,
default="updatefilesymbols")
parser.add_argument(
"-x", "--extract", action='store_true')
parser.add_argument(
"-v", "--verbose",
action='store_true',
help="turn verbose output on or off"
)
if len(sys.argv) == 1:
displayhelp(parser)
exitfunction()
return parser.parse_args()
def checkdirectoryexist(direxist):
'''
Check if directory exists
'''
result = True
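    # The "%r"/[1:-1] construction takes the repr of the path (escaping backslashes in
    # Windows paths) and strips the surrounding quotes before the isdir check; the
    # unmodified path is still what gets passed to os.mkdir below.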
if not os.path.isdir(("%r"%direxist)[1:-1]):
try:
os.mkdir(direxist)
except FileExistsError as ferror:
mainlogger.log(logging.ERROR, "[MAIN] {-} unable to make destination directory - FileExists " + \
str(ferror.winerror) + " " + str(ferror.strerror))
except:
exctype, value = sys.exc_info()[:2]
mainlogger.log(logging.ERROR, ("[MAIN] {-} unable to make destination directory " + \
str(exctype) + " " + str(value)))
result = False
mainlogger.log(logging.INFO, "[MAIN] Directory ("+ direxist + ") results were " + str(int(result)))
return result
def setuplogconfig(globqueue):
util_logconfig(globqueue)
db_logconfig(globqueue)
mgr_logconfig(globqueue)
binskim_logconfig(globqueue)
pcert_logconfig(globqueue)
pbanned_logconfig(globqueue)
def closeup():
globs.DBCONN.close()
globs.DBCONN2.close()
globqueue.put_nowait(None)
loggerProcess.join()
sys.exit()
if __name__ == "__main__":
import time
PARSER = argparse.ArgumentParser()
ARGS = parsecommandline(PARSER)
# ************************************************************
# times
# ************************************************************
    ELAPSED_EXTRACT = 0
    ELAPSED_CHECKBIN = 0
    ELAPSED_GETSYM = 0
START_TIME = 0
EXTRACTMIN = 0
CHECKBINMIN = 0
GETSYMMIN = 0
# set verbose output on or off, this is apparently the Python approved way to do this
globqueue = mp.Manager().Queue(-1)
mainlogger = logging.getLogger("BAM.main")
qh = logging.handlers.QueueHandler(globqueue)
qh.setLevel(logging.INFO)
mainlogger.addHandler(qh)
mainlogger.setLevel(logging.INFO)
setuplogconfig(globqueue)
loggerProcess = mp.Process(target=BamLogger.log_listener, args=(globqueue, BamLogger.log_config))
loggerProcess.start()
if ARGS.verbose:
import ModVerbosity
# ARGS.file currently not in use, way to extract single cab not yet developed
if ARGS.extract and (ARGS.patchpath or ARGS.file):
# Clean-slate (first time) / Continuous use or reconstruct DB
# (internet or no internet)
print("Extracting updates and retrieving symbols")
patchdest = None
direxist = False
if ARGS.patchdest:
direxist = checkdirectoryexist(ARGS.patchdest)
if not direxist:
mainlogger.log(logging.ERROR, "[MAIN] {-} Problem verifying patch destination directory")
closeup()
patchdest = ARGS.patchdest.rstrip('\\')
if ARGS.symdestpath:
direxist = checkdirectoryexist(ARGS.symdestpath)
if not direxist:
mainlogger.log(logging.ERROR, "[MAIN] {-} Problem verifying symbol destination directory")
closeup()
print("Examining " + ARGS.patchpath)
patchpathiter = ""
try:
patchpathiter = os.scandir(ARGS.patchpath)
except FileNotFoundError as error:
mainlogger.log(logging.ERROR, "[MAIN] {-} Problem verifying patch directory. Not found.")
closeup()
if not any(patchpathiter):
mainlogger.log(logging.ERROR, "[MAIN] {-} Provided patch directory is empty.")
closeup()
if not construct_tables(globs.DBCONN):
mainlogger.log(logging.ERROR, "[MAIN] {-} Problem creating DB tables")
closeup()
DB = DBMgr(patchdest, globs.DBCONN)
SYM = PATCH = UPDATE = None
print("Ensuring only PE files are present in " + ARGS.patchpath)
LOCAL = False
LOCALDBC = False
if ARGS.symlocal:
print("Using local path for symbols....")
LOCAL = True
if ARGS.createdbonly:
print("Creating local DB only....")
LOCALDBC = True
print("Using symbol server (" + ARGS.symbolserver + ") to store at (" + \
ARGS.symdestpath + ")")
# number of processes spawned will be equal to the number of CPUs in the system
CPUS = os.cpu_count()
SYM = SymMgr(CPUS, ARGS.symbolserver, ARGS.symdestpath, DB, LOCAL, globqueue)
PATCH = CleanMgr(CPUS, SYM, DB, globqueue)
UPDATE = ExtractMgr(ARGS.patchpath, patchdest, CPUS, PATCH, DB, LOCALDBC, globqueue)
START_TIME = time.time()
DB.start()
SYM.start()
PATCH.start()
UPDATE.start()
UPDATE.join()
        ELAPSED_EXTRACT = time.time() - START_TIME
        EXTRACTMIN = ELAPSED_EXTRACT / 60
print(("Time to extract ({}),").format(EXTRACTMIN))
PATCH.join()
        ELAPSED_CHECKBIN = time.time() - START_TIME
        CHECKBINMIN = ELAPSED_CHECKBIN / 60
print(("Time to check binaries ({}),").format(CHECKBINMIN))
SYM.join()
ELAPSED_GETSYM = time.time() - START_TIME
GETSYMMIN = ELAPSED_GETSYM / 60
print(("Time to find symbols ({}),").format(GETSYMMIN))
DB.join()
TOTAL_ELAPSED = time.time() - START_TIME
TOTALMIN = TOTAL_ELAPSED / 60
print(("Total time including database insertion ({})").format(TOTALMIN))
print("Updates Completed, check WSUS_Update_data.db for symbols, "
"update metadata, binaries")
elif ARGS.createdbonly and ARGS.patchpath and ARGS.symbolserver and ARGS.patchdest:
# Create/Update DB only from Update files, extracted files,
# and downloaded symbols
# Only create the SymbolFiles Table
if ARGS.getsymbols:
if not construct_tables(globs.DBCONN):
mainlogger.log(logging.ERROR, "[MAIN] {-} Problem creating DB tables")
closeup()
# (Re)create the Symbol table / retrieve symbols only
DB = DBMgr(globs.DBCONN)
SYM = None
print("Only retrieving symbols")
LOCAL = False
if ARGS.symlocal:
LOCAL = True
SYM = SymMgr(4, ARGS.symbolserver, ARGS.symdestpath, DB, LOCAL, globqueue)
DB.start()
SYM.start()
for root, dummy, files in os.walk(ARGS.patchdest):
for item in files:
job = Path(os.path.join(root + "\\" + item)).resolve()
SYM.receivejobset(job)
SYM.donesig()
SYM.join()
for i in range(0, 2):
DB.donesig()
DB.join()
print("retrieving of symbols complete. Check WSUS_Update_data.db for symbols")
# Only create the PatchedFiles Table
elif ARGS.getpatches:
if not construct_tables(globs.DBCONN):
mainlogger.log(logging.ERROR, "[MAIN] {-} Problem creating DB tables")
closeup()
# (Re)create the PatchFile table / retrieve patches only
DB = DBMgr(globs.DBCONN)
CLEAN = None
print("Only retrieving patches")
CLEAN = CleanMgr(1, None, DB, globqueue)
DB.start()
CLEAN.start()
for root, folders, dummy in os.walk(ARGS.patchpath):
for item in folders:
job = Path(os.path.join(root + "\\" + item)).resolve()
CLEAN.receivejobset(job)
CLEAN.donesig()
CLEAN.join()
for i in range(0, 2):
DB.donesig()
DB.join()
print("retrieving of patches complete. Check WSUS_Update_data.db for patch files")
# Only create the UpdateFiles Table
elif ARGS.getupdates:
if not construct_tables(globs.DBCONN):
mainlogger.log(logging.ERROR, "[MAIN] {-} Problem creating DB tables")
closeup()
# (Re)create the UpdateFiles table / retrieve updates only
DB = DBMgr(globs.DBCONN)
UPD = None
print("Only retrieving updates")
UPD = ExtractMgr(ARGS.patchpath, ARGS.patchdest, 4, None, DB, True, globqueue)
DB.start()
UPD.start()
UPD.join()
for i in range(0, 2):
DB.donesig()
DB.join()
print("retrieving of Updates complete. Check WSUS_Update_data.db for update files")
elif ARGS.postanalysis and ARGS.symbolserver and (ARGS.singleanalysis or ARGS.singlediranalysis):
from post.post_binskim import binskimanalysis
from post.post_cert import analyzepesignature
from post.post_banned import findbannedapis
        fileordir = ''
if ARGS.singleanalysis:
fileordir = ARGS.singleanalysis
elif ARGS.singlediranalysis:
fileordir = []
for root, dummy, files, in os.walk(ARGS.singlediranalysis):
for file in files:
filel = file.lower()
if filel.endswith(".exe") or filel.endswith(".sys") \
or filel.endswith(".dll"):
fileordir.append(os.path.realpath(os.path.join(root,filel)))
cresult = construct_post_tables()
if cresult:
print("Starting postanalysis.")
if ARGS.allanalysis:
if isinstance(fileordir, list):
for file in fileordir:
binskimanalysis(file, ARGS.symbolserver)
analyzepesignature(file)
findbannedapis(file)
else:
binskimanalysis(fileordir, ARGS.symbolserver)
analyzepesignature(fileordir)
if ARGS.binskim:
dummy = ""
print("Completed postanalysis.")
else:
print("Issue constructing post tables.")
else:
print("Invalid option -- view -h")
print(("Time to extract ({})," +
"Time to checkbin ({})," +
"Time to get symbols ({})").format(EXTRACTMIN, CHECKBINMIN,
GETSYMMIN))
closeup()
|
test_disconnect.py
|
import asyncio
import logging
import multiprocessing as mp
from io import StringIO
from queue import Empty
import numpy as np
import ucp
mp = mp.get_context("spawn")
async def mp_queue_get_nowait(queue):
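    # Poll the multiprocessing queue without blocking: a plain queue.get() would block
    # the whole asyncio event loop, so retry and yield control with asyncio.sleep instead.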
while True:
try:
return queue.get_nowait()
except Empty:
pass
await asyncio.sleep(0.01)
def _test_shutdown_closed_peer_server(client_queue, server_queue):
async def run():
async def server_node(ep):
try:
await ep.send(np.arange(100, dtype=np.int64))
# Waiting for signal to close the endpoint
await mp_queue_get_nowait(server_queue)
await ep.close()
finally:
listener.close()
listener = ucp.create_listener(server_node)
client_queue.put(listener.port)
while not listener.closed():
await asyncio.sleep(0.1)
log_stream = StringIO()
logging.basicConfig(stream=log_stream, level=logging.INFO)
asyncio.get_event_loop().run_until_complete(run())
log = log_stream.getvalue()
assert log.find("""UCXError('Comm Error "[Send shutdown]""") != -1
def _test_shutdown_closed_peer_client(client_queue, server_queue):
async def run():
server_port = client_queue.get()
ep = await ucp.create_endpoint(ucp.get_address(), server_port)
msg = np.empty(100, dtype=np.int64)
await ep.recv(msg)
asyncio.get_event_loop().run_until_complete(run())
def test_shutdown_closed_peer(caplog):
client_queue = mp.Queue()
server_queue = mp.Queue()
p1 = mp.Process(
target=_test_shutdown_closed_peer_server, args=(client_queue, server_queue)
)
p1.start()
p2 = mp.Process(
target=_test_shutdown_closed_peer_client, args=(client_queue, server_queue)
)
p2.start()
p2.join()
server_queue.put("client is down")
p1.join()
assert not p1.exitcode
assert not p2.exitcode
|
frontend.py
|
import logging
import os
import threading
import time
import pykka
import requests
from mopidy import core
import netifaces
from . import Extension
from .brainz import Brainz
logger = logging.getLogger(__name__)
class PiDiConfig:
def __init__(self, config=None):
self.rotation = config.get("rotation", 90)
self.spi_port = 0
self.spi_chip_select_pin = 1
self.spi_data_command_pin = 9
self.spi_speed_mhz = 80
self.backlight_pin = 13
self.size = 240
self.blur_album_art = True
class PiDiFrontend(pykka.ThreadingActor, core.CoreListener):
def __init__(self, config, core):
super().__init__()
self.core = core
self.config = config
self.current_track = None
def on_start(self):
self.display = PiDi(self.config)
self.display.start()
self.display.update(volume=self.core.mixer.get_volume().get())
if "http" in self.config:
ifaces = netifaces.interfaces()
ifaces.remove("lo")
http = self.config["http"]
if http.get("enabled", False):
hostname = http.get("hostname", "127.0.0.1")
port = http.get("port", 6680)
if hostname in ["::", "0.0.0.0"]:
family = (
netifaces.AF_INET6 if hostname == "::" else netifaces.AF_INET
)
for iface in ifaces:
hostname = self.get_ifaddress(iface, family)
if hostname is not None:
break
if hostname is not None:
self.display.update(
title=f"Visit http://{hostname}:{port} to select content."
)
self.display.update_album_art(art="")
def on_stop(self):
self.display.stop()
self.display = None
def get_ifaddress(self, iface, family):
try:
return netifaces.ifaddresses(iface)[family][0]["addr"]
except (IndexError, KeyError):
return None
def mute_changed(self, mute):
pass
def options_changed(self):
self.display.update(
shuffle=self.core.tracklist.get_random(),
repeat=self.core.tracklist.get_repeat(),
)
def playlist_changed(self, playlist):
pass
def playlist_deleted(self, playlist):
pass
def playlists_loaded(self):
pass
def seeked(self, time_position):
self.update_elapsed(time_position)
def stream_title_changed(self, title):
self.display.update(title=title)
def track_playback_ended(self, tl_track, time_position):
self.update_elapsed(time_position)
self.display.update(state="pause")
def track_playback_paused(self, tl_track, time_position):
self.update_elapsed(time_position)
self.display.update(state="pause")
def track_playback_resumed(self, tl_track, time_position):
self.update_elapsed(time_position)
self.display.update(state="play")
def track_playback_started(self, tl_track):
self.update_track(tl_track.track, 0)
self.display.update(state="play")
def update_elapsed(self, time_position):
self.display.update(elapsed=float(time_position))
def update_track(self, track, time_position=None):
if track is None:
track = self.core.playback.get_current_track().get()
title = ""
album = ""
artist = ""
if track.name is not None:
title = track.name
if track.album is not None and track.album.name is not None:
album = track.album.name
if track.artists is not None:
artist = ", ".join([artist.name for artist in track.artists])
self.display.update(title=title, album=album, artist=artist)
if time_position is not None:
length = track.length
# Default to 60s long and loop the transport bar
if length is None:
length = 60
time_position %= length
self.display.update(elapsed=float(time_position), length=float(length))
art = None
track_images = self.core.library.get_images([track.uri]).get()
if track.uri in track_images:
track_images = track_images[track.uri]
if len(track_images) == 1:
art = track_images[0].uri
else:
for image in track_images:
if image.width is None or image.height is None:
continue
if image.height >= 240 and image.width >= 240:
art = image.uri
self.display.update_album_art(art=art)
def tracklist_changed(self):
pass
def volume_changed(self, volume):
if volume is None:
return
self.display.update(volume=volume)
class PiDi:
def __init__(self, config):
self.config = config
self.cache_dir = Extension.get_data_dir(config)
self.display_config = PiDiConfig(config["pidi"])
self.display_class = Extension.get_display_types()[
self.config["pidi"]["display"]
]
self._brainz = Brainz(cache_dir=self.cache_dir)
self._display = self.display_class(self.display_config)
self._running = threading.Event()
self._delay = 1.0 / 30
self._thread = None
self.shuffle = False
self.repeat = False
self.state = "stop"
self.volume = 100
self.progress = 0
self.elapsed = 0
self.length = 0
self.title = ""
self.album = ""
self.artist = ""
        self._last_elapsed_update = time.time()
        self._last_elapsed_value = 0
self._last_art = ""
def start(self):
if self._thread is not None:
return
self._running = threading.Event()
self._running.set()
self._thread = threading.Thread(target=self._loop)
self._thread.start()
def stop(self):
self._running.clear()
self._thread.join()
self._thread = None
self._display.stop()
def _handle_album_art(self, art):
if art != self._last_art:
self._display.update_album_art(art)
self._last_art = art
def update_album_art(self, art=None):
_album = self.title if self.album is None or self.album == "" else self.album
if art is not None:
if os.path.isfile(art):
# Art is already a locally cached file we can use
self._handle_album_art(art)
return
elif art.startswith("http://") or art.startswith("https://"):
file_name = self._brainz.get_cache_file_name(art)
if os.path.isfile(file_name):
# If a cached file already exists, use it!
self._handle_album_art(file_name)
return
else:
# Otherwise, request the URL and save it!
response = requests.get(art)
if response.status_code == 200:
self._brainz.save_album_art(response.content, file_name)
self._handle_album_art(file_name)
return
art = self._brainz.get_album_art(self.artist, _album, self._handle_album_art)
def update(self, **kwargs):
self.shuffle = kwargs.get("shuffle", self.shuffle)
self.repeat = kwargs.get("repeat", self.repeat)
self.state = kwargs.get("state", self.state)
self.volume = kwargs.get("volume", self.volume)
# self.progress = kwargs.get('progress', self.progress)
self.elapsed = kwargs.get("elapsed", self.elapsed)
self.length = kwargs.get("length", self.length)
self.title = kwargs.get("title", self.title)
self.album = kwargs.get("album", self.album)
self.artist = kwargs.get("artist", self.artist)
if "elapsed" in kwargs:
if "length" in kwargs:
self.progress = float(self.elapsed) / float(self.length)
self._last_elapsed_update = time.time()
self._last_elapsed_value = kwargs["elapsed"]
def _loop(self):
while self._running.is_set():
if self.state == "play":
t_elapsed_ms = (time.time() - self._last_elapsed_update) * 1000
self.elapsed = float(self._last_elapsed_value + t_elapsed_ms)
                if self.length:
                    self.progress = self.elapsed / self.length
self._display.update_overlay(
self.shuffle,
self.repeat,
self.state,
self.volume,
self.progress,
self.elapsed,
self.title,
self.album,
self.artist,
)
self._display.redraw()
time.sleep(self._delay)
|
ultrasound_tracker.py
|
""" Functions to receive ultrasound frames and track muscle thickness
This file contains the code to receive data from the ultrasound machine,
and display the received images. It also contains the tracking code that allows the user
to select areas of the muscle to track, and performs optical flow tracking to determine
muscle thickness.
"""
import os
import socket
import threading
import time
from enum import Enum
from struct import error, unpack
import cv2 as cv
import numpy as np
from tracking_utils.tracking.image_proc_utils import get_filter_from_num
from tracking_utils.tracking.paramvalues import ParamValues
# IP that the ultrasound machine will send data to, replace with your computer's IP address
IP = 'YOUR_IP_HERE'
# If average squared distance (in pixels) of tracked points from their cluster center
# exceeds RESET_DISTANCE, we reset all tracked points to their original locations
RESET_DISTANCE = 200
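# Minimal sketch (not part of the original tracker) of the reset rule described above;
# the helper name and the (N, 2) point-array layout are assumptions for illustration.
def _example_reset_needed(points):
    """Return True when the mean squared distance of tracked points from their centroid exceeds RESET_DISTANCE."""
    center = points.mean(axis=0)
    mean_sq_dist = float(np.mean(np.sum((points - center) ** 2, axis=1)))
    return mean_sq_dist > RESET_DISTANCE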
class DrawingState(Enum):
"""Enum describing system status during point selection.
This class represents the current status of the tracking system, i.e.,
whether the user has started/completed point selection for the first and
second cluster.
"""
STARTING_FIRST = 0
DRAWING_FIRST = 1
STARTING_SECOND = 2
DRAWING_SECOND = 4
DONE_SECOND = 5
class UltrasoundTracker:
""" Class containing functions to receive ultrasound frames, and perform optical
flow tracking to determine muscle thickness """
def __init__(self, muscle_thickness_file, image_directory):
"""
Init method for UltrasoundTracker. This method starts a thread to
receive images from the ultrasound machine.
Args:
muscle_thickness_file: The filename to write the tracked thickness
values to
image_directory: The folder prefix to save the ultrasound images in.
Both raw and annotated images are saved.
"""
self.THICKNESS_FILE = muscle_thickness_file
self.IMAGE_DIRECTORY_RAW = image_directory + "_raw"
self.IMAGE_DIRECTORY_FILTERED = image_directory + "_filtered"
#imageMatrix is the received image from the ultrasound
self.imageMatrix = np.zeros((326, 241), dtype=np.uint8)
#boolean for if ultrasound data has been received yet
self.got_data = False
self.drawing = DrawingState.STARTING_FIRST
self.old_frame_filtered = np.zeros((244, 301), np.uint8)
self.clicked_pts_set_one = []
self.clicked_pts_set_two = []
#create a thread to run receive_data and start it
t1 = threading.Thread(target=self.receive_data)
t1.start()
def receive_data(self):
"""
This function receives the image from the ultrasound machine
and writes the result to imageMatrix
"""
#set up socket to communicate with ultrasound machine
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((IP, 19001))
s.listen(1)
conn, addr = s.accept()
print(addr)
#first iteration to set imageMatrix size
        received = 0
        data = b''
        while received != 100:
            data += conn.recv(100 - received)
            received = len(data)
        header = unpack('IIIIIIIIIQIIIIIIIIIIIII', data)
numberOfLines = header[13]
numberOfPoints = header[14]
self.imageMatrix = np.zeros((numberOfPoints, numberOfLines),
dtype=np.uint8)
received = 0
buffer = b''
while (received != header[8]):
buffer += conn.recv(header[8] - received)
received = len(buffer)
nparr = np.frombuffer(buffer, np.uint8)
for j in range(numberOfLines):
for i in range(numberOfPoints):
self.imageMatrix[i][j] = nparr[i + j * (numberOfPoints + 8)]
self.got_data = True
#rest of iterations
while 1:
#receive header data
            received = 0
            data = b''
            while received != 100:
                data += conn.recv(100 - received)
                received = len(data)
#unpack header data so we can access it
try:
header = unpack('IIIIIIIIIQIIIIIIIIIIIII', data)
except error:
print("unpack error")
#received image size will be numberOfLines * numberOfPoints
numberOfLines = header[13]
numberOfPoints = header[14]
#receive image data
received = 0
buffer = b''
while (received != header[8]):
buffer += conn.recv(header[8] - received)
received = len(buffer)
nparr = np.frombuffer(buffer, np.uint8)
#fill in imageMatrix based on received data
for j in range(numberOfLines):
for i in range(numberOfPoints):
self.imageMatrix[i][j] = nparr[i + j * (numberOfPoints + 8)]
#if we have not received an image, break
if not data:
break
def collect_clicked_pts(self, event, x, y, flags, param):
"""
This function stores the first 10 points the user clicks on
in clicked_pts_set_one, and the next 10 points in clicked_pts_set_two.
Args:
event: OpenCV event type representing what user action was taken
x (int): x location of event
y (int): y location of event
flags: unused
param: unused
"""
if event == cv.EVENT_LBUTTONDOWN:
if self.drawing == DrawingState.STARTING_FIRST or self.drawing == DrawingState.DRAWING_FIRST:
self.drawing = DrawingState.DRAWING_FIRST
self.clicked_pts_set_one.append((x, y))
if (len(self.clicked_pts_set_one) >= 10):
self.drawing = DrawingState.STARTING_SECOND
print("Start drawing second line now!", flush=True)
elif self.drawing == DrawingState.STARTING_SECOND or self.drawing == DrawingState.DRAWING_SECOND:
self.drawing = DrawingState.DRAWING_SECOND
self.clicked_pts_set_two.append((x, y))
if (len(self.clicked_pts_set_two) >= 10):
self.drawing = DrawingState.DONE_SECOND
elif self.drawing == DrawingState.DONE_SECOND:
self.reset_points()
def extract_contour_pts(self, img):
"""Extract points from largest contour in PGM image.
This function is used to extract ordered points along the largest detected
contour in the provided PGM image and format them for use by OpenCV image
tracking. In particular, this function is used to extract the fascial
border of the brachioradialis muscle in a mask manually segmented from a
given ultrasound frame. It is typically used to initialize points to track.
Args:
img: cv image with contour drawn
Returns:
numpy.ndarray of contour points
"""
frame_HSV = cv.cvtColor(img, cv.COLOR_BGR2HSV)
frame_threshold = cv.inRange(frame_HSV, (0, 255, 255), (20, 255, 255))
contours, _ = cv.findContours(frame_threshold, cv.RETR_TREE,
cv.CHAIN_APPROX_SIMPLE)
# convert largest contour to tracking-compatible numpy array
points = []
for j in range(len(contours)):
for i in range(len(contours[j])):
points.append(np.array(contours[j][i], dtype=np.float32))
np_points = np.array(points)
return np_points
def reset_points(self):
"""
Resets self.points_set_one and self.points_set_two to the original tracked
points
"""
self.points_set_one = self.original_points_set_one.copy()
self.points_set_two = self.original_points_set_two.copy()
def main(self, pipe):
"""
This method is started as a thread by start_process.py. It first allows the user
to select two areas to track, then runs optical flow tracking on two sets
of points on the muscle. It records the vertical muscle thickness
as the vertical distance between the means of these two clusters of points.
        It also sends the thickness to the graphing program, and saves every 10th image.
Args:
pipe: the pipe that sends ultrasound muscle thickness data to the graphing program
"""
with open(self.THICKNESS_FILE, "w") as thickness_file:
thickness_file.write("Muscle thickness data\n")
#create opencv window to display image
cv.namedWindow('image')
cv.setMouseCallback('image', self.collect_clicked_pts)
#set up variables for tracking
first_loop = True
run_params = ParamValues()
window_size = run_params.LK_window
lk_params = dict(winSize=(window_size, window_size),
maxLevel=run_params.pyr_level,
criteria=(cv.TERM_CRITERIA_EPS |
cv.TERM_CRITERIA_COUNT, 10, 0.03))
image_filter = get_filter_from_num(3)
        # Red color in BGR
color = (0, 0, 255)
# Line thickness of 4 px
thickness = 4
self.points_set_one = None
self.points_set_two = None
counter = 0
while 1:
if self.got_data:
#resize imageMatrix so it has a larger width than height
resized = cv.resize(self.imageMatrix,
(int(1.25 * self.imageMatrix.shape[1]),
(int(.75 * self.imageMatrix.shape[0]))),
interpolation=cv.INTER_AREA).copy()
if self.drawing != DrawingState.DONE_SECOND:
old_frame = resized.copy()
old_frame_color = cv.cvtColor(old_frame,
cv.COLOR_GRAY2RGB).copy()
# visualize
contour_image = cv.polylines(old_frame_color, [
np.array(self.clicked_pts_set_one).reshape((-1, 1, 2)),
np.array(self.clicked_pts_set_two).reshape((-1, 1, 2))
], False, color, thickness).copy()
cv.imshow('image', contour_image)
cv.waitKey(1)
elif self.drawing == DrawingState.DONE_SECOND and self.points_set_one is None and self.points_set_two is None:
old_frame = resized.copy()
old_frame_color = cv.cvtColor(old_frame,
cv.COLOR_GRAY2RGB).copy()
#draw two polygons around the two sets of selected points and use extract_contour_pts to get two good sets of points to track
contour_image = cv.polylines(old_frame_color.copy(), [
np.array(self.clicked_pts_set_one).reshape((-1, 1, 2))
], False, color, thickness).copy()
self.points_set_one = self.extract_contour_pts(
contour_image)
self.original_points_set_one = self.points_set_one.copy()
contour_image = cv.polylines(old_frame_color.copy(), [
np.array(self.clicked_pts_set_two).reshape((-1, 1, 2))
], False, color, thickness).copy()
self.points_set_two = self.extract_contour_pts(
contour_image)
self.original_points_set_two = self.points_set_two.copy()
# track and display specified points through images
                # if it's the first image, we will just set the old_frame variable
elif first_loop:
old_frame = resized.copy()
# apply filters to frame
self.old_frame_filtered = image_filter(
old_frame, run_params)
first_loop = False
cv.waitKey(1)
else:
# read in new frame
frame = resized.copy()
frame_filtered = image_filter(frame, run_params)
#perform optical flow tracking to track where points went between image frames
tracked_contour_one, _, _ = cv.calcOpticalFlowPyrLK(
self.old_frame_filtered, frame_filtered,
self.points_set_one, None, **lk_params)
tracked_contour_two, _, _ = cv.calcOpticalFlowPyrLK(
self.old_frame_filtered, frame_filtered,
self.points_set_two, None, **lk_params)
tracked_contour_one = tracked_contour_one.reshape((-1, 2))
tracked_contour_two = tracked_contour_two.reshape((-1, 2))
# update for next iteration
self.old_frame_filtered = frame_filtered.copy()
self.points_set_one = tracked_contour_one.copy()
self.points_set_two = tracked_contour_two.copy()
frame_color = cv.cvtColor(frame_filtered,
cv.COLOR_GRAY2RGB).copy()
#calculate average distance to center of clusters, and reset if too large
                    mean_one = tuple(
                        np.mean(tracked_contour_one, axis=0, dtype=int))
                    mean_two = tuple(
                        np.mean(tracked_contour_two, axis=0, dtype=int))
sum_distances_one, sum_distances_two = 0, 0
for i in range(len(tracked_contour_one)):
x, y = tracked_contour_one[i].ravel()
cv.circle(frame_color, (int(x), int(y)), 3, (0, 0, 255),
-1)
sum_distances_one += (x - mean_one[0])**2 + (
y - mean_one[1])**2
for i in range(len(tracked_contour_two)):
x, y = tracked_contour_two[i].ravel()
cv.circle(frame_color, (int(x), int(y)), 3, (255, 0, 0),
-1)
sum_distances_two += (x - mean_two[0])**2 + (
y - mean_two[1])**2
average_distance_set_one = float(sum_distances_one) / len(
tracked_contour_one)
average_distance_set_two = float(sum_distances_two) / len(
tracked_contour_two)
max_average_distance = max(average_distance_set_one,
average_distance_set_two)
if max_average_distance > RESET_DISTANCE:
self.reset_points()
print("average squared distance was ",
max_average_distance)
print("resetting points!", flush=True)
continue
#draw line representing thickness
cv.line(frame_color, mean_one, mean_two, (255, 0, 255), 3)
middle_x = int((mean_one[0] + mean_two[0]) / 2)
topmost_y = max(mean_one[1], mean_two[1])
bottommost_y = min(mean_one[1], mean_two[1])
cv.line(frame_color, (middle_x - 10, topmost_y),
(middle_x + 10, topmost_y), (0, 255, 0), 3)
cv.line(frame_color, (middle_x - 10, bottommost_y),
(middle_x + 10, bottommost_y), (0, 255, 0), 3)
vertical_distance = topmost_y - bottommost_y
now = time.time()
str_now = str(now)
                    #send data to graphing program, and save every 10th image
pipe.send(vertical_distance)
if counter == 10:
cv.imwrite(
os.path.join(os.getcwd(), self.IMAGE_DIRECTORY_RAW,
str_now) + ".jpg", resized)
cv.imwrite(
os.path.join(os.getcwd(),
self.IMAGE_DIRECTORY_FILTERED, str_now)
+ ".jpg", frame_color)
with open(self.THICKNESS_FILE, "a") as thickness_file:
thickness_file.write(str_now + ": " +
str(vertical_distance) + "\n")
counter = 0
cv.imshow('image', frame_color)
# wait 1ms
cv.waitKey(1)
counter += 1
|
player.py
|
"""
The MIT License (MIT)
Copyright (c) 2021 xXSergeyXx
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import threading
import traceback
import subprocess
import audioop
import asyncio
import logging
import shlex
import time
import json
import sys
import re
import io
from typing import Any, Callable, Generic, IO, Optional, TYPE_CHECKING, Tuple, Type, TypeVar, Union
from .errors import ClientException
from .opus import Encoder as OpusEncoder
from .oggparse import OggStream
from .utils import MISSING
if TYPE_CHECKING:
from .voice_client import VoiceClient
AT = TypeVar('AT', bound='AudioSource')
FT = TypeVar('FT', bound='FFmpegOpusAudio')
_log = logging.getLogger(__name__)
__all__ = (
'AudioSource',
'PCMAudio',
'FFmpegAudio',
'FFmpegPCMAudio',
'FFmpegOpusAudio',
'PCMVolumeTransformer',
)
CREATE_NO_WINDOW: int
if sys.platform != 'win32':
CREATE_NO_WINDOW = 0
else:
CREATE_NO_WINDOW = 0x08000000
class AudioSource:
"""Represents an audio stream.
The audio stream can be Opus encoded or not, however if the audio stream
is not Opus encoded then the audio format must be 16-bit 48KHz stereo PCM.
.. warning::
The audio source reads are done in a separate thread.
"""
def read(self) -> bytes:
"""Reads 20ms worth of audio.
Subclasses must implement this.
        If the audio is complete, an empty :term:`py:bytes-like object`
        should be returned to signal this.
If :meth:`~AudioSource.is_opus` method returns ``True``, then it must return
20ms worth of Opus encoded audio. Otherwise, it must be 20ms
worth of 16-bit 48KHz stereo PCM, which is about 3,840 bytes
per frame (20ms worth of audio).
Returns
--------
:class:`bytes`
A bytes like object that represents the PCM or Opus data.
"""
raise NotImplementedError
def is_opus(self) -> bool:
"""Checks if the audio source is already encoded in Opus."""
return False
def cleanup(self) -> None:
"""Called when clean-up is needed to be done.
Useful for clearing buffer data or processes after
it is done playing audio.
"""
pass
def __del__(self) -> None:
self.cleanup()
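# Illustrative sketch (not part of the original module): a minimal AudioSource
# showing the contract documented in AudioSource.read() -- each call returns one
# 20ms frame of 16-bit 48KHz stereo PCM, i.e. OpusEncoder.FRAME_SIZE (3840) bytes.
# The class name is hypothetical; it never returns b'', so a real source would
# also need to signal completion eventually.
class _SilentSource(AudioSource):
    """Yields 20ms frames of silence as raw PCM."""
    def read(self) -> bytes:
        return b'\x00' * OpusEncoder.FRAME_SIZE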
class PCMAudio(AudioSource):
"""Represents raw 16-bit 48KHz stereo PCM audio source.
Attributes
-----------
stream: :term:`py:file object`
A file-like object that reads byte data representing raw PCM.
"""
def __init__(self, stream: io.BufferedIOBase) -> None:
self.stream: io.BufferedIOBase = stream
def read(self) -> bytes:
ret = self.stream.read(OpusEncoder.FRAME_SIZE)
if len(ret) != OpusEncoder.FRAME_SIZE:
return b''
return ret
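# Hedged usage sketch (not part of the original module; 'audio.raw' is an
# assumed file containing raw 16-bit 48KHz stereo PCM):
#   source = PCMAudio(open('audio.raw', 'rb'))
#   voice_client.play(source)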
class FFmpegAudio(AudioSource):
"""Represents an FFmpeg (or AVConv) based AudioSource.
    User-created AudioSources that use FFmpeg differently from how
    :class:`FFmpegPCMAudio` and :class:`FFmpegOpusAudio` work should subclass this.
.. versionadded:: 1.3
"""
def __init__(self, source: Union[str, io.BufferedIOBase], *, executable: str = 'ffmpeg', args: Any, **subprocess_kwargs: Any):
piping = subprocess_kwargs.get('stdin') == subprocess.PIPE
if piping and isinstance(source, str):
raise TypeError("parameter conflict: 'source' parameter cannot be a string when piping to stdin")
args = [executable, *args]
kwargs = {'stdout': subprocess.PIPE}
kwargs.update(subprocess_kwargs)
self._process: subprocess.Popen = self._spawn_process(args, **kwargs)
self._stdout: IO[bytes] = self._process.stdout # type: ignore
        self._stdin: Optional[IO[bytes]] = None
self._pipe_thread: Optional[threading.Thread] = None
if piping:
n = f'popen-stdin-writer:{id(self):#x}'
self._stdin = self._process.stdin
self._pipe_thread = threading.Thread(target=self._pipe_writer, args=(source,), daemon=True, name=n)
self._pipe_thread.start()
def _spawn_process(self, args: Any, **subprocess_kwargs: Any) -> subprocess.Popen:
process = None
try:
process = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, **subprocess_kwargs)
except FileNotFoundError:
executable = args.partition(' ')[0] if isinstance(args, str) else args[0]
raise ClientException(executable + ' was not found.') from None
except subprocess.SubprocessError as exc:
raise ClientException(f'Popen failed: {exc.__class__.__name__}: {exc}') from exc
else:
return process
def _kill_process(self) -> None:
proc = self._process
if proc is MISSING:
return
_log.info('Preparing to terminate ffmpeg process %s.', proc.pid)
try:
proc.kill()
except Exception:
_log.exception('Ignoring error attempting to kill ffmpeg process %s', proc.pid)
if proc.poll() is None:
_log.info('ffmpeg process %s has not terminated. Waiting to terminate...', proc.pid)
proc.communicate()
_log.info('ffmpeg process %s should have terminated with a return code of %s.', proc.pid, proc.returncode)
else:
_log.info('ffmpeg process %s successfully terminated with return code of %s.', proc.pid, proc.returncode)
def _pipe_writer(self, source: io.BufferedIOBase) -> None:
while self._process:
# arbitrarily large read size
data = source.read(8192)
if not data:
self._process.terminate()
return
try:
self._stdin.write(data)
except Exception:
_log.debug('Write error for %s, this is probably not a problem', self, exc_info=True)
# at this point the source data is either exhausted or the process is fubar
self._process.terminate()
return
def cleanup(self) -> None:
self._kill_process()
self._process = self._stdout = self._stdin = MISSING
class FFmpegPCMAudio(FFmpegAudio):
"""An audio source from FFmpeg (or AVConv).
This launches a sub-process to a specific input file given.
.. warning::
You must have the ffmpeg or avconv executable in your path environment
variable in order for this to work.
Parameters
------------
source: Union[:class:`str`, :class:`io.BufferedIOBase`]
The input that ffmpeg will take and convert to PCM bytes.
If ``pipe`` is ``True`` then this is a file-like object that is
passed to the stdin of ffmpeg.
executable: :class:`str`
The executable name (and path) to use. Defaults to ``ffmpeg``.
pipe: :class:`bool`
If ``True``, denotes that ``source`` parameter will be passed
to the stdin of ffmpeg. Defaults to ``False``.
stderr: Optional[:term:`py:file object`]
A file-like object to pass to the Popen constructor.
Could also be an instance of ``subprocess.PIPE``.
before_options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg after the ``-i`` flag.
Raises
--------
ClientException
The subprocess failed to be created.
"""
def __init__(
self,
source: Union[str, io.BufferedIOBase],
*,
executable: str = 'ffmpeg',
pipe: bool = False,
stderr: Optional[IO[str]] = None,
before_options: Optional[str] = None,
options: Optional[str] = None
) -> None:
args = []
subprocess_kwargs = {'stdin': subprocess.PIPE if pipe else subprocess.DEVNULL, 'stderr': stderr}
if isinstance(before_options, str):
args.extend(shlex.split(before_options))
args.append('-i')
args.append('-' if pipe else source)
args.extend(('-f', 's16le', '-ar', '48000', '-ac', '2', '-loglevel', 'warning'))
if isinstance(options, str):
args.extend(shlex.split(options))
args.append('pipe:1')
super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
def read(self) -> bytes:
ret = self._stdout.read(OpusEncoder.FRAME_SIZE)
if len(ret) != OpusEncoder.FRAME_SIZE:
return b''
return ret
def is_opus(self) -> bool:
return False
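# Hedged usage sketch (not part of the original module; 'music.mp3' and the
# option strings are illustrative): before_options is placed before ffmpeg's -i
# flag and options after it, as documented above.
#   source = FFmpegPCMAudio('music.mp3', before_options='-nostdin', options='-vn')
#   voice_client.play(PCMVolumeTransformer(source, volume=0.5))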
class FFmpegOpusAudio(FFmpegAudio):
"""An audio source from FFmpeg (or AVConv).
This launches a sub-process to a specific input file given. However, rather than
producing PCM packets like :class:`FFmpegPCMAudio` does that need to be encoded to
Opus, this class produces Opus packets, skipping the encoding step done by the library.
Alternatively, instead of instantiating this class directly, you can use
:meth:`FFmpegOpusAudio.from_probe` to probe for bitrate and codec information. This
can be used to opportunistically skip pointless re-encoding of existing Opus audio data
for a boost in performance at the cost of a short initial delay to gather the information.
The same can be achieved by passing ``copy`` to the ``codec`` parameter, but only if you
know that the input source is Opus encoded beforehand.
.. versionadded:: 1.3
.. warning::
You must have the ffmpeg or avconv executable in your path environment
variable in order for this to work.
Parameters
------------
source: Union[:class:`str`, :class:`io.BufferedIOBase`]
The input that ffmpeg will take and convert to Opus bytes.
If ``pipe`` is ``True`` then this is a file-like object that is
passed to the stdin of ffmpeg.
bitrate: :class:`int`
The bitrate in kbps to encode the output to. Defaults to ``128``.
codec: Optional[:class:`str`]
The codec to use to encode the audio data. Normally this would be
just ``libopus``, but is used by :meth:`FFmpegOpusAudio.from_probe` to
opportunistically skip pointlessly re-encoding Opus audio data by passing
``copy`` as the codec value. Any values other than ``copy``, ``opus``, or
``libopus`` will be considered ``libopus``. Defaults to ``libopus``.
.. warning::
Do not provide this parameter unless you are certain that the audio input is
already Opus encoded. For typical use :meth:`FFmpegOpusAudio.from_probe`
should be used to determine the proper value for this parameter.
executable: :class:`str`
The executable name (and path) to use. Defaults to ``ffmpeg``.
pipe: :class:`bool`
If ``True``, denotes that ``source`` parameter will be passed
to the stdin of ffmpeg. Defaults to ``False``.
stderr: Optional[:term:`py:file object`]
A file-like object to pass to the Popen constructor.
Could also be an instance of ``subprocess.PIPE``.
before_options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg after the ``-i`` flag.
Raises
--------
ClientException
The subprocess failed to be created.
"""
def __init__(
self,
source: Union[str, io.BufferedIOBase],
*,
bitrate: int = 128,
codec: Optional[str] = None,
executable: str = 'ffmpeg',
pipe=False,
stderr=None,
before_options=None,
options=None,
) -> None:
args = []
subprocess_kwargs = {'stdin': subprocess.PIPE if pipe else subprocess.DEVNULL, 'stderr': stderr}
if isinstance(before_options, str):
args.extend(shlex.split(before_options))
args.append('-i')
args.append('-' if pipe else source)
codec = 'copy' if codec in ('opus', 'libopus') else 'libopus'
args.extend(('-map_metadata', '-1',
'-f', 'opus',
'-c:a', codec,
'-ar', '48000',
'-ac', '2',
'-b:a', f'{bitrate}k',
'-loglevel', 'warning'))
if isinstance(options, str):
args.extend(shlex.split(options))
args.append('pipe:1')
super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
self._packet_iter = OggStream(self._stdout).iter_packets()
@classmethod
async def from_probe(
cls: Type[FT],
source: str,
*,
method: Optional[Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]] = None,
**kwargs: Any,
) -> FT:
"""|coro|
A factory method that creates a :class:`FFmpegOpusAudio` after probing
the input source for audio codec and bitrate information.
Examples
----------
Use this function to create an :class:`FFmpegOpusAudio` instance instead of the constructor: ::
source = await nextcord.FFmpegOpusAudio.from_probe("song.webm")
voice_client.play(source)
If you are on Windows and don't have ffprobe installed, use the ``fallback`` method
to probe using ffmpeg instead: ::
source = await nextcord.FFmpegOpusAudio.from_probe("song.webm", method='fallback')
voice_client.play(source)
Using a custom method of determining codec and bitrate: ::
def custom_probe(source, executable):
# some analysis code here
return codec, bitrate
source = await nextcord.FFmpegOpusAudio.from_probe("song.webm", method=custom_probe)
voice_client.play(source)
Parameters
------------
source
Identical to the ``source`` parameter for the constructor.
method: Optional[Union[:class:`str`, Callable[:class:`str`, :class:`str`]]]
The probing method used to determine bitrate and codec information. As a string, valid
values are ``native`` to use ffprobe (or avprobe) and ``fallback`` to use ffmpeg
(or avconv). As a callable, it must take two string arguments, ``source`` and
``executable``. Both parameters are the same values passed to this factory function.
``executable`` will default to ``ffmpeg`` if not provided as a keyword argument.
kwargs
The remaining parameters to be passed to the :class:`FFmpegOpusAudio` constructor,
excluding ``bitrate`` and ``codec``.
Raises
--------
AttributeError
Invalid probe method, must be ``'native'`` or ``'fallback'``.
TypeError
Invalid value for ``probe`` parameter, must be :class:`str` or a callable.
Returns
--------
:class:`FFmpegOpusAudio`
An instance of this class.
"""
executable = kwargs.get('executable')
codec, bitrate = await cls.probe(source, method=method, executable=executable)
return cls(source, bitrate=bitrate, codec=codec, **kwargs) # type: ignore
@classmethod
async def probe(
cls,
source: str,
*,
method: Optional[Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]] = None,
executable: Optional[str] = None,
) -> Tuple[Optional[str], Optional[int]]:
"""|coro|
Probes the input source for bitrate and codec information.
Parameters
------------
source
Identical to the ``source`` parameter for :class:`FFmpegOpusAudio`.
method
Identical to the ``method`` parameter for :meth:`FFmpegOpusAudio.from_probe`.
executable: :class:`str`
Identical to the ``executable`` parameter for :class:`FFmpegOpusAudio`.
Raises
--------
AttributeError
Invalid probe method, must be ``'native'`` or ``'fallback'``.
TypeError
Invalid value for ``probe`` parameter, must be :class:`str` or a callable.
Returns
---------
Optional[Tuple[Optional[:class:`str`], Optional[:class:`int`]]]
A 2-tuple with the codec and bitrate of the input source.
"""
method = method or 'native'
executable = executable or 'ffmpeg'
probefunc = fallback = None
if isinstance(method, str):
probefunc = getattr(cls, '_probe_codec_' + method, None)
if probefunc is None:
raise AttributeError(f"Invalid probe method {method!r}")
if probefunc is cls._probe_codec_native:
fallback = cls._probe_codec_fallback
elif callable(method):
probefunc = method
fallback = cls._probe_codec_fallback
else:
raise TypeError("Expected str or callable for parameter 'probe', " \
f"not '{method.__class__.__name__}'")
codec = bitrate = None
loop = asyncio.get_event_loop()
try:
codec, bitrate = await loop.run_in_executor(None, lambda: probefunc(source, executable)) # type: ignore
except Exception:
if not fallback:
_log.exception("Probe '%s' using '%s' failed", method, executable)
return # type: ignore
_log.exception("Probe '%s' using '%s' failed, trying fallback", method, executable)
try:
codec, bitrate = await loop.run_in_executor(None, lambda: fallback(source, executable)) # type: ignore
except Exception:
_log.exception("Fallback probe using '%s' failed", executable)
else:
_log.info("Fallback probe found codec=%s, bitrate=%s", codec, bitrate)
else:
_log.info("Probe found codec=%s, bitrate=%s", codec, bitrate)
finally:
return codec, bitrate
@staticmethod
def _probe_codec_native(source, executable: str = 'ffmpeg') -> Tuple[Optional[str], Optional[int]]:
exe = executable[:2] + 'probe' if executable in ('ffmpeg', 'avconv') else executable
args = [exe, '-v', 'quiet', '-print_format', 'json', '-show_streams', '-select_streams', 'a:0', source]
output = subprocess.check_output(args, timeout=20)
codec = bitrate = None
if output:
data = json.loads(output)
streamdata = data['streams'][0]
codec = streamdata.get('codec_name')
bitrate = int(streamdata.get('bit_rate', 0))
bitrate = max(round(bitrate/1000), 512)
return codec, bitrate
@staticmethod
def _probe_codec_fallback(source, executable: str = 'ffmpeg') -> Tuple[Optional[str], Optional[int]]:
args = [executable, '-hide_banner', '-i', source]
proc = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = proc.communicate(timeout=20)
output = out.decode('utf8')
codec = bitrate = None
codec_match = re.search(r"Stream #0.*?Audio: (\w+)", output)
if codec_match:
codec = codec_match.group(1)
br_match = re.search(r"(\d+) [kK]b/s", output)
if br_match:
bitrate = max(int(br_match.group(1)), 512)
return codec, bitrate
def read(self) -> bytes:
return next(self._packet_iter, b'')
def is_opus(self) -> bool:
return True
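# Hedged usage sketch (not part of the original module; 'song.webm' is
# illustrative): probe() can also be called directly to inspect a source
# without constructing an audio object.
#   codec, bitrate = await FFmpegOpusAudio.probe('song.webm', method='fallback')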
class PCMVolumeTransformer(AudioSource, Generic[AT]):
"""Transforms a previous :class:`AudioSource` to have volume controls.
This does not work on audio sources that have :meth:`AudioSource.is_opus`
set to ``True``.
Parameters
------------
original: :class:`AudioSource`
The original AudioSource to transform.
volume: :class:`float`
The initial volume to set it to.
See :attr:`volume` for more info.
Raises
-------
TypeError
Not an audio source.
ClientException
The audio source is opus encoded.
"""
def __init__(self, original: AT, volume: float = 1.0):
if not isinstance(original, AudioSource):
raise TypeError(f'expected AudioSource not {original.__class__.__name__}.')
if original.is_opus():
raise ClientException('AudioSource must not be Opus encoded.')
self.original: AT = original
self.volume = volume
@property
def volume(self) -> float:
"""Retrieves or sets the volume as a floating point percentage (e.g. ``1.0`` for 100%)."""
return self._volume
@volume.setter
def volume(self, value: float) -> None:
self._volume = max(value, 0.0)
def cleanup(self) -> None:
self.original.cleanup()
def read(self) -> bytes:
ret = self.original.read()
return audioop.mul(ret, 2, min(self._volume, 2.0))
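# Hedged usage sketch (not part of the original module; names are illustrative):
# the volume attribute can be adjusted while the transformer is playing.
#   transformed = PCMVolumeTransformer(FFmpegPCMAudio('clip.wav'), volume=1.0)
#   voice_client.play(transformed)
#   transformed.volume = 0.25  # later: drop playback to 25%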
class AudioPlayer(threading.Thread):
DELAY: float = OpusEncoder.FRAME_LENGTH / 1000.0
def __init__(self, source: AudioSource, client: VoiceClient, *, after=None):
threading.Thread.__init__(self)
self.daemon: bool = True
self.source: AudioSource = source
self.client: VoiceClient = client
self.after: Optional[Callable[[Optional[Exception]], Any]] = after
self._end: threading.Event = threading.Event()
self._resumed: threading.Event = threading.Event()
self._resumed.set() # we are not paused
self._current_error: Optional[Exception] = None
self._connected: threading.Event = client._connected
self._lock: threading.Lock = threading.Lock()
if after is not None and not callable(after):
raise TypeError('Expected a callable for the "after" parameter.')
def _do_run(self) -> None:
self.loops = 0
self._start = time.perf_counter()
# getattr lookup speed ups
play_audio = self.client.send_audio_packet
self._speak(True)
while not self._end.is_set():
# are we paused?
if not self._resumed.is_set():
# wait until we aren't
self._resumed.wait()
continue
# are we disconnected from voice?
if not self._connected.is_set():
# wait until we are connected
self._connected.wait()
# reset our internal data
self.loops = 0
self._start = time.perf_counter()
self.loops += 1
data = self.source.read()
if not data:
self.stop()
break
play_audio(data, encode=not self.source.is_opus())
next_time = self._start + self.DELAY * self.loops
delay = max(0, self.DELAY + (next_time - time.perf_counter()))
time.sleep(delay)
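            # Worked example of the pacing above (illustrative numbers): with
            # DELAY = 0.02 s, loop 50 is scheduled at _start + 1.000 s; if
            # perf_counter() is already at _start + 1.005 s, then
            # delay = max(0, 0.02 + (1.000 - 1.005)) = 0.015 s, so the 5 ms of
            # drift is absorbed rather than accumulating across frames.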
def run(self) -> None:
try:
self._do_run()
except Exception as exc:
self._current_error = exc
self.stop()
finally:
self.source.cleanup()
self._call_after()
def _call_after(self) -> None:
error = self._current_error
if self.after is not None:
try:
self.after(error)
except Exception as exc:
_log.exception('Calling the after function failed.')
exc.__context__ = error
traceback.print_exception(type(exc), exc, exc.__traceback__)
elif error:
msg = f'Exception in voice thread {self.name}'
_log.exception(msg, exc_info=error)
print(msg, file=sys.stderr)
traceback.print_exception(type(error), error, error.__traceback__)
def stop(self) -> None:
self._end.set()
self._resumed.set()
self._speak(False)
def pause(self, *, update_speaking: bool = True) -> None:
self._resumed.clear()
if update_speaking:
self._speak(False)
def resume(self, *, update_speaking: bool = True) -> None:
self.loops = 0
self._start = time.perf_counter()
self._resumed.set()
if update_speaking:
self._speak(True)
def is_playing(self) -> bool:
return self._resumed.is_set() and not self._end.is_set()
def is_paused(self) -> bool:
return not self._end.is_set() and not self._resumed.is_set()
def _set_source(self, source: AudioSource) -> None:
with self._lock:
self.pause(update_speaking=False)
self.source = source
self.resume(update_speaking=False)
def _speak(self, speaking: bool) -> None:
try:
asyncio.run_coroutine_threadsafe(self.client.ws.speak(speaking), self.client.loop)
except Exception as e:
_log.info("Speaking call in player failed: %s", e)
|
tests.py
|
import threading
from datetime import datetime, timedelta
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, DatabaseError, connections
from django.db.models.manager import BaseManager
from django.db.models.query import EmptyQuerySet, QuerySet
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, skipIfDBFeature,
skipUnlessDBFeature,
)
from django.utils.translation import gettext_lazy
from .models import Article, ArticleSelectOnSave, SelfRef
class ModelInstanceCreationTests(TestCase):
def test_object_is_not_written_to_database_until_save_was_called(self):
a = Article(
id=None,
headline='Parrot programs in Python',
pub_date=datetime(2005, 7, 28),
)
self.assertIsNone(a.id)
self.assertEqual(Article.objects.all().count(), 0)
# Save it into the database. You have to call save() explicitly.
a.save()
self.assertIsNotNone(a.id)
self.assertEqual(Article.objects.all().count(), 1)
def test_can_initialize_model_instance_using_positional_arguments(self):
"""
You can initialize a model instance using positional arguments,
which should match the field order as defined in the model.
"""
a = Article(None, 'Second article', datetime(2005, 7, 29))
a.save()
self.assertEqual(a.headline, 'Second article')
self.assertEqual(a.pub_date, datetime(2005, 7, 29, 0, 0))
def test_can_create_instance_using_kwargs(self):
a = Article(
id=None,
headline='Third article',
pub_date=datetime(2005, 7, 30),
)
a.save()
self.assertEqual(a.headline, 'Third article')
self.assertEqual(a.pub_date, datetime(2005, 7, 30, 0, 0))
def test_autofields_generate_different_values_for_each_instance(self):
a1 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a2 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a3 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
self.assertNotEqual(a3.id, a1.id)
self.assertNotEqual(a3.id, a2.id)
def test_can_mix_and_match_position_and_kwargs(self):
# You can also mix and match position and keyword arguments, but
# be sure not to duplicate field information.
a = Article(None, 'Fourth article', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Fourth article')
def test_cannot_create_instance_with_invalid_kwargs(self):
with self.assertRaisesMessage(TypeError, "'foo' is an invalid keyword argument for this function"):
Article(
id=None,
headline='Some headline',
pub_date=datetime(2005, 7, 31),
foo='bar',
)
def test_can_leave_off_value_for_autofield_and_it_gets_value_on_save(self):
"""
You can leave off the value for an AutoField when creating an
object, because it'll get filled in automatically when you save().
"""
a = Article(headline='Article 5', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Article 5')
self.assertIsNotNone(a.id)
def test_leaving_off_a_field_with_default_set_the_default_will_be_saved(self):
a = Article(pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Default headline')
def test_for_datetimefields_saves_as_much_precision_as_was_given(self):
"""as much precision in *seconds*"""
a1 = Article(
headline='Article 7',
pub_date=datetime(2005, 7, 31, 12, 30),
)
a1.save()
self.assertEqual(Article.objects.get(id__exact=a1.id).pub_date, datetime(2005, 7, 31, 12, 30))
a2 = Article(
headline='Article 8',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a2.save()
self.assertEqual(Article.objects.get(id__exact=a2.id).pub_date, datetime(2005, 7, 31, 12, 30, 45))
def test_saving_an_object_again_does_not_create_a_new_object(self):
a = Article(headline='original', pub_date=datetime(2014, 5, 16))
a.save()
current_id = a.id
a.save()
self.assertEqual(a.id, current_id)
a.headline = 'Updated headline'
a.save()
self.assertEqual(a.id, current_id)
def test_querysets_checking_for_membership(self):
headlines = [
'Parrot programs in Python', 'Second article', 'Third article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
a = Article(headline='Some headline', pub_date=some_pub_date)
a.save()
# You can use 'in' to test for membership...
self.assertIn(a, Article.objects.all())
# ... but there will often be more efficient ways if that is all you need:
self.assertTrue(Article.objects.filter(id=a.id).exists())
class ModelTest(TestCase):
def test_objects_attribute_is_only_available_on_the_class_itself(self):
with self.assertRaisesMessage(AttributeError, "Manager isn't accessible via Article instances"):
getattr(Article(), "objects",)
self.assertFalse(hasattr(Article(), 'objects'))
self.assertTrue(hasattr(Article, 'objects'))
def test_queryset_delete_removes_all_items_in_that_queryset(self):
headlines = [
'An article', 'Article One', 'Amazing article', 'Boring article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
self.assertQuerysetEqual(
Article.objects.all().order_by('headline'),
["<Article: Amazing article>",
"<Article: An article>",
"<Article: Article One>",
"<Article: Boring article>"]
)
Article.objects.filter(headline__startswith='A').delete()
self.assertQuerysetEqual(Article.objects.all().order_by('headline'), ["<Article: Boring article>"])
def test_not_equal_and_equal_operators_behave_as_expected_on_instances(self):
some_pub_date = datetime(2014, 5, 16, 12, 1)
a1 = Article.objects.create(headline='First', pub_date=some_pub_date)
a2 = Article.objects.create(headline='Second', pub_date=some_pub_date)
self.assertNotEqual(a1, a2)
self.assertEqual(a1, Article.objects.get(id__exact=a1.id))
self.assertNotEqual(Article.objects.get(id__exact=a1.id), Article.objects.get(id__exact=a2.id))
@skipUnlessDBFeature('supports_microsecond_precision')
def test_microsecond_precision(self):
# In PostgreSQL, microsecond-level precision is available.
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(Article.objects.get(pk=a9.pk).pub_date, datetime(2005, 7, 31, 12, 30, 45, 180))
@skipIfDBFeature('supports_microsecond_precision')
def test_microsecond_precision_not_supported(self):
# In MySQL, microsecond-level precision isn't always available. You'll
# lose microsecond-level precision once the data is saved.
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(
Article.objects.get(id__exact=a9.id).pub_date,
datetime(2005, 7, 31, 12, 30, 45),
)
@skipIfDBFeature('supports_microsecond_precision')
def test_microsecond_precision_not_supported_edge_case(self):
# In MySQL, microsecond-level precision isn't always available. You'll
# lose microsecond-level precision once the data is saved.
a = Article.objects.create(
headline='Article',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
self.assertEqual(
Article.objects.get(pk=a.pk).pub_date,
datetime(2008, 12, 31, 23, 59, 59),
)
def test_manually_specify_primary_key(self):
# You can manually specify the primary key when creating a new object.
a101 = Article(
id=101,
headline='Article 101',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a101.save()
a101 = Article.objects.get(pk=101)
self.assertEqual(a101.headline, 'Article 101')
def test_create_method(self):
# You can create saved objects in a single step
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
self.assertEqual(Article.objects.get(headline="Article 10"), a10)
def test_year_lookup_edge_case(self):
# Edge-case test: A year lookup should retrieve all objects in
# the given year, including Jan. 1 and Dec. 31.
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2008),
["<Article: Article 11>", "<Article: Article 12>"]
)
def test_unicode_data(self):
# Unicode data works, too.
a = Article(
headline='\u6797\u539f \u3081\u3050\u307f',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.get(pk=a.id).headline, '\u6797\u539f \u3081\u3050\u307f')
def test_hash_function(self):
# Model instances have a hash function, so they can be used in sets
# or as dictionary keys. Two models compare as equal if their primary
# keys are equal.
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a11 = Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
a12 = Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
s = {a10, a11, a12}
self.assertIn(Article.objects.get(headline='Article 11'), s)
def test_extra_method_select_argument_with_dashes_and_values(self):
# The 'select' argument to extra() supports names with dashes in
# them, as long as you use values().
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
dicts = Article.objects.filter(
pub_date__year=2008).extra(
select={'dashed-value': '1'}).values('headline', 'dashed-value')
self.assertEqual(
[sorted(d.items()) for d in dicts],
[[('dashed-value', 1), ('headline', 'Article 11')], [('dashed-value', 1), ('headline', 'Article 12')]]
)
def test_extra_method_select_argument_with_dashes(self):
# If you use 'select' with extra() and names containing dashes on a
# query that's *not* a values() query, those extra 'select' values
# will silently be ignored.
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
articles = Article.objects.filter(
pub_date__year=2008).extra(select={'dashed-value': '1', 'undashedvalue': '2'})
self.assertEqual(articles[0].undashedvalue, 2)
def test_create_relation_with_gettext_lazy(self):
"""
gettext_lazy objects work when saving model instances
through various methods. Refs #10498.
"""
notlazy = 'test'
lazy = gettext_lazy(notlazy)
Article.objects.create(headline=lazy, pub_date=datetime.now())
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# test that assign + save works with Promise objects
article.headline = lazy
article.save()
self.assertEqual(article.headline, notlazy)
# test .update()
Article.objects.update(headline=lazy)
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# still test bulk_create()
Article.objects.all().delete()
Article.objects.bulk_create([Article(headline=lazy, pub_date=datetime.now())])
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
def test_emptyqs(self):
# Can't be instantiated
with self.assertRaises(TypeError):
EmptyQuerySet()
self.assertIsInstance(Article.objects.none(), EmptyQuerySet)
self.assertNotIsInstance('', EmptyQuerySet)
def test_emptyqs_values(self):
# test for #15959
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
qs = Article.objects.none().values_list('pk')
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(len(qs), 0)
def test_emptyqs_customqs(self):
# A hacky test for custom QuerySet subclass - refs #17271
Article.objects.create(headline='foo', pub_date=datetime.now())
class CustomQuerySet(QuerySet):
def do_something(self):
return 'did something'
qs = Article.objects.all()
qs.__class__ = CustomQuerySet
qs = qs.none()
with self.assertNumQueries(0):
self.assertEqual(len(qs), 0)
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(qs.do_something(), 'did something')
def test_emptyqs_values_order(self):
# Tests for ticket #17712
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().values_list('id').order_by('id')), 0)
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().filter(
id__in=Article.objects.values_list('id', flat=True))), 0)
@skipUnlessDBFeature('can_distinct_on_fields')
def test_emptyqs_distinct(self):
# Tests for #19426
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().distinct('headline', 'pub_date')), 0)
def test_ticket_20278(self):
sr = SelfRef.objects.create()
with self.assertRaises(ObjectDoesNotExist):
SelfRef.objects.get(selfref=sr)
def test_eq(self):
self.assertEqual(Article(id=1), Article(id=1))
self.assertNotEqual(Article(id=1), object())
self.assertNotEqual(object(), Article(id=1))
a = Article()
self.assertEqual(a, a)
self.assertNotEqual(Article(), a)
def test_hash(self):
# Value based on PK
self.assertEqual(hash(Article(id=1)), hash(1))
with self.assertRaises(TypeError):
# No PK value -> unhashable (because save() would then change
# hash)
hash(Article())
def test_delete_and_access_field(self):
# Accessing a field after it's deleted from a model reloads its value.
pub_date = datetime.now()
article = Article.objects.create(headline='foo', pub_date=pub_date)
new_pub_date = article.pub_date + timedelta(days=10)
article.headline = 'bar'
article.pub_date = new_pub_date
del article.headline
with self.assertNumQueries(1):
self.assertEqual(article.headline, 'foo')
# Fields that weren't deleted aren't reloaded.
self.assertEqual(article.pub_date, new_pub_date)
class ModelLookupTest(TestCase):
def setUp(self):
# Create an Article.
self.a = Article(
id=None,
headline='Swallow programs in Python',
pub_date=datetime(2005, 7, 28),
)
# Save it into the database. You have to call save() explicitly.
self.a.save()
def test_all_lookup(self):
# Change values by changing the attributes, then calling save().
self.a.headline = 'Parrot programs in Python'
self.a.save()
# Article.objects.all() returns all the articles in the database.
self.assertQuerysetEqual(Article.objects.all(), ['<Article: Parrot programs in Python>'])
def test_rich_lookup(self):
# Django provides a rich database lookup API.
self.assertEqual(Article.objects.get(id__exact=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline__startswith='Swallow'), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7, pub_date__day=28), self.a)
self.assertEqual(Article.objects.get(pub_date__week_day=5), self.a)
def test_equal_lookup(self):
# The "__exact" lookup type can be omitted, as a shortcut.
self.assertEqual(Article.objects.get(id=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline='Swallow programs in Python'), self.a)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2004),
[],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005, pub_date__month=7),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=5),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=6),
[],
)
def test_does_not_exist(self):
# Django raises an Article.DoesNotExist exception for get() if the
# parameters don't match any object.
with self.assertRaisesMessage(ObjectDoesNotExist, "Article matching query does not exist."):
Article.objects.get(id__exact=2000,)
# To avoid dict-ordering related errors check only one lookup
# in single assert.
with self.assertRaises(ObjectDoesNotExist):
Article.objects.get(pub_date__year=2005, pub_date__month=8)
with self.assertRaisesMessage(ObjectDoesNotExist, "Article matching query does not exist."):
Article.objects.get(pub_date__week_day=6,)
def test_lookup_by_primary_key(self):
# Lookup by a primary key is the most common case, so Django
# provides a shortcut for primary-key exact lookups.
# The following is identical to articles.get(id=a.id).
self.assertEqual(Article.objects.get(pk=self.a.id), self.a)
# pk can be used as a shortcut for the primary key name in any query.
self.assertQuerysetEqual(Article.objects.filter(pk__in=[self.a.id]), ["<Article: Swallow programs in Python>"])
# Model instances of the same type and same ID are considered equal.
a = Article.objects.get(pk=self.a.id)
b = Article.objects.get(pk=self.a.id)
self.assertEqual(a, b)
def test_too_many(self):
# Create a very similar object
a = Article(
id=None,
headline='Swallow bites Python',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.count(), 2)
# Django raises an Article.MultipleObjectsReturned exception if the
# lookup matches more than one object
msg = "get() returned more than one Article -- it returned 2!"
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(headline__startswith='Swallow',)
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(pub_date__year=2005,)
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(pub_date__year=2005, pub_date__month=7)
class ConcurrentSaveTests(TransactionTestCase):
available_apps = ['basic']
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_concurrent_delete_with_save(self):
"""
Test fetching, deleting and finally saving an object - we should get
an insert in this case.
"""
a = Article.objects.create(headline='foo', pub_date=datetime.now())
exceptions = []
def deleter():
try:
# Do not delete a directly - doing so alters its state.
Article.objects.filter(pk=a.pk).delete()
except Exception as e:
exceptions.append(e)
finally:
connections[DEFAULT_DB_ALIAS].close()
self.assertEqual(len(exceptions), 0)
t = threading.Thread(target=deleter)
t.start()
t.join()
a.save()
self.assertEqual(Article.objects.get(pk=a.pk).headline, 'foo')
class ManagerTest(SimpleTestCase):
QUERYSET_PROXY_METHODS = [
'none',
'count',
'dates',
'datetimes',
'distinct',
'extra',
'get',
'get_or_create',
'update_or_create',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'earliest',
'latest',
'first',
'last',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
'_insert',
'_update',
'raw',
'union',
'intersection',
'difference',
]
def test_manager_methods(self):
"""
This test ensures that the correct set of methods from `QuerySet`
are copied onto `Manager`.
It's particularly useful to prevent accidentally leaking new methods
into `Manager`. New `QuerySet` methods that should also be copied onto
`Manager` will need to be added to `ManagerTest.QUERYSET_PROXY_METHODS`.
"""
self.assertEqual(
sorted(BaseManager._get_queryset_methods(QuerySet).keys()),
sorted(self.QUERYSET_PROXY_METHODS),
)
class SelectOnSaveTests(TestCase):
def test_select_on_save(self):
a1 = Article.objects.create(pub_date=datetime.now())
with self.assertNumQueries(1):
a1.save()
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(2):
asos.save()
with self.assertNumQueries(1):
asos.save(force_update=True)
Article.objects.all().delete()
with self.assertRaises(DatabaseError):
with self.assertNumQueries(1):
asos.save(force_update=True)
def test_select_on_save_lying_update(self):
"""
select_on_save works correctly if the database doesn't return correct
information about matched rows from UPDATE.
"""
# Change the manager to not return "row matched" for update().
# We are going to change the Article's _base_manager class
# dynamically. This is a bit of a hack, but it seems hard to
        # test this properly otherwise. We patch Article's manager (rather
        # than ArticleSelectOnSave's) because proxy models use their parent
        # model's _base_manager.
orig_class = Article._base_manager._queryset_class
class FakeQuerySet(QuerySet):
# Make sure the _update method below is in fact called.
called = False
def _update(self, *args, **kwargs):
FakeQuerySet.called = True
super()._update(*args, **kwargs)
return 0
try:
Article._base_manager._queryset_class = FakeQuerySet
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(3):
asos.save()
self.assertTrue(FakeQuerySet.called)
# This is not wanted behavior, but this is how Django has always
# behaved for databases that do not return correct information
# about matched rows for UPDATE.
with self.assertRaises(DatabaseError):
asos.save(force_update=True)
with self.assertRaises(DatabaseError):
asos.save(update_fields=['pub_date'])
finally:
Article._base_manager._queryset_class = orig_class
class ModelRefreshTests(TestCase):
def _truncate_ms(self, val):
# MySQL < 5.6.4 removes microseconds from the datetimes which can cause
# problems when comparing the original value to that loaded from DB
return val - timedelta(microseconds=val.microsecond)
def test_refresh(self):
a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
Article.objects.filter(pk=a.pk).update(headline='new headline')
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.headline, 'new headline')
orig_pub_date = a.pub_date
new_pub_date = a.pub_date + timedelta(10)
Article.objects.update(headline='new headline 2', pub_date=new_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db(fields=['headline'])
self.assertEqual(a.headline, 'new headline 2')
self.assertEqual(a.pub_date, orig_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.pub_date, new_pub_date)
def test_unknown_kwarg(self):
s = SelfRef.objects.create()
with self.assertRaises(TypeError):
s.refresh_from_db(unknown_kwarg=10)
def test_refresh_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create()
s3 = SelfRef.objects.create(selfref=s1)
s3_copy = SelfRef.objects.get(pk=s3.pk)
s3_copy.selfref.touched = True
s3.selfref = s2
s3.save()
with self.assertNumQueries(1):
s3_copy.refresh_from_db()
with self.assertNumQueries(1):
# The old related instance was thrown away (the selfref_id has
# changed). It needs to be reloaded on access, so one query
# executed.
self.assertFalse(hasattr(s3_copy.selfref, 'touched'))
self.assertEqual(s3_copy.selfref, s2)
def test_refresh_null_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create(selfref=s1)
s2.selfref = None
s2.refresh_from_db()
self.assertEqual(s2.selfref, s1)
def test_refresh_unsaved(self):
pub_date = self._truncate_ms(datetime.now())
a = Article.objects.create(pub_date=pub_date)
a2 = Article(id=a.pk)
with self.assertNumQueries(1):
a2.refresh_from_db()
self.assertEqual(a2.pub_date, pub_date)
self.assertEqual(a2._state.db, "default")
def test_refresh_fk_on_delete_set_null(self):
a = Article.objects.create(
headline='Parrot programs in Python',
pub_date=datetime(2005, 7, 28),
)
s1 = SelfRef.objects.create(article=a)
a.delete()
s1.refresh_from_db()
self.assertIsNone(s1.article_id)
self.assertIsNone(s1.article)
def test_refresh_no_fields(self):
a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
with self.assertNumQueries(0):
a.refresh_from_db(fields=[])
|
vodloader_video.py
|
from vodloader_chapters import vodloader_chapters
from threading import Thread
from math import floor
import logging
import os
import datetime
import streamlink
import requests
import json
import pytz
class vodloader_video(object):
def __init__(self, parent, url, twitch_data, backlog=False, quality='best', part=1):
self.parent = parent
self.logger = logging.getLogger(f'vodloader.{self.parent.channel}.video')
self.part = part
self.backlog = backlog
self.quality = quality
self.passed = False
self.upload = self.parent.upload
self.keep = self.parent.keep
self.twitch_data = twitch_data
if backlog:
self.start_absolute = twitch_data['created_at']
self.id = twitch_data['stream_id']
self.vod_id = twitch_data['id']
else:
self.start_absolute = twitch_data['started_at']
self.id = twitch_data['id']
self.start_absolute = pytz.timezone('UTC').localize(datetime.datetime.strptime(self.start_absolute, '%Y-%m-%dT%H:%M:%SZ'))
self.start_absolute = self.start_absolute.astimezone(self.parent.tz)
self.start = datetime.datetime.now()
self.download_url = url
name = self.id
if self.part > 1:
name += f'.p{self.part}'
self.id += f'p{self.part}'
name += '.ts'
self.path = os.path.join(self.parent.download_dir, name)
self.chapters = self.chapters_init(twitch_data)
self.thread = Thread(target=self.buffload_stream, args=(), daemon=True)
self.thread.start()
def chapters_init(self, twitch_data):
if self.backlog:
chapters = self.get_vod_chapters()
else:
chapters = vodloader_chapters(twitch_data['game_name'], twitch_data['title'])
return chapters
def __del__(self):
pass
def get_stream(self, url, quality):
return self.parent.streamlink.streams(url)[quality]
def buffload_stream(self):
        if self.id not in self.parent.status:
self.download_stream()
if self.upload and self.parent.status[self.id] != True:
self.upload_stream()
def download_stream(self, chunk_size=8192, max_length=60*(60*12-15), retry=10):
self.logger.info(f'Downloading stream from {self.download_url} to {self.path}')
stream = self.get_stream(self.download_url, self.quality)
buff = stream.open()
if self.backlog:
seglen = buff.worker.playlist_sequences[0].segment.duration
seq_limit = floor(max_length/seglen) * self.part
if self.part > 1:
buff.close()
stream.start_offset = (self.part - 1) * (max_length - 60 * seglen * (self.part - 1))
buff = stream.open()
error = 0
with open(self.path, 'wb') as f:
data = buff.read(chunk_size)
while data and error < retry:
if self.parent.end:
buff.close()
exit()
try:
f.write(data)
data = buff.read(chunk_size)
except OSError as err:
self.logger.error(err)
error += 1
if self.backlog:
should_pass = buff.worker.playlist_sequence > (seq_limit - 2)
should_close = buff.worker.playlist_sequence > seq_limit
else:
should_pass = (datetime.datetime.now() - self.start).seconds > (max_length-15)
should_close = (datetime.datetime.now() - self.start).seconds > max_length
if should_pass and not self.passed:
self.passed = True
self.logger.info(f'Max length of {max_length} seconds has been exceeded for {self.path}, continuing download in part {self.part+1}')
twitch_data = self.twitch_data.copy()
twitch_data['game_name'] = self.chapters.get_current_game()
twitch_data['title'] = self.chapters.get_current_title()
if self.backlog:
self.parent.backlog_video = vodloader_video(self.parent, self.download_url, twitch_data, backlog=self.backlog, quality=self.quality, part=self.part+1)
else:
self.parent.livestream = vodloader_video(self.parent, self.download_url, twitch_data, backlog=self.backlog, quality=self.quality, part=self.part+1)
if should_close:
buff.close()
break
buff.close()
self.parent.status[self.id] = False
self.parent.status.save()
self.logger.info(f'Finished downloading stream from {self.download_url}')
def upload_stream(self, chunk_size=4194304, retry=3):
self.parent.uploader.queue.append((self.path, self.get_youtube_body(self.parent.chapters_type), self.id, self.keep))
def get_youtube_body(self, chapters=False):
tvid = f'tvid:{self.id}'
timestamp = f'timestamp:{self.start_absolute.timestamp()}'
if self.part == 1 and self.passed: tvid += f'p{self.part}'
body = {
'snippet': {
'title': self.get_formatted_string(self.parent.uploader.youtube_args['title'], self.start_absolute),
'description': self.get_formatted_string(self.parent.uploader.youtube_args['description'], self.start_absolute),
'tags': [tvid, timestamp]
},
'status': {
'selfDeclaredMadeForKids': False
}
}
if 'tags' in self.parent.uploader.youtube_args: body['snippet']['tags'] += self.parent.uploader.youtube_args['tags']
if 'categoryId' in self.parent.uploader.youtube_args: body['snippet']['categoryId'] = self.parent.uploader.youtube_args['categoryId']
if 'privacy' in self.parent.uploader.youtube_args: body['status']['privacyStatus'] = self.parent.uploader.youtube_args['privacy']
if not self.backlog:
body['snippet']['tags'] += self.chapters.get_games()
if chapters:
if chapters.lower() == 'games' and self.chapters.get_game_chapters():
body['snippet']['description'] += f'\n\n\n\n{self.chapters.get_game_chapters()}'
if chapters.lower() == 'titles' and self.chapters.get_title_chapters():
body['snippet']['description'] += f'\n\n\n\n{self.chapters.get_title_chapters()}'
if self.part > 1:
body['snippet']['title'] = f'{body["snippet"]["title"]} Part {self.part}'
body['snippet']['title'] = self.filter_string(body['snippet']['title'])
body['snippet']['description'] = self.filter_string(body['snippet']['description'])
return body
@staticmethod
def filter_string(s):
nono_chars = '<>|'
        return ''.join([x for x in s if x not in nono_chars])
def get_formatted_string(self, input, date):
output = input.replace('%C', self.parent.channel)
output = output.replace('%i', self.id)
output = output.replace('%g', self.chapters.get_first_game())
output = output.replace('%G', self.chapters.get_current_game())
output = output.replace('%t', self.chapters.get_first_title())
        output = output.replace('%T', self.chapters.get_current_title())
output = date.strftime(output)
return output
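    # Illustrative use of the template placeholders handled above (%C channel,
    # %i video id, %g first game, %G current game, %t first title, %T current title);
    # the template string itself comes from the uploader's youtube_args config.
    # Because date.strftime() is applied last, strftime tokens such as %Y-%m-%d in a
    # template like '%C - %g (%Y-%m-%d)' are expanded with the stream's start time.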
def get_stream_markers(self, retry=3):
url = f'https://api.twitch.tv/kraken/videos/{self.vod_id}/markers?api_version=5&client_id={self.parent.twitch.app_id}'
for i in range(retry):
r = requests.get(url)
if r.status_code == 200:
return json.loads(r.content)
return None
def get_video(self, retry=3):
url = f'https://api.twitch.tv/kraken/videos/{self.vod_id}?api_version=5&client_id={self.parent.twitch.app_id}'
for i in range(retry):
r = requests.get(url)
if r.status_code == 200:
return json.loads(r.content)
return None
def get_vod_chapters(self):
video = self.get_video()
chapters = vodloader_chapters(video['game'], video['title'])
offset = 0
response = self.get_stream_markers()
if 'markers' in response and 'game_changes' in response['markers'] and response['markers']['game_changes']:
for marker in response['markers']['game_changes']:
offset += marker['time']
chapters.timestamps.append((chapters.get_timestamp_from_sec(offset), marker['label'], video['title']))
return chapters
|
refinement.mp.py
|
import pydiffvg
import argparse
import torch
import skimage.io
import os
import re
from shutil import copyfile
import shutil
from PIL import Image
import numpy as np
import torch.multiprocessing as mp
from torch.multiprocessing import Pool, Process, set_start_method
try:
set_start_method('spawn')
except RuntimeError:
pass
gamma = 1.0
def cal_alignment_loss(args, save_path):
target = torch.from_numpy(skimage.io.imread(args.target)).to(torch.float32) / 255.0
target = target.pow(gamma)
target = target.to(pydiffvg.get_device())
target = target.unsqueeze(0)
target = target.permute(0, 3, 1, 2) # NHWC -> NCHW
canvas_width, canvas_height, shapes, shape_groups = \
pydiffvg.svg_to_scene(args.svg)
scene_args = pydiffvg.RenderFunction.serialize_scene(\
canvas_width, canvas_height, shapes, shape_groups)
render = pydiffvg.RenderFunction.apply
img = render(canvas_width, # width
canvas_height, # height
2, # num_samples_x
2, # num_samples_y
0, # seed
None, # bg
*scene_args)
# The output image is in linear RGB space. Do Gamma correction before saving the image.
points_vars = []
for path in shapes:
#print(path)
#input()
path.points.requires_grad = True
points_vars.append(path.points)
color_vars = {}
for group in shape_groups:
group.fill_color.requires_grad = True
color_vars[group.fill_color.data_ptr()] = group.fill_color
color_vars = list(color_vars.values())
# Optimize
points_optim = torch.optim.Adam(points_vars, lr=1)
color_optim = torch.optim.Adam(color_vars, lr=0)
# Adam iterations.
for t in range(args.num_iter):
points_optim.zero_grad()
color_optim.zero_grad()
# Forward pass: render the image.
scene_args = pydiffvg.RenderFunction.serialize_scene(\
canvas_width, canvas_height, shapes, shape_groups)
img = render(canvas_width, # width
canvas_height, # height
2, # num_samples_x
2, # num_samples_y
0, # seed
None, # bg
*scene_args)
# Compose img with white background
img = img[:, :, 3:4] * img[:, :, :3] + torch.ones(img.shape[0], img.shape[1], 3, device = pydiffvg.get_device()) * (1 - img[:, :, 3:4])
img = img[:, :, :3]
# Convert img from HWC to NCHW
img = img.unsqueeze(0)
img = img.permute(0, 3, 1, 2) # NHWC -> NCHW
loss = (img - target).pow(2).mean()
#if t%10 == 0:
# print('iteration:', t)
# print('render loss:', args.no_sample, loss.item())
# Backpropagate the gradients.
loss.backward()
# Take a gradient descent step.
points_optim.step()
color_optim.step()
for group in shape_groups:
group.fill_color.data.clamp_(0.0, 1.0)
if t == args.num_iter - 1:
pydiffvg.save_svg_paths_only(save_path, canvas_width, canvas_height, shapes, shape_groups)
return loss
def get_svg_glyph_bbox(svg_path):
fin = open(svg_path,'r')
path_ = fin.read().split('d="')[1]
path = path_.split('" fill=')[0]
path_splited = re.split(r"([mlc])", path)
commands = []
cur_x = 0.0
cur_y = 0.0
x_min = 1000
x_max = -1000
y_min = 1000
y_max = -1000
first_move = True
for idx in range(0,len(path_splited)):
if len(path_splited[idx]) == 0: continue
# x1,y1,x2,y2,x3,y3,x4,y4 are the absolute coords
if path_splited[idx] == 'm':
coords_str = path_splited[idx+1]
if first_move:
x4 = float(coords_str.split(' ')[1])
y4 = float(coords_str.split(' ')[2])
first_move = False
else:
x4 = cur_x + float(coords_str.split(' ')[1])
y4 = cur_y + float(coords_str.split(' ')[2])
cur_x = x4
cur_y = y4
x_min = min(cur_x, x_min)
x_max = max(cur_x, x_max)
y_min = min(cur_y, y_min)
y_max = max(cur_y, y_max)
if path_splited[idx] == 'l':
coords_str = path_splited[idx+1]
x4 = cur_x + float(coords_str.split(' ')[1])
y4 = cur_y + float(coords_str.split(' ')[2])
cur_x = x4
cur_y = y4
x_min = min(cur_x, x_min)
x_max = max(cur_x, x_max)
y_min = min(cur_y, y_min)
y_max = max(cur_y, y_max)
if path_splited[idx] == 'c':
coords_str = path_splited[idx+1]
x1 = cur_x
y1 = cur_y
x2 = cur_x + float(coords_str.split(' ')[1])
y2 = cur_y + float(coords_str.split(' ')[2])
x3 = cur_x + float(coords_str.split(' ')[3])
y3 = cur_y + float(coords_str.split(' ')[4])
x4 = cur_x + float(coords_str.split(' ')[5])
y4 = cur_y + float(coords_str.split(' ')[6])
x_min = min(x2, x3, x4, x_min)
x_max = max(x2, x3, x4, x_max)
y_min = min(y2, y3, y4, y_min)
y_max = max(y2, y3, y4, y_max)
cur_x = x4
cur_y = y4
return [x_min,x_max], [y_min,y_max]
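# Worked example (a sketch, assuming the space-separated relative 'm'/'l'/'c' path
# format this parser expects): for d="m 10 20 l 5 0 c 1 1 2 2 3 3" the cursor moves
# (10,20) -> (15,20), and the cubic's control/end points become (16,21), (17,22), (18,23),
# so get_svg_glyph_bbox() returns [10, 18], [20, 23].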
def get_img_bbox(img_path):
img = Image.open(img_path)
img = 255 - np.array(img)
img0 = np.sum(img, axis = 0)
img1 = np.sum(img, axis = 1)
y_range = np.where(img1>127.5)[0]
x_range = np.where(img0>127.5)[0]
return [x_range[0],x_range[-1]], [y_range[0],y_range[-1]]
'''
def svg_bbox_align(svg_path, trgimg_path):
svg_xr, svg_yr = get_svg_glyph_bbox(svg_path)
img_xr, img_yr = get_img_bbox(trgimg_path)
svg_w = svg_xr[1] - svg_xr[0]
svg_h = svg_yr[1] - svg_yr[0]
svg_xc = (svg_xr[1] + svg_xr[0]) / 2.0
svg_yc = (svg_yr[1] + svg_yr[0]) / 2.0
img_w = img_xr[1] - img_xr[0] + 1
img_h = img_yr[1] - img_yr[0] + 1
img_xc = (img_xr[1] + img_xr[0]) / 2.0
img_yc = (img_yr[1] + img_yr[0]) / 2.0
def affine_coord(coord, x_or_y, cur_cmd, first_move):
if x_or_y % 2 == 0: # for x
if cur_cmd == 'm' and first_move:
new_coord = (coord - svg_xc) * (img_w / svg_w) + img_xc
res = str(new_coord)
else:
res = str((img_w / svg_w) * (coord))
else: # for y
if cur_cmd == 'm' and first_move:
new_coord = (coord - svg_yc) * (img_h / svg_h) + img_yc
res = str(new_coord)
else:
res = str((img_h / svg_h) * (coord))
return res
svg_raw = open(svg_path,'r').read()
fout = open(svg_path.split('.svg')[0] + '_256.svg','w')
fout.write('<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="256px" height="256px" style="-ms-transform: rotate(360deg); -webkit-transform: rotate(360deg); transform: rotate(360deg);" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256">')
coord = '<path' + svg_raw.split('<path')[1]
tokens = coord.split(' ')
newcoord = ''
first_move = True
x_or_y = 0
for k in tokens:
if k[0] != '<' and k[0] != 'd' and k[0] != 'm' and k[0] != 'c' and k[0] != 'l' and k[0] != 'f':
if k[-1] != '"':
newcoord += affine_coord(float(k), x_or_y, cur_cmd, first_move)
if cur_cmd == 'm': first_move = False
x_or_y += 1
newcoord += ' '
else:
newcoord += affine_coord(float(k[0:len(k)-1]), x_or_y, cur_cmd, first_move)
x_or_y += 1
newcoord += '" '
else:
cur_cmd = k
newcoord += k
newcoord += ' '
fout.write(newcoord)
fout.close()
'''
def svg_bbox_align(svg_path, trgimg_path):
svg_raw = open(svg_path,'r').read()
fout = open(svg_path.split('.svg')[0] + '_256.svg','w')
fout.write('<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="256px" height="256px" style="-ms-transform: rotate(360deg); -webkit-transform: rotate(360deg); transform: rotate(360deg);" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256">')
scalar = 256/24
coord = '<path' + svg_raw.split('<path')[1]
tokens = coord.split(' ')
newcoord = ''
for k in tokens:
if k[0] != '<' and k[0] != 'd' and k[0] != 'm' and k[0] != 'c' and k[0] != 'l' and k[0] != 'f':
if k[-1] != '"':
newcoord += str(float(k) * scalar)
newcoord += ' '
else:
newcoord += str(float(k[0:len(k)-1]) * scalar)
newcoord += '" '
else:
newcoord += k
newcoord += ' '
fout.write(newcoord)
fout.close()
def process_s1(process_id, chars_per_process, args):
svg_path = os.path.join('experiments', args.experiment_name + '_main_model/results/', '%04d'%int(args.fontid), 'svgs')
imghr_path = os.path.join('experiments', args.experiment_name + '_main_model/results/', '%04d'%int(args.fontid), 'imgs_256')
svg_outpath = os.path.join('experiments', args.experiment_name + '_main_model/results/', '%04d'%int(args.fontid), 'svgs_bestcand')
if not os.path.exists(svg_outpath):
os.mkdir(svg_outpath)
for i in range(process_id * chars_per_process, (process_id + 1) * chars_per_process):
if i >= args.num_chars:
break
        # pick the best candidate (lowest alignment loss) among all synthesized SVGs
        minLoss = 10000
        noMin = 0
        tempLoss = 0
for j in range(0, int(args.candidate_nums)):
#print(f'processing_char_{i:02d}_candidate_{j:02d}')
args.no_sample = j
args.svg = os.path.join(svg_path, 'syn_%02d_%02d.svg'%(i,j))
args.target = os.path.join(imghr_path, '%02d_256.png'%i)
#svg_aligned = align(args.svg, args.target)
svg_bbox_align(args.svg, args.target)
args.svg = os.path.join(svg_path, 'syn_%02d_%02d_256.svg'%(i,j))
#svg_init_aligned = os.path.join(svg_path, 'syn_%02d_'%i, '%02d'%j, '.svg')
tempLoss = cal_alignment_loss(args, save_path = args.svg.split('.svg')[0] + '_r.svg')
#print(f'finished_char_{i:02d}_candidate_{j:02d}')
if tempLoss < minLoss:
noMin = j
minLoss = tempLoss
        # copy the best candidate; the longer refinement is done in stage 2 (process_s2)
src_path = os.path.join(svg_path, 'syn_%02d_%02d_256.svg'%(i,noMin))
trg_path = os.path.join(svg_outpath, 'syn_%02d_256.svg'%(i))
shutil.copy(src_path, trg_path)
def process_s2(process_id, chars_per_process, args):
svg_path = os.path.join('experiments', args.experiment_name + '_main_model/results/', '%04d'%int(args.fontid), 'svgs')
imghr_path = os.path.join('experiments', args.experiment_name + '_main_model/results/', '%04d'%int(args.fontid), 'imgs_256')
svg_cdt_path = os.path.join('experiments', args.experiment_name + '_main_model/results/', '%04d'%int(args.fontid), 'svgs_bestcand')
svg_outpath = os.path.join('experiments', args.experiment_name + '_main_model/results/', '%04d'%int(args.fontid), 'svgs_refined')
if not os.path.exists(svg_outpath):
os.mkdir(svg_outpath)
for i in range(process_id * chars_per_process, (process_id + 1) * chars_per_process):
if i >= args.num_chars:
break
# refine the best candidate
args.num_iter = 300
args.svg = os.path.join(svg_cdt_path, 'syn_%02d_256.svg'%(i))
args.target = os.path.join(imghr_path, '%02d_256.png'%i)
tempLoss = cal_alignment_loss(args, save_path = os.path.join(svg_outpath, 'syn_%02d.svg'%(i)))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--svg", help="source SVG path", type=str, default='none')
parser.add_argument("--target", help="target image path", type=str, default='none')
parser.add_argument("--use_lpips_loss", dest='use_lpips_loss', action='store_true')
parser.add_argument("--num_iter", type=int, default=40)
parser.add_argument("--no_sample", type=int, default=0)
parser.add_argument("--num_processes", type=int, default=4)
parser.add_argument("--num_chars", type=int, default=52)
parser.add_argument("--fontid", type=str, default='17')
parser.add_argument("--experiment_name", type=str, default='dvf')
parser.add_argument("--candidate_nums", type=str, default='20')
args = parser.parse_args()
svg_outpath = os.path.join('experiments', args.experiment_name + '_main_model/results/', '%04d'%int(args.fontid), 'svgs_refined')
chars_per_process = args.num_chars // args.num_processes
print("stage 1: find the best candidates ...")
processes = [mp.Process(target=process_s1, args=[pid,chars_per_process,args]) for pid in range(args.num_processes + 1)]
for p in processes:
p.start()
for p in processes:
p.join()
print("stage 2: further refine these candidates ...")
processes = [mp.Process(target=process_s2, args=[pid,chars_per_process,args]) for pid in range(args.num_processes + 1)]
for p in processes:
p.start()
for p in processes:
p.join()
svg_merge_outpath = os.path.join(svg_outpath, f"syn_svg_merge.html")
fout = open(svg_merge_outpath, 'w')
for i in range(0,52):
svg = open(os.path.join(svg_outpath, 'syn_%02d.svg'%(i)),'r').read()
svg = svg.replace('<svg xmlns="http://www.w3.org/2000/svg" version="1.1" width="256" height="256">', '<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="64px" height="64px" style="-ms-transform: rotate(360deg); -webkit-transform: rotate(360deg); transform: rotate(360deg);" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256">')
fout.write(svg)
if i > 0 and i % 13 == 12:
fout.write('<br>')
fout.close()
|
utils.py
|
from biosimulators_utils.log.data_model import CombineArchiveLog # noqa: F401
from biosimulators_utils.report.data_model import SedDocumentResults # noqa: F401
import functools
import importlib
import multiprocessing
import os
import sys
import time
import types # noqa: F401
import yaml
__all__ = [
'get_simulators',
'get_simulator_api',
'get_simulator_metadata',
'use_simulator_api_to_exec_sedml_docs_in_combine_archive',
'exec_in_subprocess',
]
@functools.lru_cache(maxsize=None)
def get_simulators():
""" Get the ids and APIs of the available simulation tools
Returns:
:obj:`list` of :obj:`dict`: list of the id and name of the module which implements the API for
each available simulation tool
"""
with open(os.path.join(os.path.dirname(__file__), 'simulators.yml'), 'r') as file:
return yaml.load(file, Loader=yaml.Loader)
def get_simulator_api(api, reload=False):
""" Get the BioSimulators API for a simulator
Args:
api (:obj:`str`): module which implements the API for the simulator
reload (:obj:`bool`, optional): whether to reload the API
Returns:
:obj:`types.ModuleType`
"""
module = importlib.import_module(api)
if reload:
importlib.reload(module)
return module
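# Illustrative usage (the module name below is hypothetical; real module names come
# from simulators.yml via get_simulators(), and the argument list is only a sketch):
#   api = get_simulator_api('biosimulators_tellurium.core')
#   results, log = api.exec_sedml_docs_in_combine_archive(archive_path, out_dir)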
def get_simulator_metadata(id):
""" Get metadata about a simulator
Args:
id (:obj:`str`): BioSimulators id of the simulator
Returns:
:obj:`dict`: metadata about the simulator
"""
simulator = next(simulator for simulator in get_simulators() if simulator['id'] == id)
id = simulator['id']
name = simulator['name']
api_module = simulator['api']['module']
api = get_simulator_api(api_module)
version = api.get_simulator_version()
api_version = api.__version__
return {
'_type': 'Simulator',
'id': id,
'name': name,
'version': version,
'api': {
'_type': 'SimulatorApi',
'module': api_module,
'package': simulator['api']['package'],
'version': api_version,
},
'specs': 'https://api.biosimulators.org/simulators/{}/{}'.format(id, version),
}
def use_simulator_api_to_exec_sedml_docs_in_combine_archive(api_name, *args, **kwargs):
""" Execute the SED tasks defined in a COMBINE/OMEX archive and save the outputs
Args:
        api_name (:obj:`str`): module which implements the API for the simulator
*args (:obj:`list`): positional arguments to ``exec_sedml_docs_in_combine_archive``
**kwargs (:obj:`dict`): keyword arguments to ``exec_sedml_docs_in_combine_archive``
Returns:
        :obj:`tuple`:
            * :obj:`SedDocumentResults`: results
            * :obj:`dict` in the ``SimulationRunResults`` schema: log
"""
api = get_simulator_api(api_name)
results, log = api.exec_sedml_docs_in_combine_archive(*args, **kwargs)
if log:
log = log.to_json()
return results, log
class Process(multiprocessing.context.ForkProcess):
""" Fork process which collects the exceptions of its child
Attributes:
_parent_conn (:obj:`multiprocessing.connection.Connection`): connection for the parent
_child_conn (:obj:`multiprocessing.connection.Connection`): connection for the child
_exception (:obj:`Exception` or :obj:`None`): exception, if any, from the process' child
    Inspired by https://stackoverflow.com/questions/19924104/
"""
def __init__(self, *args, **kwargs):
super(multiprocessing.context.ForkProcess, self).__init__(*args, **kwargs)
self._parent_conn, self._child_conn = multiprocessing.Pipe()
self._exception = None
def run(self):
""" Run the process """
try:
super(multiprocessing.context.ForkProcess, self).run()
self._child_conn.send(False)
except Exception as exception:
self._child_conn.send(exception.with_traceback(sys.exc_info()[2]))
@property
def exception(self):
""" Get the exception from process' child, if any
Returns:
:obj:`Exception` or :obj:`None`: exception, if any, from the process' child
"""
if self._parent_conn.poll():
self._exception = self._parent_conn.recv()
return self._exception
def exec_in_subprocess(func, *args, poll_interval=0.01, timeout=None, **kwargs):
""" Execute a function in a fork
Args:
func (:obj:`types.FunctionType`): function
        *args (:obj:`list`): list of positional arguments for the function
poll_interval (:obj:`float`, optional): interval to poll the status of the subprocess
timeout (:obj:`float`, optional): maximum execution time in seconds
**kwargs (:obj:`dict`, optional): dictionary of keyword arguments for the function
Returns:
:obj:`object`: result of the function
"""
context_instance = multiprocessing.get_context('fork')
queue = context_instance.Queue()
process = Process(target=subprocess_target, args=[queue, func] + list(args), kwargs=kwargs)
process.start()
start_time = time.time()
while process.exception is None:
time.sleep(poll_interval)
if timeout is not None and (time.time() - start_time) > timeout:
raise TimeoutError('Execution did not complete in {} s.'.format(timeout))
if process.exception:
raise process.exception
results = queue.get()
return results
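# Example usage (a sketch; `slow_square` is a hypothetical function):
#   def slow_square(x):
#       time.sleep(0.5)
#       return x * x
#   exec_in_subprocess(slow_square, 4, timeout=10)  # -> 16, or TimeoutError if it stalls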
def subprocess_target(queue, func, *args, **kwargs):
""" Target executer for a subprocess
Args:
queue (:obj:`multiprocessing.queues.Queue`): queue to send the results of the function to
func (:obj:`types.FunctionType`): function to execute
args (:obj:`list`): list of positional arguments for the function
kwargs (:obj:`dict`): dictionary of keyword arguments for the function
"""
result = func(*args, **kwargs)
queue.put(result)
|
pixels_4mic_hat.py
|
import apa102
import time
import threading
from gpiozero import LED
try:
import queue as Queue
except ImportError:
import Queue as Queue
from alexa_led_pattern import AlexaLedPattern
from google_home_led_pattern import GoogleHomeLedPattern
class Pixels:
PIXELS_N = 12
def __init__(self, pattern=GoogleHomeLedPattern):
self.pattern = pattern(show=self.show)
self.dev = apa102.APA102(num_led=self.PIXELS_N)
self.power = LED(5)
self.power.on()
self.queue = Queue.Queue()
self.thread = threading.Thread(target=self._run)
self.thread.daemon = True
self.thread.start()
self.last_direction = None
def wakeup(self, direction=0):
self.last_direction = direction
def f():
self.pattern.wakeup(direction)
self.put(f)
def listen(self):
if self.last_direction:
def f():
self.pattern.wakeup(self.last_direction)
self.put(f)
else:
self.put(self.pattern.listen)
def think(self):
self.put(self.pattern.think)
def speak(self):
self.put(self.pattern.speak)
def off(self):
self.put(self.pattern.off)
def put(self, func):
self.pattern.stop = True
self.queue.put(func)
def _run(self):
while True:
func = self.queue.get()
self.pattern.stop = False
func()
def show(self, data):
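        # `data` is assumed to be a flat sequence with 4 values per LED; only
        # entries 4*i+1 .. 4*i+3 are used here as R, G and B, and entry 4*i is skipped.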
for i in range(self.PIXELS_N):
self.dev.set_pixel(i, int(data[4*i + 1]), int(data[4*i + 2]), int(data[4*i + 3]))
self.dev.show()
pixels = Pixels()
if __name__ == '__main__':
while True:
try:
pixels.wakeup()
time.sleep(3)
pixels.think()
time.sleep(3)
pixels.speak()
time.sleep(6)
pixels.off()
time.sleep(3)
except KeyboardInterrupt:
break
pixels.off()
time.sleep(1)
|
framework.py
|
#!/usr/bin/env python
from __future__ import print_function
import gc
import sys
import os
import select
import unittest
import tempfile
import time
import resource
import faulthandler
from collections import deque
from threading import Thread, Event
from inspect import getdoc
from traceback import format_exception
from logging import FileHandler, DEBUG, Formatter
from scapy.packet import Raw
from hook import StepHook, PollHook
from vpp_pg_interface import VppPGInterface
from vpp_sub_interface import VppSubInterface
from vpp_lo_interface import VppLoInterface
from vpp_papi_provider import VppPapiProvider
from log import *
from vpp_object import VppObjectRegistry
if os.name == 'posix' and sys.version_info[0] < 3:
# using subprocess32 is recommended by python official documentation
# @ https://docs.python.org/2/library/subprocess.html
import subprocess32 as subprocess
else:
import subprocess
"""
Test framework module.
The module provides a set of tools for constructing and running tests and
representing the results.
"""
class _PacketInfo(object):
"""Private class to create packet info object.
Help process information about the next packet.
Set variables to default values.
"""
#: Store the index of the packet.
index = -1
#: Store the index of the source packet generator interface of the packet.
src = -1
#: Store the index of the destination packet generator interface
#: of the packet.
dst = -1
#: Store expected ip version
ip = -1
#: Store expected upper protocol
proto = -1
#: Store the copy of the former packet.
data = None
def __eq__(self, other):
index = self.index == other.index
src = self.src == other.src
dst = self.dst == other.dst
data = self.data == other.data
return index and src and dst and data
def pump_output(testclass):
""" pump output from vpp stdout/stderr to proper queues """
while not testclass.pump_thread_stop_flag.wait(0):
readable = select.select([testclass.vpp.stdout.fileno(),
testclass.vpp.stderr.fileno(),
testclass.pump_thread_wakeup_pipe[0]],
[], [])[0]
if testclass.vpp.stdout.fileno() in readable:
read = os.read(testclass.vpp.stdout.fileno(), 1024)
testclass.vpp_stdout_deque.append(read)
if testclass.vpp.stderr.fileno() in readable:
read = os.read(testclass.vpp.stderr.fileno(), 1024)
testclass.vpp_stderr_deque.append(read)
# ignoring the dummy pipe here intentionally - the flag will take care
# of properly terminating the loop
def running_extended_tests():
try:
s = os.getenv("EXTENDED_TESTS")
return True if s.lower() in ("y", "yes", "1") else False
except:
return False
return False
class VppTestCase(unittest.TestCase):
"""This subclass is a base class for VPP test cases that are implemented as
classes. It provides methods to create and run test case.
"""
@property
def packet_infos(self):
"""List of packet infos"""
return self._packet_infos
@classmethod
def get_packet_count_for_if_idx(cls, dst_if_index):
"""Get the number of packet info for specified destination if index"""
if dst_if_index in cls._packet_count_for_dst_if_idx:
return cls._packet_count_for_dst_if_idx[dst_if_index]
else:
return 0
@classmethod
def instance(cls):
"""Return the instance of this testcase"""
return cls.test_instance
@classmethod
def set_debug_flags(cls, d):
cls.debug_core = False
cls.debug_gdb = False
cls.debug_gdbserver = False
if d is None:
return
dl = d.lower()
if dl == "core":
cls.debug_core = True
elif dl == "gdb":
cls.debug_gdb = True
elif dl == "gdbserver":
cls.debug_gdbserver = True
else:
raise Exception("Unrecognized DEBUG option: '%s'" % d)
@classmethod
def setUpConstants(cls):
""" Set-up the test case class based on environment variables """
try:
s = os.getenv("STEP")
cls.step = True if s.lower() in ("y", "yes", "1") else False
except:
cls.step = False
try:
d = os.getenv("DEBUG")
except:
d = None
cls.set_debug_flags(d)
cls.vpp_bin = os.getenv('VPP_TEST_BIN', "vpp")
cls.plugin_path = os.getenv('VPP_TEST_PLUGIN_PATH')
cls.extern_plugin_path = os.getenv('EXTERN_PLUGINS')
plugin_path = None
if cls.plugin_path is not None:
if cls.extern_plugin_path is not None:
plugin_path = "%s:%s" % (
cls.plugin_path, cls.extern_plugin_path)
else:
plugin_path = cls.plugin_path
elif cls.extern_plugin_path is not None:
plugin_path = cls.extern_plugin_path
debug_cli = ""
if cls.step or cls.debug_gdb or cls.debug_gdbserver:
debug_cli = "cli-listen localhost:5002"
coredump_size = None
try:
size = os.getenv("COREDUMP_SIZE")
if size is not None:
coredump_size = "coredump-size %s" % size
except:
pass
if coredump_size is None:
coredump_size = "coredump-size unlimited"
cls.vpp_cmdline = [cls.vpp_bin, "unix",
"{", "nodaemon", debug_cli, coredump_size, "}",
"api-trace", "{", "on", "}",
"api-segment", "{", "prefix", cls.shm_prefix, "}",
"plugins", "{", "plugin", "dpdk_plugin.so", "{",
"disable", "}", "}"]
if plugin_path is not None:
cls.vpp_cmdline.extend(["plugin_path", plugin_path])
cls.logger.info("vpp_cmdline: %s" % cls.vpp_cmdline)
@classmethod
def wait_for_enter(cls):
if cls.debug_gdbserver:
print(double_line_delim)
print("Spawned GDB server with PID: %d" % cls.vpp.pid)
elif cls.debug_gdb:
print(double_line_delim)
print("Spawned VPP with PID: %d" % cls.vpp.pid)
else:
cls.logger.debug("Spawned VPP with PID: %d" % cls.vpp.pid)
return
print(single_line_delim)
print("You can debug the VPP using e.g.:")
if cls.debug_gdbserver:
print("gdb " + cls.vpp_bin + " -ex 'target remote localhost:7777'")
print("Now is the time to attach a gdb by running the above "
"command, set up breakpoints etc. and then resume VPP from "
"within gdb by issuing the 'continue' command")
elif cls.debug_gdb:
print("gdb " + cls.vpp_bin + " -ex 'attach %s'" % cls.vpp.pid)
print("Now is the time to attach a gdb by running the above "
"command and set up breakpoints etc.")
print(single_line_delim)
raw_input("Press ENTER to continue running the testcase...")
@classmethod
def run_vpp(cls):
cmdline = cls.vpp_cmdline
if cls.debug_gdbserver:
gdbserver = '/usr/bin/gdbserver'
if not os.path.isfile(gdbserver) or \
not os.access(gdbserver, os.X_OK):
raise Exception("gdbserver binary '%s' does not exist or is "
"not executable" % gdbserver)
cmdline = [gdbserver, 'localhost:7777'] + cls.vpp_cmdline
cls.logger.info("Gdbserver cmdline is %s", " ".join(cmdline))
try:
cls.vpp = subprocess.Popen(cmdline,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1)
except Exception as e:
cls.logger.critical("Couldn't start vpp: %s" % e)
raise
cls.wait_for_enter()
@classmethod
def setUpClass(cls):
"""
Perform class setup before running the testcase
Remove shared memory files, start vpp and connect the vpp-api
"""
gc.collect() # run garbage collection first
cls.logger = getLogger(cls.__name__)
cls.tempdir = tempfile.mkdtemp(
prefix='vpp-unittest-' + cls.__name__ + '-')
cls.file_handler = FileHandler("%s/log.txt" % cls.tempdir)
cls.file_handler.setFormatter(
Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
datefmt="%H:%M:%S"))
cls.file_handler.setLevel(DEBUG)
cls.logger.addHandler(cls.file_handler)
cls.shm_prefix = cls.tempdir.split("/")[-1]
os.chdir(cls.tempdir)
cls.logger.info("Temporary dir is %s, shm prefix is %s",
cls.tempdir, cls.shm_prefix)
cls.setUpConstants()
cls.reset_packet_infos()
cls._captures = []
cls._zombie_captures = []
cls.verbose = 0
cls.vpp_dead = False
cls.registry = VppObjectRegistry()
cls.vpp_startup_failed = False
# need to catch exceptions here because if we raise, then the cleanup
# doesn't get called and we might end with a zombie vpp
try:
cls.run_vpp()
cls.vpp_stdout_deque = deque()
cls.vpp_stderr_deque = deque()
cls.pump_thread_stop_flag = Event()
cls.pump_thread_wakeup_pipe = os.pipe()
cls.pump_thread = Thread(target=pump_output, args=(cls,))
cls.pump_thread.daemon = True
cls.pump_thread.start()
cls.vapi = VppPapiProvider(cls.shm_prefix, cls.shm_prefix, cls)
if cls.step:
hook = StepHook(cls)
else:
hook = PollHook(cls)
cls.vapi.register_hook(hook)
cls.sleep(0.1, "after vpp startup, before initial poll")
try:
hook.poll_vpp()
except:
cls.vpp_startup_failed = True
cls.logger.critical(
"VPP died shortly after startup, check the"
" output to standard error for possible cause")
raise
try:
cls.vapi.connect()
except:
if cls.debug_gdbserver:
print(colorize("You're running VPP inside gdbserver but "
"VPP-API connection failed, did you forget "
"to 'continue' VPP from within gdb?", RED))
raise
except:
t, v, tb = sys.exc_info()
try:
cls.quit()
except:
pass
raise t, v, tb
@classmethod
def quit(cls):
"""
Disconnect vpp-api, kill vpp and cleanup shared memory files
"""
if (cls.debug_gdbserver or cls.debug_gdb) and hasattr(cls, 'vpp'):
cls.vpp.poll()
if cls.vpp.returncode is None:
print(double_line_delim)
print("VPP or GDB server is still running")
print(single_line_delim)
raw_input("When done debugging, press ENTER to kill the "
"process and finish running the testcase...")
os.write(cls.pump_thread_wakeup_pipe[1], 'ding dong wake up')
cls.pump_thread_stop_flag.set()
if hasattr(cls, 'pump_thread'):
cls.logger.debug("Waiting for pump thread to stop")
cls.pump_thread.join()
if hasattr(cls, 'vpp_stderr_reader_thread'):
cls.logger.debug("Waiting for stdderr pump to stop")
cls.vpp_stderr_reader_thread.join()
if hasattr(cls, 'vpp'):
if hasattr(cls, 'vapi'):
cls.vapi.disconnect()
del cls.vapi
cls.vpp.poll()
if cls.vpp.returncode is None:
cls.logger.debug("Sending TERM to vpp")
cls.vpp.terminate()
cls.logger.debug("Waiting for vpp to die")
cls.vpp.communicate()
del cls.vpp
if cls.vpp_startup_failed:
stdout_log = cls.logger.info
stderr_log = cls.logger.critical
else:
stdout_log = cls.logger.info
stderr_log = cls.logger.info
if hasattr(cls, 'vpp_stdout_deque'):
stdout_log(single_line_delim)
stdout_log('VPP output to stdout while running %s:', cls.__name__)
stdout_log(single_line_delim)
vpp_output = "".join(cls.vpp_stdout_deque)
with open(cls.tempdir + '/vpp_stdout.txt', 'w') as f:
f.write(vpp_output)
stdout_log('\n%s', vpp_output)
stdout_log(single_line_delim)
if hasattr(cls, 'vpp_stderr_deque'):
stderr_log(single_line_delim)
stderr_log('VPP output to stderr while running %s:', cls.__name__)
stderr_log(single_line_delim)
vpp_output = "".join(cls.vpp_stderr_deque)
with open(cls.tempdir + '/vpp_stderr.txt', 'w') as f:
f.write(vpp_output)
stderr_log('\n%s', vpp_output)
stderr_log(single_line_delim)
@classmethod
def tearDownClass(cls):
""" Perform final cleanup after running all tests in this test-case """
cls.quit()
cls.file_handler.close()
def tearDown(self):
""" Show various debug prints after each test """
self.logger.debug("--- tearDown() for %s.%s(%s) called ---" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
if not self.vpp_dead:
self.logger.debug(self.vapi.cli("show trace"))
self.logger.info(self.vapi.ppcli("show interface"))
self.logger.info(self.vapi.ppcli("show hardware"))
self.logger.info(self.vapi.ppcli("show error"))
self.logger.info(self.vapi.ppcli("show run"))
self.registry.remove_vpp_config(self.logger)
# Save/Dump VPP api trace log
api_trace = "vpp_api_trace.%s.log" % self._testMethodName
tmp_api_trace = "/tmp/%s" % api_trace
vpp_api_trace_log = "%s/%s" % (self.tempdir, api_trace)
self.logger.info(self.vapi.ppcli("api trace save %s" % api_trace))
self.logger.info("Moving %s to %s\n" % (tmp_api_trace,
vpp_api_trace_log))
os.rename(tmp_api_trace, vpp_api_trace_log)
self.logger.info(self.vapi.ppcli("api trace dump %s" %
vpp_api_trace_log))
else:
self.registry.unregister_all(self.logger)
def setUp(self):
""" Clear trace before running each test"""
self.logger.debug("--- setUp() for %s.%s(%s) called ---" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
if self.vpp_dead:
raise Exception("VPP is dead when setting up the test")
self.sleep(.1, "during setUp")
self.vpp_stdout_deque.append(
"--- test setUp() for %s.%s(%s) starts here ---\n" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
self.vpp_stderr_deque.append(
"--- test setUp() for %s.%s(%s) starts here ---\n" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
self.vapi.cli("clear trace")
# store the test instance inside the test class - so that objects
# holding the class can access instance methods (like assertEqual)
type(self).test_instance = self
@classmethod
def pg_enable_capture(cls, interfaces):
"""
Enable capture on packet-generator interfaces
:param interfaces: iterable interface indexes
"""
for i in interfaces:
i.enable_capture()
@classmethod
def register_capture(cls, cap_name):
""" Register a capture in the testclass """
# add to the list of captures with current timestamp
cls._captures.append((time.time(), cap_name))
# filter out from zombies
cls._zombie_captures = [(stamp, name)
for (stamp, name) in cls._zombie_captures
if name != cap_name]
@classmethod
def pg_start(cls):
""" Remove any zombie captures and enable the packet generator """
# how long before capture is allowed to be deleted - otherwise vpp
# crashes - 100ms seems enough (this shouldn't be needed at all)
capture_ttl = 0.1
now = time.time()
for stamp, cap_name in cls._zombie_captures:
wait = stamp + capture_ttl - now
if wait > 0:
cls.sleep(wait, "before deleting capture %s" % cap_name)
now = time.time()
cls.logger.debug("Removing zombie capture %s" % cap_name)
cls.vapi.cli('packet-generator delete %s' % cap_name)
cls.vapi.cli("trace add pg-input 50") # 50 is maximum
cls.vapi.cli('packet-generator enable')
cls._zombie_captures = cls._captures
cls._captures = []
@classmethod
def create_pg_interfaces(cls, interfaces):
"""
Create packet-generator interfaces.
:param interfaces: iterable indexes of the interfaces.
:returns: List of created interfaces.
"""
result = []
for i in interfaces:
intf = VppPGInterface(cls, i)
setattr(cls, intf.name, intf)
result.append(intf)
cls.pg_interfaces = result
return result
@classmethod
def create_loopback_interfaces(cls, interfaces):
"""
Create loopback interfaces.
:param interfaces: iterable indexes of the interfaces.
:returns: List of created interfaces.
"""
result = []
for i in interfaces:
intf = VppLoInterface(cls, i)
setattr(cls, intf.name, intf)
result.append(intf)
cls.lo_interfaces = result
return result
@staticmethod
def extend_packet(packet, size):
"""
Extend packet to given size by padding with spaces
NOTE: Currently works only when Raw layer is present.
:param packet: packet
:param size: target size
"""
packet_len = len(packet) + 4
extend = size - packet_len
if extend > 0:
packet[Raw].load += ' ' * extend
@classmethod
def reset_packet_infos(cls):
""" Reset the list of packet info objects and packet counts to zero """
cls._packet_infos = {}
cls._packet_count_for_dst_if_idx = {}
@classmethod
def create_packet_info(cls, src_if, dst_if):
"""
Create packet info object containing the source and destination indexes
and add it to the testcase's packet info list
:param VppInterface src_if: source interface
:param VppInterface dst_if: destination interface
:returns: _PacketInfo object
"""
info = _PacketInfo()
info.index = len(cls._packet_infos)
info.src = src_if.sw_if_index
info.dst = dst_if.sw_if_index
if isinstance(dst_if, VppSubInterface):
dst_idx = dst_if.parent.sw_if_index
else:
dst_idx = dst_if.sw_if_index
if dst_idx in cls._packet_count_for_dst_if_idx:
cls._packet_count_for_dst_if_idx[dst_idx] += 1
else:
cls._packet_count_for_dst_if_idx[dst_idx] = 1
cls._packet_infos[info.index] = info
return info
@staticmethod
def info_to_payload(info):
"""
Convert _PacketInfo object to packet payload
:param info: _PacketInfo object
:returns: string containing serialized data from packet info
"""
return "%d %d %d %d %d" % (info.index, info.src, info.dst,
info.ip, info.proto)
@staticmethod
def payload_to_info(payload):
"""
Convert packet payload to _PacketInfo object
:param payload: packet payload
:returns: _PacketInfo object containing de-serialized data from payload
"""
numbers = payload.split()
info = _PacketInfo()
info.index = int(numbers[0])
info.src = int(numbers[1])
info.dst = int(numbers[2])
info.ip = int(numbers[3])
info.proto = int(numbers[4])
return info
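    # Round-trip example (illustrative values): info_to_payload() on an info with
    # index=0, src=1, dst=2, ip=4, proto=17 yields the payload "0 1 2 4 17", and
    # payload_to_info("0 1 2 4 17") reconstructs the same fields.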
def get_next_packet_info(self, info):
"""
Iterate over the packet info list stored in the testcase
Start iteration with first element if info is None
Continue based on index in info if info is specified
:param info: info or None
:returns: next info in list or None if no more infos
"""
if info is None:
next_index = 0
else:
next_index = info.index + 1
if next_index == len(self._packet_infos):
return None
else:
return self._packet_infos[next_index]
def get_next_packet_info_for_interface(self, src_index, info):
"""
Search the packet info list for the next packet info with same source
interface index
:param src_index: source interface index to search for
:param info: packet info - where to start the search
:returns: packet info or None
"""
while True:
info = self.get_next_packet_info(info)
if info is None:
return None
if info.src == src_index:
return info
def get_next_packet_info_for_interface2(self, src_index, dst_index, info):
"""
Search the packet info list for the next packet info with same source
and destination interface indexes
:param src_index: source interface index to search for
:param dst_index: destination interface index to search for
:param info: packet info - where to start the search
:returns: packet info or None
"""
while True:
info = self.get_next_packet_info_for_interface(src_index, info)
if info is None:
return None
if info.dst == dst_index:
return info
def assert_equal(self, real_value, expected_value, name_or_class=None):
if name_or_class is None:
self.assertEqual(real_value, expected_value)
return
try:
msg = "Invalid %s: %d('%s') does not match expected value %d('%s')"
msg = msg % (getdoc(name_or_class).strip(),
real_value, str(name_or_class(real_value)),
expected_value, str(name_or_class(expected_value)))
except:
msg = "Invalid %s: %s does not match expected value %s" % (
name_or_class, real_value, expected_value)
self.assertEqual(real_value, expected_value, msg)
def assert_in_range(self,
real_value,
expected_min,
expected_max,
name=None):
if name is None:
msg = None
else:
msg = "Invalid %s: %s out of range <%s,%s>" % (
name, real_value, expected_min, expected_max)
self.assertTrue(expected_min <= real_value <= expected_max, msg)
@classmethod
def sleep(cls, timeout, remark=None):
if hasattr(cls, 'logger'):
cls.logger.debug("Starting sleep for %ss (%s)" % (timeout, remark))
before = time.time()
time.sleep(timeout)
after = time.time()
if after - before > 2 * timeout:
cls.logger.error("unexpected time.sleep() result - "
"slept for %ss instead of ~%ss!" % (
after - before, timeout))
if hasattr(cls, 'logger'):
cls.logger.debug(
"Finished sleep (%s) - slept %ss (wanted %ss)" % (
remark, after - before, timeout))
class TestCasePrinter(object):
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
if not hasattr(self, "_test_case_set"):
self._test_case_set = set()
def print_test_case_heading_if_first_time(self, case):
if case.__class__ not in self._test_case_set:
print(double_line_delim)
print(colorize(getdoc(case.__class__).splitlines()[0], YELLOW))
print(double_line_delim)
self._test_case_set.add(case.__class__)
class VppTestResult(unittest.TestResult):
"""
@property result_string
String variable to store the test case result string.
@property errors
List variable containing 2-tuples of TestCase instances and strings
holding formatted tracebacks. Each tuple represents a test which
raised an unexpected exception.
@property failures
List variable containing 2-tuples of TestCase instances and strings
holding formatted tracebacks. Each tuple represents a test where
a failure was explicitly signalled using the TestCase.assert*()
methods.
"""
def __init__(self, stream, descriptions, verbosity):
"""
:param stream File descriptor to store where to report test results.
Set to the standard error stream by default.
:param descriptions Boolean variable to store information if to use
test case descriptions.
:param verbosity Integer variable to store required verbosity level.
"""
unittest.TestResult.__init__(self, stream, descriptions, verbosity)
self.stream = stream
self.descriptions = descriptions
self.verbosity = verbosity
self.result_string = None
self.printer = TestCasePrinter()
def addSuccess(self, test):
"""
Record a test succeeded result
:param test:
"""
if hasattr(test, 'logger'):
test.logger.debug("--- addSuccess() %s.%s(%s) called"
% (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc))
unittest.TestResult.addSuccess(self, test)
self.result_string = colorize("OK", GREEN)
def addSkip(self, test, reason):
"""
Record a test skipped.
:param test:
:param reason:
"""
if hasattr(test, 'logger'):
test.logger.debug("--- addSkip() %s.%s(%s) called, reason is %s"
% (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc,
reason))
unittest.TestResult.addSkip(self, test, reason)
self.result_string = colorize("SKIP", YELLOW)
def addFailure(self, test, err):
"""
Record a test failed result
:param test:
:param err: error message
"""
if hasattr(test, 'logger'):
test.logger.debug("--- addFailure() %s.%s(%s) called, err is %s"
% (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc, err))
test.logger.debug("formatted exception is:\n%s" %
"".join(format_exception(*err)))
unittest.TestResult.addFailure(self, test, err)
if hasattr(test, 'tempdir'):
self.result_string = colorize("FAIL", RED) + \
' [ temp dir used by test case: ' + test.tempdir + ' ]'
else:
self.result_string = colorize("FAIL", RED) + ' [no temp dir]'
def addError(self, test, err):
"""
Record a test error result
:param test:
:param err: error message
"""
if hasattr(test, 'logger'):
test.logger.debug("--- addError() %s.%s(%s) called, err is %s"
% (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc, err))
test.logger.debug("formatted exception is:\n%s" %
"".join(format_exception(*err)))
unittest.TestResult.addError(self, test, err)
if hasattr(test, 'tempdir'):
self.result_string = colorize("ERROR", RED) + \
' [ temp dir used by test case: ' + test.tempdir + ' ]'
else:
self.result_string = colorize("ERROR", RED) + ' [no temp dir]'
def getDescription(self, test):
"""
Get test description
:param test:
:returns: test description
"""
# TODO: if none print warning not raise exception
short_description = test.shortDescription()
if self.descriptions and short_description:
return short_description
else:
return str(test)
def startTest(self, test):
"""
Start a test
:param test:
"""
self.printer.print_test_case_heading_if_first_time(test)
unittest.TestResult.startTest(self, test)
if self.verbosity > 0:
self.stream.writeln(
"Starting " + self.getDescription(test) + " ...")
self.stream.writeln(single_line_delim)
def stopTest(self, test):
"""
Stop a test
:param test:
"""
unittest.TestResult.stopTest(self, test)
if self.verbosity > 0:
self.stream.writeln(single_line_delim)
self.stream.writeln("%-73s%s" % (self.getDescription(test),
self.result_string))
self.stream.writeln(single_line_delim)
else:
self.stream.writeln("%-73s%s" % (self.getDescription(test),
self.result_string))
def printErrors(self):
"""
Print errors from running the test case
"""
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavour, errors):
"""
Print error list to the output stream together with error type
and test case description.
:param flavour: error type
:param errors: iterable errors
"""
for test, err in errors:
self.stream.writeln(double_line_delim)
self.stream.writeln("%s: %s" %
(flavour, self.getDescription(test)))
self.stream.writeln(single_line_delim)
self.stream.writeln("%s" % err)
class VppTestRunner(unittest.TextTestRunner):
"""
A basic test runner implementation which prints results to standard error.
"""
@property
def resultclass(self):
"""Class maintaining the results of the tests"""
return VppTestResult
def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1,
failfast=False, buffer=False, resultclass=None):
# ignore stream setting here, use hard-coded stdout to be in sync
# with prints from VppTestCase methods ...
super(VppTestRunner, self).__init__(sys.stdout, descriptions,
verbosity, failfast, buffer,
resultclass)
test_option = "TEST"
def parse_test_option(self):
try:
f = os.getenv(self.test_option)
except:
f = None
filter_file_name = None
filter_class_name = None
filter_func_name = None
if f:
if '.' in f:
parts = f.split('.')
if len(parts) > 3:
raise Exception("Unrecognized %s option: %s" %
(self.test_option, f))
if len(parts) > 2:
if parts[2] not in ('*', ''):
filter_func_name = parts[2]
if parts[1] not in ('*', ''):
filter_class_name = parts[1]
if parts[0] not in ('*', ''):
if parts[0].startswith('test_'):
filter_file_name = parts[0]
else:
filter_file_name = 'test_%s' % parts[0]
else:
if f.startswith('test_'):
filter_file_name = f
else:
filter_file_name = 'test_%s' % f
return filter_file_name, filter_class_name, filter_func_name
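    # Illustrative TEST values (the names are hypothetical): TEST=bfd selects
    # test_bfd.py; TEST=bfd.BFDTestCase restricts to that class; and
    # TEST=bfd.BFDTestCase.test_echo restricts to a single test method.
    # '*' or an empty part acts as a wildcard for that position.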
def filter_tests(self, tests, filter_file, filter_class, filter_func):
result = unittest.suite.TestSuite()
for t in tests:
if isinstance(t, unittest.suite.TestSuite):
# this is a bunch of tests, recursively filter...
x = self.filter_tests(t, filter_file, filter_class,
filter_func)
if x.countTestCases() > 0:
result.addTest(x)
elif isinstance(t, unittest.TestCase):
# this is a single test
parts = t.id().split('.')
# t.id() for common cases like this:
# test_classifier.TestClassifier.test_acl_ip
# apply filtering only if it is so
if len(parts) == 3:
if filter_file and filter_file != parts[0]:
continue
if filter_class and filter_class != parts[1]:
continue
if filter_func and filter_func != parts[2]:
continue
result.addTest(t)
else:
# unexpected object, don't touch it
result.addTest(t)
return result
def run(self, test):
"""
Run the tests
:param test:
"""
faulthandler.enable() # emit stack trace to stderr if killed by signal
print("Running tests using custom test runner") # debug message
filter_file, filter_class, filter_func = self.parse_test_option()
print("Active filters: file=%s, class=%s, function=%s" % (
filter_file, filter_class, filter_func))
filtered = self.filter_tests(test, filter_file, filter_class,
filter_func)
print("%s out of %s tests match specified filters" % (
filtered.countTestCases(), test.countTestCases()))
if not running_extended_tests():
print("Not running extended tests (some tests will be skipped)")
return super(VppTestRunner, self).run(filtered)
|
notif_handler.py
|
#!/usr/bin/python
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from os import curdir, sep
from urlparse import urlparse
import cgi
import threading
import sys, traceback
from config import Config
from utils import restartDHCP
from dhcp_conf_helper import DhcpConfEntry, DhcpConfHelper
# This class handles any incoming requests coming from the bare-metal servers installing SUSE
class NotificationHandler(BaseHTTPRequestHandler):
_dhcpConfFilename = Config.DHCP_CONF
_server = None
@classmethod
def setServer(cls, server):
cls._server = server
@classmethod
def setDhcpConfFilename(cls, filename):
cls._dhcpConfFilename = filename
def shutdownHandler(self):
stopServerThread = threading.Thread(target=self._server.shutdown)
stopServerThread.daemon = True
stopServerThread.start()
def returnPage(self,text):
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
# Send the html message
self.wfile.write(text)
return
def returnAckJson(self):
self.send_response(200)
self.send_header('Content-type','application/json')
self.end_headers()
# Send the html message
self.wfile.write('{"status":"success"}')
return
#Handler for the GET requests
def do_GET(self):
try:
if self.path.startswith("/installationCompleted"):
if self.path == "/installationCompleted" or self.path == "/installationCompleted?":
print( "Expected params")
elif self.path.startswith("/installationCompleted?"):
if self.path.count('?') > 0:
query = urlparse(self.path).query
try:
params = dict(qc.split("=") for qc in query.split("&"))
print("")
if 'hostname' in params:
hostname = params['hostname']
print("Received notification from '%s' that first stage of OS installation completed. " % hostname)
self.handleInstallationCompleted(hostname)
except:
print( "Failed parsing the parameters: %s" % self.path)
traceback.print_exc(file=sys.stdout)
self.send_error(500, "Unexpected error occurred")
else:
self.send_error(400, "Invalid parameter")
elif self.path.startswith("/shutdown"):
self.returnAckJson()
self.shutdownHandler()
else:
self.send_error(404,'File Not Found: %s' % self.path)
except IOError:
self.send_error(500,'Unexpected error for path: %s' % self.path)
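    # Expected notification (as parsed above): a GET request of the form
    #   /installationCompleted?hostname=<short-hostname>
    # where the hostname normally corresponds to a host entry in the DHCP group
    # (see handleInstallationCompleted below).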
# Handler for the POST requests
def do_POST(self):
self.send_error(405,'POST not supported: %s' % self.path)
return
def handleInstallationCompleted(self, hostname):
dhcpConf = DhcpConfHelper(self._dhcpConfFilename)
dhcpGroup = dhcpConf.getGroup()
success = False
try:
if dhcpGroup.removeChild(DhcpConfEntry.Type.Host, hostname):
print("DHCP configuration for host '%s' removed." % hostname)
if dhcpConf.save():
print("\nChanges saved in %s\n" % dhcpConf.getFilename())
restartDHCP()
else:
print("DHCP configuration for host '%s' not found." % hostname)
success = True
print("Processed notification from '%s' successfully" % hostname)
self.returnAckJson()
except:
print("Failure to handle event from: %s" % hostname)
traceback.print_exc(file=sys.stdout)
self.send_error(400, "Invalid parameter")
        # Check if there are any servers left to wait for.
        serversList = dhcpGroup.getChildren(DhcpConfEntry.Type.Host)
if serversList is None or len(serversList) == 0:
print("No more hosts to wait for. Stopping the listener.")
self.shutdownHandler()
# Return the status of the processing
return success
# server = None
# try:
# # Create the server and define the handler to manage the incomming requests
# server = HTTPServer(('', bootServerListenPort), NotificationHandler)
# print 'Started httpserver on port ' , bootServerListenPort
# # Wait forever for incoming http requests
# server.serve_forever()
# except KeyboardInterrupt:
# print '^C received, shutting down the web server'
# server.socket.close()
|
example.py
|
from gevent import monkey; monkey.patch_all() # noqa
import logging
import time
from watchdog_gevent import Observer
from watchdog.events import LoggingEventHandler
from threading import Thread
logging.basicConfig(level=logging.DEBUG)
running = True
def printer():
global running
logger = logging.getLogger("printer")
while running:
logger.info("Ping!")
time.sleep(1)
try:
pinger = Thread(target=printer)
pinger.start()
observer = Observer()
observer.schedule(LoggingEventHandler(), ".", recursive=True)
observer.start()
while True:
time.sleep(1)
except KeyboardInterrupt:
running = False
observer.stop()
observer.join()
|
InventoryBuilder.py
|
from flask import Flask
from flask import request
from flask import abort
from flask import jsonify
from gevent.pywsgi import WSGIServer
from threading import Thread
from resources.Vcenter import Vcenter
from tools.Resources import Resources
import time
import json
import os
class InventoryBuilder:
def __init__(self, json, port, sleep):
self.json = json
self.port = int(port)
self.sleep = sleep
self._user = os.environ["USER"]
self._password = os.environ["PASSWORD"]
self.vcenter_dict = dict()
self.target_tokens = dict()
self.iterated_inventory = dict()
self.successful_iteration_list = [0]
self.wsgi_address = '0.0.0.0'
if 'LOOPBACK' in os.environ:
if os.environ['LOOPBACK'] == '1':
self.wsgi_address = '127.0.0.1'
thread = Thread(target=self.run_rest_server)
thread.start()
self.query_inventory_permanent()
def run_rest_server(self):
collectors = []
metrics = []
app = Flask(__name__)
print('serving /vrops_list on', str(self.port))
@app.route('/vrops_list', methods=['GET'])
def vrops_list():
return json.dumps(self.vrops_list)
print('serving /inventory on', str(self.port))
@app.route('/vcenters/<int:iteration>', methods=['GET'])
def vcenters(iteration):
return self.iterated_inventory[str(iteration)]['vcenters']
@app.route('/datacenters/<int:iteration>', methods=['GET'])
def datacenters(iteration):
return self.iterated_inventory[str(iteration)]['datacenters']
@app.route('/clusters/<int:iteration>', methods=['GET'])
def clusters(iteration):
return self.iterated_inventory[str(iteration)]['clusters']
@app.route('/hosts/<int:iteration>', methods=['GET'])
def hosts(iteration):
return self.iterated_inventory[str(iteration)]['hosts']
@app.route('/datastores/<int:iteration>', methods=['GET'])
def datastores(iteration):
return self.iterated_inventory[str(iteration)]['datastores']
@app.route('/vms/<int:iteration>', methods=['GET'])
def vms(iteration):
return self.iterated_inventory[str(iteration)]['vms']
@app.route('/iteration', methods=['GET'])
def iteration():
return_iteration = self.successful_iteration_list[-1]
return str(return_iteration)
# debugging purpose
@app.route('/iteration_store', methods=['GET'])
def iteration_store():
return_iteration = self.successful_iteration_list
return(json.dumps(return_iteration))
@app.route('/register', methods=['POST'])
def post_registered_collectors():
if not request.json:
abort(400)
collector = {
'collector': request.json["collector"],
'metrics': request.json["metric_names"]
}
collectors.append(collector)
return jsonify({"collectors registered": collectors})
@app.route('/register', methods=['GET'])
def get_registered_collectors():
return jsonify({"collectors registered": collectors})
@app.route('/metrics', methods=['POST'])
def collect_metric_names():
if not request.json:
abort(400)
metric = {
'metric_name': request.json['metric_name']
}
metrics.append(metric)
return jsonify({"collector metrics names ": metrics})
@app.route('/metrics', methods=['GET'])
def get_metric_names():
return jsonify({"metrics": metrics})
@app.route('/metrics', methods=['DELETE'])
def delete_metric_names():
metrics.clear()
return jsonify({"metrics": metrics})
# FIXME: this could basically be the always active token list. no active token? refresh!
@app.route('/target_tokens', methods=['GET'])
def token():
return json.dumps(self.target_tokens)
try:
if os.environ['DEBUG'] >= '2':
WSGIServer((self.wsgi_address, self.port), app).serve_forever()
else:
WSGIServer((self.wsgi_address, self.port), app, log=None).serve_forever()
except TypeError as e:
print('Problem starting server, you might want to try LOOPBACK=0 or LOOPBACK=1')
print('Current used options:', str(self.wsgi_address), 'on port', str(self.port))
print(e)
def get_vrops(self):
with open(self.json) as json_file:
netbox_json = json.load(json_file)
self.vrops_list = [target['labels']['server_name'] for target in netbox_json if
target['labels']['job'] == "vrops"]
def query_inventory_permanent(self):
# the first iteration to be filled is 1; until it is ready, a curl to
# /iteration still reports 0 so clients know to wait for actual data
self.iteration = 1
while True:
# get vrops targets every run in case we have new targets appearing
self.get_vrops()
if len(self.successful_iteration_list) > 3:
iteration_to_be_deleted = self.successful_iteration_list.pop(0)
# initial case, since 0 is never filled in iterated_inventory
if iteration_to_be_deleted == 0:
continue
self.iterated_inventory.pop(str(iteration_to_be_deleted))
if os.environ['DEBUG'] >= '1':
print("deleting iteration", str(iteration_to_be_deleted))
# initialize empty inventory per iteration
self.iterated_inventory[str(self.iteration)] = dict()
if os.environ['DEBUG'] >= '1':
print("real run " + str(self.iteration))
for vrops in self.vrops_list:
if not self.query_vrops(vrops):
if os.environ['DEBUG'] >= '1':
print("retrying connection to", vrops, "in next iteration", str(self.iteration + 1))
self.get_vcenters()
self.get_datacenters()
self.get_clusters()
self.get_hosts()
self.get_datastores()
self.get_vms()
if len(self.iterated_inventory[str(self.iteration)]['vcenters']) > 0:
self.successful_iteration_list.append(self.iteration)
else:
# immediately withdraw faulty inventory
if os.environ['DEBUG'] >= '1':
print("withdrawing current iteration", self.iteration)
self.iterated_inventory.pop(str(self.iteration))
self.iteration += 1
if os.environ['DEBUG'] >= '1':
print("inventory relaxing before going to work again")
time.sleep(int(self.sleep))
def query_vrops(self, vrops):
if os.environ['DEBUG'] >= '1':
print("querying " + vrops)
token = Resources.get_token(target=vrops)
if not token:
return False
self.target_tokens[vrops] = token
vcenter = self.create_resource_objects(vrops, token)
self.vcenter_dict[vrops] = vcenter
return True
def create_resource_objects(self, vrops, token):
for adapter in Resources.get_adapter(target=vrops, token=token):
if os.environ['DEBUG'] >= '2':
print("Collecting vcenter: " + adapter['name'])
vcenter = Vcenter(target=vrops, token=token, name=adapter['name'], uuid=adapter['uuid'])
vcenter.add_datacenter()
for dc_object in vcenter.datacenter:
if os.environ['DEBUG'] >= '2':
print("Collecting Datacenter: " + dc_object.name)
dc_object.add_cluster()
for cl_object in dc_object.clusters:
if os.environ['DEBUG'] >= '2':
print("Collecting Cluster: " + cl_object.name)
cl_object.add_host()
for hs_object in cl_object.hosts:
if os.environ['DEBUG'] >= '2':
print("Collecting Host: " + hs_object.name)
hs_object.add_datastore()
for ds_object in hs_object.datastores:
if os.environ['DEBUG'] >= '2':
print("Collecting Datastore: " + ds_object.name)
hs_object.add_vm()
for vm_object in hs_object.vms:
if os.environ['DEBUG'] >= '2':
print("Collecting VM: " + vm_object.name)
return vcenter
def get_vcenters(self):
tree = dict()
for vcenter_entry in self.vcenter_dict:
vcenter = self.vcenter_dict[vcenter_entry]
tree[vcenter.uuid] = {
'uuid': vcenter.uuid,
'name': vcenter.name,
'target': vcenter.target,
'token': vcenter.token,
}
self.iterated_inventory[str(self.iteration)]['vcenters'] = tree
return tree
def get_datacenters(self):
tree = dict()
for vcenter_entry in self.vcenter_dict:
vcenter = self.vcenter_dict[vcenter_entry]
for dc in vcenter.datacenter:
tree[dc.name] = {
'uuid': dc.uuid,
'name': dc.name,
'parent_vcenter_uuid': vcenter.uuid,
'parent_vcenter_name': vcenter.name,
'target': dc.target,
'token': dc.token,
}
self.iterated_inventory[str(self.iteration)]['datacenters'] = tree
return tree
def get_clusters(self):
tree = dict()
for vcenter_entry in self.vcenter_dict:
vcenter = self.vcenter_dict[vcenter_entry]
for dc in vcenter.datacenter:
for cluster in dc.clusters:
tree[cluster.uuid] = {
'uuid': cluster.uuid,
'name': cluster.name,
'parent_dc_uuid': dc.uuid,
'parent_dc_name': dc.name,
'vcenter': vcenter.name,
'target': cluster.target,
'token': cluster.token,
}
self.iterated_inventory[str(self.iteration)]['clusters'] = tree
return tree
def get_hosts(self):
tree = dict()
for vcenter_entry in self.vcenter_dict:
vcenter = self.vcenter_dict[vcenter_entry]
for dc in vcenter.datacenter:
for cluster in dc.clusters:
for host in cluster.hosts:
tree[host.uuid] = {
'uuid': host.uuid,
'name': host.name,
'parent_cluster_uuid': cluster.uuid,
'parent_cluster_name': cluster.name,
'datacenter': dc.name,
'target': host.target,
'token': host.token,
}
self.iterated_inventory[str(self.iteration)]['hosts'] = tree
return tree
def get_datastores(self):
tree = dict()
for vcenter_entry in self.vcenter_dict:
vcenter = self.vcenter_dict[vcenter_entry]
for dc in vcenter.datacenter:
for cluster in dc.clusters:
for host in cluster.hosts:
for ds in host.datastores:
tree[ds.uuid] = {
'uuid': ds.uuid,
'name': ds.name,
'parent_host_uuid': host.uuid,
'parent_host_name': host.name,
'cluster': cluster.name,
'datacenter': dc.name,
'target': ds.target,
'token': ds.token,
}
self.iterated_inventory[str(self.iteration)]['datastores'] = tree
return tree
def get_vms(self):
tree = dict()
for vcenter_entry in self.vcenter_dict:
vcenter = self.vcenter_dict[vcenter_entry]
for dc in vcenter.datacenter:
for cluster in dc.clusters:
for host in cluster.hosts:
for vm in host.vms:
tree[vm.uuid] = {
'uuid': vm.uuid,
'name': vm.name,
'parent_host_uuid': host.uuid,
'parent_host_name': host.name,
'cluster': cluster.name,
'datacenter': dc.name,
'target': vm.target,
'token': vm.token,
}
self.iterated_inventory[str(self.iteration)]['vms'] = tree
return tree
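# Minimal consumer sketch (not part of the class above; assumes an InventoryBuilder
# instance is already serving its REST API on localhost at the given port and that
# the `requests` library is available). It shows how the /iteration and
# /hosts/<iteration> endpoints are meant to be combined: ask for the latest
# complete iteration first, then fetch that snapshot.
def fetch_current_hosts(port):
    import requests
    base = 'http://127.0.0.1:{}'.format(port)
    iteration = requests.get(base + '/iteration').text
    if iteration == '0':
        return {}  # no complete inventory iteration available yet
    return requests.get('{}/hosts/{}'.format(base, iteration)).json()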
|
server.py
|
#Advanced NGN: Rotational Program
#File: Database Server - Content Provider Front End
#Writer: Rohit Dilip Kulkarni
#Team Members: Mansi, Shikha, Rohit
#Dated: 31 March 2019
from flask import Flask, jsonify, request, render_template, Response
import json
import os
import requests
import time
import threading
app = Flask(__name__)
"""
/api/version
GET: Gets the version number
JSON: NONE
RETURN TRUE: {"success":True,"cp_version":1}
/cp/content
GET: Gets the contents of the customers files
JSON: {"username":"angn","password":"abcd","version":"12","url":"angn.com"}
RETURN TRUE: {'success':True,'data':file_data}
RETURN FALSE: {'success':False,'comment':'Unauthorized Access'}
{'success':False,'comment':'No such version found'}
CHECKS:
{'success':False,'comment':'Need admin username'}
{'success':False,'comment':'Need admin password'}
{'success':False,'comment':'Need version number'}
POST: Push the customers data
JSON: {"username":"angn","password":"abcd","version":"12","url":"angn.com","data":"http file data"}
RETURN TRUE: {'success':True}
RETURN FALSE: CHECKS
/account
GET: All the information of account db - only for admin
JSON: {"username":"admin","password":"admin"}
RETURN TRUE: {'success':True,'data':file_data - of cp_account_detail}
RETURN FALSE: {'success':False,'comment':'Unauthorized Access'}
CHECKS
POST: Creates new accounts
JSON: {"url":"angn.com","selection":"active-active","type_server":"different","replica":2}
RETURN TRUE: {'success':True}
RETURN FALSE: CHECKS
/account/users
GET: All the information of users db - only for admin
JSON: {"username":"admin","password":"admin"}
RETURN TRUE: {'success':True,'data':file-data of user_db}
RETURN FALSE: {'success':False,'comment':'Unauthorized Access'}
CHECKS
POST: Creates new accounts
JSON: {"username":"angn","password":"abcd","url":"mainpage.com","email":"boo@angn.com","owner":True}
RETURN TRUE: {'success':True}
RETURN FALSE: CHECKS
/cp/network
GET: for cp network data
JSON: {"username":"admin","password":"admin"}
RETURN TRUE: {'success':True,'data':customer_db - info for cp backend}
RETURN FALSE: CHECKS
/cp/statistics
GET: All the information of statistics db - only for admin
JSON: {"username":"admin","password":"admin"}
RETURN TRUE: {'success':True,'data':file-data of statistics}
RETURN FALSE: {'success':False,'comment':'Unauthorized Access'}
CHECKS
POST: Creates new file for statistics
JSON: {"username":"admin","password":"admin","statistics":{},"other":{}}
RETURN TRUE: {'success':True}
RETURN FALSE: CHECKS
/synch
GET: Information for server synch
/synch/files
GET: Sync the files from the primary server
"""
@app.route('/api/version', methods=['GET'])
def get_tasks():
global cp_version
return Response(json.dumps({'success':True,'cp_version':cp_version}), status=200, mimetype='application/json')
@app.route('/cp/content',methods=['GET','POST'])
#for main file service
def creating_file():
global cp_version
if request.method == "POST":
data=request.json
checks,filename,edit_check=creating_file_checks(data)
if checks != True:
return checks
else:
if edit_check != True:
return (edit_check)
with open(filename,'w') as fl:
json.dump(data, fl)
cp_version+=1
return (Response(json.dumps({'success':True}), status=200, mimetype='application/json'))
elif request.method == "GET":
data=request.json
checks,filename,edit_check=creating_file_checks(data)
if checks != True:
return checks
else:
if os.path.isfile(filename):
with open(filename, 'r') as fl:
file_data = json.load(fl)
return (Response(json.dumps({'success':True,'data':file_data}), status=200, mimetype='application/json'))
else:
return (Response(json.dumps({'success':False,'comment':'File not found - Incorrect Version'}), status=400, mimetype='application/json'))
def creating_file_checks(data):
global user_db
#return the HTTP - json error and filename
if 'username' not in data.keys(): #check if username field present in json
return ((Response(json.dumps({'success':False,'comment':'Need admin username'}), status=400, mimetype='application/json')),False,False)
elif 'password' not in data.keys(): #check if password field present in json
return ((Response(json.dumps({'success':False,'comment':'Need admin password'}), status=400, mimetype='application/json')),False,False)
elif 'version' not in data.keys(): #check if version field present in json
return ((Response(json.dumps({'success':False,'comment':'Need version number'}), status=400, mimetype='application/json')),False,False)
elif 'url' not in data.keys(): #check if url field present in json
return ((Response(json.dumps({'success':False,'comment':'Need url for webpage'}), status=400, mimetype='application/json')),False,False)
else:
if data['username'] != 'admin' or data['password'] != user_db['admin']['password']: #run per-user checks unless this is a valid admin login
if data['username'] not in user_db.keys():
return ((Response(json.dumps({'success':False,'comment':'No user found'}), status=400, mimetype='application/json')),False,False)
elif data['password'] != user_db[data['username']]["password"]:
return ((Response(json.dumps({'success':False,'comment':'Incorrect Password'}), status=400, mimetype='application/json')),False,False)
elif data['url'] != user_db[data['username']]["url"]:
return ((Response(json.dumps({'success':False,'comment':'Incorrect url edit request'}), status=400, mimetype='application/json')),False,False)
if user_db[data['username']]['owner'] != True: #check if the user is able to edit
edit_check=(Response(json.dumps({'success':False,'comment':'User has no auth to edit'}), status=400, mimetype='application/json'))
else:
edit_check=True
filename="cp_"+data['url']+"_"+data["version"]+".json"
return (True,filename,edit_check)
def admin_request_file(data):
#data {"username":"admin","password":"admin","filename":"<file>"}
global user_db
if sorted(['username','password','filename']) == sorted(data.keys()):
if data['username']=='admin' and data['password'] == user_db['admin']['password']:
if not os.path.isfile(data['filename']):
return (Response(json.dumps({'success':False,'comment':'File not present in server'}), status=400, mimetype='application/json'))
with open(data['filename'], 'r') as fl:
data = json.load(fl)
return (Response(json.dumps({'success':True,'data':data}), status=200, mimetype='application/json'))
else:
return (Response(json.dumps({'success':False,'comment':'Unauthorized Access'}), status=400, mimetype='application/json'))
else:
return (Response(json.dumps({'success':False,'comment':'Need admin username and password'}), status=400, mimetype='application/json'))
@app.route('/account',methods=['GET','POST'])
#for account details username and password
def acount_details():
global cp_version
global primary_master
global secondary_master
global account_db
global user_db
if request.method == "POST":
data=dict(request.json)
checks=acount_data_checks(data)
if checks != True:
return (checks)
elif sorted(['url','selection','type_server','replica']) == sorted(data.keys()): #ensure no extra fields have been added
account_db[data['url']]={}
account_db[data['url']]['primary']=primary_master
account_db[data['url']]['secondary']=secondary_master
account_db[data['url']]['selection']=data['selection']
account_db[data['url']]['type_server']=data['type_server']
account_db[data['url']]['replica']=data['replica']
#we need to set the vlan to 0 so that generate vlan function does not get an incorrect key!
account_db[data['url']]['vlan']=0
vlan_id=generate_vlan_id()
account_db[data['url']]['vlan']=vlan_id
account_db[data['url']]['port']=(vlan_id)*1000+30000 #3x000
account_db[data['url']]['ip']=("10.{}.0.1".format(vlan_id)) #10.x.0.1 virtual ip
with open('cp_account_detail.json','w') as fl:
json.dump(account_db, fl)
cp_version+=1
return Response(json.dumps({'success':True}), status=200, mimetype='application/json')
else :
return Response(json.dumps({'success':False,'content_structure_keys':"[url,selection]"}), status=400, mimetype='application/json')
elif request.method == "GET" :
data=dict(request.json)
data['filename']='cp_account_detail.json'
return (admin_request_file(data))
def acount_data_checks(data):
#return the HTTP - json error and filename
if 'url' not in data.keys(): #check if url field present in json
return ((Response(json.dumps({'success':False,'comment':'Need customers url'}), status=400, mimetype='application/json')))
elif 'selection' not in data.keys(): #check if selection field present in json
return (Response(json.dumps({'success':False,'comment':'Need Correct selections'}), status=400, mimetype='application/json'))
elif 'type_server' not in data.keys(): #check if type of server field present in json
return (Response(json.dumps({'success':False,'comment':'Need type_server'}), status=400, mimetype='application/json'))
elif 'replica' not in data.keys(): #check if replica field present in json
return (Response(json.dumps({'success':False,'comment':'Need replica sets'}), status=400, mimetype='application/json'))
else:
return (True)
def generate_vlan_id():
global account_db
l=[]
for cust in account_db.keys():
l.append(account_db[cust]['vlan'])
l=sorted(l)
return (l[-1]+1)
@app.route('/account/users',methods=['GET','POST'])
#for account details username and password
def user_details():
global cp_version
global user_db
if request.method == "POST":
data=dict(request.json)
checks=user_data_checks(data)
if checks != True:
return (checks)
else:
user_db[data['username']]={}
user_db[data['username']]['url']=data['url']
user_db[data['username']]['password']=data['password']
user_db[data['username']]['owner']=data['owner']
with open('cp_user_detail.json','w') as fl:
json.dump(user_db, fl)
cp_version+=1
return Response(json.dumps({'success':True}), status=200, mimetype='application/json')
elif request.method == "GET" :
data=dict(request.json)
data['filename']='cp_user_detail.json'
return (admin_request_file(data))
def user_data_checks(data):
#return the HTTP - json error and filename
if 'url' not in data.keys(): #check if url field present in json
return (Response(json.dumps({'success':False,'comment':'Need customers url'}), status=400, mimetype='application/json'))
elif 'username' not in data.keys(): #check if username field present in json
return (Response(json.dumps({'success':False,'comment':'Need customers username'}), status=400, mimetype='application/json'))
elif 'password' not in data.keys(): #check if password field present in json
return (Response(json.dumps({'success':False,'comment':'Need customers password'}), status=400, mimetype='application/json'))
elif sorted(['url','username','password','owner']) != sorted(data.keys()): #reject payloads with missing or extra fields
return (Response(json.dumps({'success':False,'content_structure_keys':"['url','username','password','owner']"}), status=400, mimetype='application/json'))
else:
return (True)
@app.route('/cp/network',methods=['GET'])
def provide_network_details():
global account_db
global user_db
data=request.json
if sorted(['username','password']) == sorted (data.keys()):
if data['username'] == 'admin' and data['password']== user_db['admin']['password']: # allow only admins
customer_db={}
for customers_url in account_db.keys(): #return only those values which the backend wants from account_db
#restructure the system for cp backend!
customer_db[account_db[customers_url]['ip']]={
'vlan':account_db[customers_url]['vlan'],
'primary':account_db[customers_url]['primary'],
'secondary':account_db[customers_url]['secondary'],
'selection':account_db[customers_url]['selection'],
'type_server':account_db[customers_url]['type_server'],
'replica':account_db[customers_url]['replica'],
'port':account_db[customers_url]['port'],
'url':customers_url}
customer_db=json.dumps(customer_db)
return Response(json.dumps({'success':True,'data':customer_db}), status=200, mimetype='application/json')
else:
return Response(json.dumps({'success':False,'comment':'Unauthorized Access'}), status=400, mimetype='application/json')
else:
return Response(json.dumps({'success':False,'content_structure_keys':"[username,password,selection]"}), status=400, mimetype='application/json')
@app.route('/cp/statistics',methods=['GET','POST'])
#for collecting and serving statistics
def statistics_collection():
global cp_version
global statistics_db
if request.method == "POST":
data=dict(request.json)
checks=statistics_collection_check(data)
if checks != True:
return (checks)
else:
with open('cp_statistics.json','w') as fl:
json.dump(data, fl)
cp_version+=1
return Response(json.dumps({'success':True}), status=200, mimetype='application/json')
elif request.method == "GET" :
data=dict(request.json)
data['filename']='cp_statistics.json'
return (admin_request_file(data))
def statistics_collection_check(data):
#return the HTTP - json error and filename
if 'statistics' not in data.keys(): #check if statistics field present in json
return (Response(json.dumps({'success':False,'comment':'Need statistics field'}), status=400, mimetype='application/json'))
elif 'username' not in data.keys(): #check if username field present in json
return (Response(json.dumps({'success':False,'comment':'Need admin username'}), status=400, mimetype='application/json'))
elif 'password' not in data.keys(): #check if password field present in json
return (Response(json.dumps({'success':False,'comment':'Need admin password'}), status=400, mimetype='application/json'))
elif data['username'] =='admin' and data['password'] == user_db['admin']['password']:
return (True)
else:
return (False)
#cp file synch
@app.route('/synch',methods=['GET'])
def synch_list():
data=cp_file_list()
return Response(json.dumps({'success':True,'data':data}), status=200, mimetype='application/json')
@app.route('/synch/files',methods=['GET'])
def synch_file():
data=dict(request.json)
return (admin_request_file(data))
def synch_database():
global secondary_server
global cp_version
global account_db
global user_db
while True:
try:
r = requests.get('http://'+secondary_server+'/api/version')
if r.status_code == 200:
secondary_seq_number= r.json()['cp_version']
print ("Secondary Server sequence number: {}".format(secondary_seq_number))
if secondary_seq_number == cp_version:
print ("Everything is synch... sleeping for 10 sec")
time.sleep (10)
else:
#request for the account file:
account_file=requests.get('http://'+secondary_server+'/synch/files',json={'username':'admin','password':user_db['admin']['password'],'filename':'cp_account_detail.json'})
if account_file.status_code == 200:
account_file=account_file.json()
if 'data' in account_file.keys():
account_db=account_file['data']
with open('cp_account_detail.json','w') as fl:
json.dump(account_db, fl)
#request for the account file:
user_file=requests.get('http://'+secondary_server+'/synch/files',json={'username':'admin','password':user_db['admin']['password'],'filename':'cp_user_detail.json'})
if user_file.status_code == 200:
user_file=user_file.json()
if 'data' in user_file.keys():
user_db=user_file['data']
with open('cp_user_detail.json','w') as fl:
json.dump(user_db, fl)
#request cp_statistics file:
statistics_file=requests.get('http://'+secondary_server+'/synch/files',json={'username':'admin','password':user_db['admin']['password'],'filename':'cp_statistics.json'})
if statistics_file.status_code == 200:
statistics_file=statistics_file.json()
if 'data' in statistics_file.keys():
statistics_db=statistics_file['data']
with open('cp_statistics.json','w') as fl:
json.dump(statistics_db, fl)
cp_version=secondary_seq_number # update the sequence number once all three files have been received
#getting the files missing
cp_files=cp_file_list()
server_files=requests.get('http://'+secondary_server+'/synch',json={'username':'admin','password':user_db['admin']['password']})
if server_files.status_code == 200:
server_files=server_files.json()['data']
for fls in cp_files: #those files which are already present in the system!
server_files.remove(fls)
if server_files != []:
for fls in server_files:
#get the files one by one!
new_fls=requests.get('http://'+secondary_server+'/synch/files',json={'username':'admin','password':user_db['admin']['password'],'filename':fls})
if new_fls.status_code == 200:
new_fls=new_fls.json()
if 'data' in new_fls.keys():
new_fls=new_fls['data']
with open(fls,'w') as fl:
json.dump(new_fls, fl)
print ("GOT A NEW FILE: {}".format(fls))
time.sleep(2)
time.sleep(10) #just sleep after one complete loop of while!
except Exception as E: # if the request is unable to follow
#print ("Error in connecting with secondary_server:{} | {}".format(secondary_server,E))
print ("Error in connecting with secondary_server:{}".format(secondary_server))
time.sleep(30)
def cp_file_list():
all_files=os.listdir("./")
cp_files=[]
for fls in all_files:
if fls.startswith("cp_"):
cp_files.append(fls)
print (cp_files)
return (cp_files)
def init_start():
global cp_version
global account_db
global user_db
global statistics_db
global primary_master
global secondary_master
#setting the service
cp_version=0
cp_version_request = [
{
'cp_version':cp_version
}
]
#Getting the account details from the file!
if os.path.isfile("./cp_account_detail.json"):
with open('cp_account_detail.json','r') as fl:
account_db = json.load(fl)
for url in account_db.keys():
cp_version+=1
print (account_db)
else:
account_db={
'cpnetworks':{
'vlan':1,
'primary':primary_master,
'secondary':secondary_master,
'selection':'active-active',
'type_server': 'different',
'replica':1,
'port':30001,
'ip':'10.1.0.1'
}
}
with open('cp_account_detail.json','w') as fl:
json.dump(account_db, fl)
#Getting User detail from the file!
if os.path.isfile("./cp_user_detail.json"):
with open('cp_user_detail.json','r') as fl:
user_db = json.load(fl)
for users in user_db.keys():
cp_version+=1
print (user_db)
else:
user_db={
'admin':{
'password':'admin',
'url':'cpnetworks',
'owner':True
}
}
with open('cp_user_detail.json','w') as fl:
json.dump(user_db, fl)
#getting url target hits:
if os.path.isfile("./cp_statistics.json"):
with open('cp_statistics.json','r') as fl:
statistics_db = json.load(fl)
cp_version+=1
else:
statistics_db={
'statistics':{},
'other': {}
}
with open('cp_statistics.json','w') as fl:
json.dump(statistics_db, fl)
t=threading.Thread(target=synch_database,args = ())
t.daemon=True
t.start()
return ()
if __name__ == '__main__':
primary_master='192.168.0.1'
secondary_master='192.168.0.2'
secondary_server="127.0.0.1:9292"
cp_version=0
account_db={}
user_db={}
statistics_db={}
init_start()
app.run(debug=True, host='0.0.0.0', port=9191)
|
master_sync.py
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import socket
import threading
import time
import uuid
import xmlrpclib
from fkie_multimaster_msgs.msg import MasterState # , LinkState, LinkStatesStamped, MasterState, ROSMaster, SyncMasterInfo, SyncTopicInfo
from fkie_multimaster_msgs.srv import DiscoverMasters, GetSyncInfo, GetSyncInfoResponse
import rospy
from fkie_master_discovery.common import masteruri_from_master, resolve_url, read_interface, create_pattern, is_empty_pattern
from fkie_master_discovery.master_info import MasterInfo
import fkie_master_discovery.interface_finder as interface_finder
from .sync_thread import SyncThread
class Main(object):
'''
'''
UPDATE_INTERVALL = 30
def __init__(self):
'''
Creates a new instance. Find the topic of the master_discovery node using
U{fkie_master_discovery.interface_finder.get_changes_topic()
<http://docs.ros.org/api/fkie_master_discovery/html/modules.html#interface-finder-module>}.
Also the parameter C{~ignore_hosts} will be analyzed to exclude hosts from sync.
'''
self.masters = {}
# the connection to the local service master
self.materuri = masteruri_from_master()
'''@ivar: the ROS master URI of the C{local} ROS master. '''
self.__lock = threading.RLock()
# load interface
self._load_interface()
# subscribe to changes notifier topics
self._check_host = rospy.get_param('~check_host', True)
topic_names = interface_finder.get_changes_topic(masteruri_from_master(), check_host=self._check_host)
self.sub_changes = dict()
'''@ivar: `dict` with topics {name: U{rospy.Subscriber<http://docs.ros.org/api/rospy/html/rospy.topics.Subscriber-class.html>}} publishes the changes of the discovered ROS masters.'''
for topic_name in topic_names:
rospy.loginfo("listen for updates on %s", topic_name)
self.sub_changes[topic_name] = rospy.Subscriber(topic_name, MasterState, self._rosmsg_callback_master_state)
self.__timestamp_local = None
self.__own_state = None
self.update_timer = None
self.own_state_getter = None
self._join_threads = dict() # threads waiting for stopping the sync thread
# initialize the ROS services
rospy.Service('~get_sync_info', GetSyncInfo, self._rosservice_get_sync_info)
rospy.on_shutdown(self.finish)
self.obtain_masters()
def _rosmsg_callback_master_state(self, data):
'''
The method to handle the received MasterState messages. Based on this message
new threads to synchronize with remote ROS master will be created, updated or
removed.
@param data: the received message
@type data: U{fkie_master_discovery.MasterState
<http://docs.ros.org/api/fkie_multimaster_msgs/html/msg/MasterState.html>}
'''
with self.__lock:
if not rospy.is_shutdown():
if data.state in [MasterState.STATE_REMOVED]:
self.remove_master(data.master.name)
elif data.state in [MasterState.STATE_NEW, MasterState.STATE_CHANGED]:
m = data.master
self.update_master(m.name, m.uri, m.timestamp, m.timestamp_local, m.discoverer_name, m.monitoruri, m.online)
def obtain_masters(self):
'''
This method use the service 'list_masters' of the master_discoverer to get
the list of discovered ROS master. Based on this list the L{SyncThread} for
synchronization will be created.
@see: U{fkie_master_discovery.interface_finder.get_listmaster_service()
<http://docs.ros.org/api/fkie_master_discovery/html/modules.html#interface-finder-module>}
'''
if not rospy.is_shutdown():
service_names = interface_finder.get_listmaster_service(masteruri_from_master(), False, check_host=self._check_host)
for service_name in service_names:
try:
with self.__lock:
try:
socket.setdefaulttimeout(5)
discoverMasters = rospy.ServiceProxy(service_name, DiscoverMasters)
resp = discoverMasters()
masters = []
master_names = [m.name for m in resp.masters]
rospy.loginfo("ROS masters obtained from '%s': %s", service_name, master_names)
for m in resp.masters:
if self._can_sync(m.name): # do not sync to the master, if it is in ignore list or not in filled sync list
masters.append(m.name)
self.update_master(m.name, m.uri, m.timestamp, m.timestamp_local, m.discoverer_name, m.monitoruri, m.online)
for key in set(self.masters.keys()) - set(masters):
self.remove_master(self.masters[key].name)
except rospy.ServiceException, e:
rospy.logwarn("ERROR Service call 'list_masters' failed: %s", str(e))
except:
import traceback
rospy.logwarn("ERROR while initial list masters: %s", traceback.format_exc())
finally:
socket.setdefaulttimeout(None)
self.update_timer = threading.Timer(self.UPDATE_INTERVALL, self.obtain_masters)
self.update_timer.start()
def update_master(self, mastername, masteruri, timestamp, timestamp_local, discoverer_name, monitoruri, online):
'''
Updates the timestamp of the given ROS master, or creates a new L{SyncThread} to
synchronize the local master with given ROS master.
@param mastername: the name of the remote ROS master to update or synchronize.
@type mastername: C{str}
@param masteruri: the URI of the remote ROS master.
@type masteruri: C{str}
@param timestamp: the timestamp of the remote ROS master.
@type timestamp: C{float64}
@param timestamp_local: the timestamp of the remote ROS master. (only local changes)
@type timestamp_local: C{float64}
@param discoverer_name: the name of the remote master_discoverer node
@type discoverer_name: C{str}
@param monitoruri: the URI of the RPC interface of the remote master_discoverer node.
@type monitoruri: C{str}
@param online: the current state on the master.
@type online: C{bool}
'''
try:
with self.__lock:
if (masteruri != self.materuri):
if self._can_sync(mastername):
# do not sync to the master, if it is in ignore list
if self.__resync_on_reconnect and mastername in self.masters:
self.masters[mastername].set_online(online, self.__resync_on_reconnect_timeout)
if online:
if mastername in self.masters:
# update only if local changes have occurred
self.masters[mastername].update(mastername, masteruri, discoverer_name, monitoruri, timestamp_local)
else:
self.masters[mastername] = SyncThread(mastername, masteruri, discoverer_name, monitoruri, 0.0, self.__sync_topics_on_demand)
if self.__own_state is not None:
self.masters[mastername].set_own_masterstate(MasterInfo.from_list(self.__own_state))
self.masters[mastername].update(mastername, masteruri, discoverer_name, monitoruri, timestamp_local)
elif self.__timestamp_local != timestamp_local and self.__sync_topics_on_demand:
# get the master info from local discovery master and set it to all sync threads
self.own_state_getter = threading.Thread(target=self.get_own_state, args=(monitoruri,))
self.own_state_getter.start()
except:
import traceback
rospy.logwarn("ERROR while update master[%s]: %s", str(mastername), traceback.format_exc())
def get_own_state(self, monitoruri):
'''
Gets the master info from local master discovery and set it to all sync threads.
This function is running in a thread!!!
'''
try:
socket.setdefaulttimeout(3)
own_monitor = xmlrpclib.ServerProxy(monitoruri)
self.__own_state = own_monitor.masterInfo()
own_state = MasterInfo.from_list(self.__own_state)
socket.setdefaulttimeout(None)
with self.__lock:
# update the state for all sync threads
for (_, s) in self.masters.iteritems():
s.set_own_masterstate(own_state, self.__sync_topics_on_demand)
self.__timestamp_local = own_state.timestamp_local
except:
import traceback
rospy.logwarn("ERROR while getting own state from '%s': %s", monitoruri, traceback.format_exc())
socket.setdefaulttimeout(None)
time.sleep(3)
if self.own_state_getter is not None and not rospy.is_shutdown():
self.own_state_getter = threading.Thread(target=self.get_own_state, args=(monitoruri,))
self.own_state_getter.start()
def remove_master(self, ros_master_name):
'''
Removes the master with given name from the synchronization list.
@param ros_master_name: the name of the ROS master to remove.
@type ros_master_name: C{str}
'''
try:
with self.__lock:
if ros_master_name in self.masters:
m = self.masters.pop(ros_master_name)
ident = uuid.uuid4()
thread = threading.Thread(target=self._threading_stop_sync, args=(m, ident))
self._join_threads[ident] = thread
thread.start()
except Exception:
import traceback
rospy.logwarn("ERROR while removing master[%s]: %s", ros_master_name, traceback.format_exc())
def _threading_stop_sync(self, sync_thread, ident):
if isinstance(sync_thread, SyncThread):
rospy.loginfo(" Stop synchronization to `%s`" % sync_thread.name)
sync_thread.stop()
with self.__lock:
del self._join_threads[ident]
rospy.loginfo(" Finished synchronization to `%s`" % sync_thread.name)
del sync_thread
def finish(self, msg=''):
'''
Removes all remote masters and unregister their topics and services.
'''
rospy.loginfo("Stop synchronization...")
with self.__lock:
# stop update timer
rospy.loginfo(" Stop timers...")
if self.update_timer is not None:
self.update_timer.cancel()
# unregister from update topics
rospy.loginfo(" Unregister from master discovery...")
for (_, v) in self.sub_changes.iteritems():
v.unregister()
self.own_state_getter = None
# Stop all sync threads
for key in self.masters.keys():
rospy.loginfo(" Remove master: %s", key)
self.remove_master(key)
# wait for their ending
while len(self._join_threads) > 0:
rospy.loginfo(" Wait for ending of %s threads ...", str(len(self._join_threads)))
time.sleep(1)
rospy.loginfo("Synchronization is now off")
def _rosservice_get_sync_info(self, req):
'''
Callback for the ROS service to get the info to synchronized nodes.
'''
masters = list()
try:
with self.__lock:
for (_, s) in self.masters.iteritems():
masters.append(s.get_sync_info())
except:
import traceback
traceback.print_exc()
finally:
return GetSyncInfoResponse(masters)
def _load_interface(self):
interface_file = resolve_url(rospy.get_param('~interface_url', ''))
if interface_file:
rospy.loginfo("interface_url: %s", interface_file)
try:
data = read_interface(interface_file) if interface_file else {}
# set the ignore hosts list
self._re_ignore_hosts = create_pattern('ignore_hosts', data, interface_file, [])
# set the sync hosts list
self._re_sync_hosts = create_pattern('sync_hosts', data, interface_file, [])
self.__sync_topics_on_demand = False
if interface_file:
if 'sync_topics_on_demand' in data:
self.__sync_topics_on_demand = data['sync_topics_on_demand']
elif rospy.has_param('~sync_topics_on_demand'):
self.__sync_topics_on_demand = rospy.get_param('~sync_topics_on_demand')
rospy.loginfo("sync_topics_on_demand: %s", self.__sync_topics_on_demand)
self.__resync_on_reconnect = rospy.get_param('~resync_on_reconnect', True)
rospy.loginfo("resync_on_reconnect: %s", self.__resync_on_reconnect)
self.__resync_on_reconnect_timeout = rospy.get_param('~resync_on_reconnect_timeout', 0)
rospy.loginfo("resync_on_reconnect_timeout: %s", self.__resync_on_reconnect_timeout)
except:
import traceback
# kill the ros node, to notify the user about the error
rospy.logerr("Error on load interface: %s", traceback.format_exc())
import os
import signal
os.kill(os.getpid(), signal.SIGKILL)
def _can_sync(self, mastername):
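        # Evaluate the ignore_hosts / sync_hosts patterns loaded in
        # _load_interface() to decide whether the given master may be synchronized.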
result = False
if is_empty_pattern(self._re_ignore_hosts):
if is_empty_pattern(self._re_sync_hosts):
result = True
elif self._re_sync_hosts.match(mastername) is not None:
result = True
elif self._re_ignore_hosts.match(mastername) is None:
result = True
elif not is_empty_pattern(self._re_sync_hosts):
if self._re_sync_hosts.match(mastername) is not None:
result = True
return result
|
19.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import asyncio
import time
from threading import Thread
now = lambda: time.time()
def start_loop(loop):
asyncio.set_event_loop(loop)
loop.run_forever()
def more_work(x):
print('More work {}'.format(x))
time.sleep(x)
print('Finished more work {}'.format(x))
start = now()
new_loop = asyncio.new_event_loop()
t = Thread(target=start_loop, args=(new_loop,))
t.start()
print('TIME: {}'.format(time.time() - start))
new_loop.call_soon_threadsafe(more_work, 6)
new_loop.call_soon_threadsafe(more_work, 3)
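# Note: because more_work blocks with time.sleep, the two callbacks scheduled
# above run one after the other on the background loop's thread, while the main
# thread continues immediately. Submitting coroutines with
# asyncio.run_coroutine_threadsafe would be the non-blocking alternative.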
|
PopUp.py
|
#!/usr/bin/python3
from __future__ import print_function, division
try:
import tkinter as tk
except ImportError:
import Tkinter as tk
import multiprocessing as mp
import sys
from time import sleep
import ctypes
# User32.dll
if getattr(ctypes, "windll", False):
User32 = ctypes.windll.User32
else:
User32 = None
LOG_FILE_NAME = "Log-PopUpNotification.log"
def get_dimension_using_Tk(root, default_placing_manager,
string, font, font_size):
longest_line_label =\
tk.Label(root, text=string, bg="black", fg="grey",
font=(font, font_size))
# longest_line_label.configure(anchor="center")
try:
getattr(longest_line_label, default_placing_manager)()
except AttributeError:
print("Only (pack|grid|place) are allowed as default_placing_manager")
return -1, -1
longest_line_label.update_idletasks()
width = longest_line_label.winfo_width()
height = longest_line_label.winfo_height()
longest_line_label.destroy()
return width, height
def show_messagebox(title, message):
FONT_SIZE = 16
PAD_X = 20
PAD_Y = 10
main_window = tk.Tk()
main_window.overrideredirect(1)
main_window.call("wm", "attributes", ".", "-topmost", "true")
main_window.attributes('-alpha', 0.8)
main_window.config(bg="black")
main_frame = tk.Frame(main_window)
main_frame.configure(bg="black")
main_frame.grid()
all_lines = []
all_lines.extend(title.split("\n"))
all_lines.append("\n")
all_lines.extend(message.split("\n"))
number_of_lines = len(all_lines)
longest_line = ""
for _, line in enumerate(all_lines):
if len(line) > len(longest_line):
longest_line = line
longest_line_width, _ = get_dimension_using_Tk(main_frame, "grid",
longest_line,
"Times New Roman",
FONT_SIZE)
longest_line_width += 2 * PAD_X
line_height = FONT_SIZE * 1.50
all_lines_height = int(number_of_lines * line_height)
all_lines_height += 2 * PAD_Y
all_lines_height += 10
least_width = 400
least_height = 65
least_width = least_width\
if longest_line_width < least_width else longest_line_width
least_height = least_height\
if all_lines_height < least_height else all_lines_height
# Smooth entry
width = 0
while width < least_width:
main_window.geometry("{}x{}-0-40".format(width, least_height))
main_window.update()
main_window.update_idletasks()
width += 20
title_label =\
tk.Label(main_window, text=title, bg="black", fg="silver",
font=("Times New Roman", FONT_SIZE))
# title_label.configure(anchor="w")
title_label.grid(row=0, column=0, padx=PAD_X-5, pady=PAD_Y+5, sticky="W")
# title_label.pack(padx=20)
message_label =\
tk.Label(main_window, text=message, bg="black", fg="grey",
font=("Times New Roman", FONT_SIZE))
message_label.configure(anchor="center")
message_label.grid(row=2, column=0, padx=PAD_X, pady=PAD_Y)
# message_label.pack()
main_window.mainloop()
def show_notification(title, message, SECONDS=3, is_daemon=True, LOG=False):
# Wait while the workstation appears to be locked (no foreground window)
if User32 is not None:
while User32.GetForegroundWindow() == 0:
sleep(5)
if LOG:
f = open(LOG_FILE_NAME, "a")
f.write("####START_ENTRY####\n")
f.write(title)
f.write("\n")
f.write(message)
f.write("\n")
f.write("####END_ENTRY####\n")
f.close()
box_process = mp.Process(target=show_messagebox, args=(title, message))
box_process.daemon = is_daemon
box_process.start()
sleep(SECONDS)
box_process.terminate()
if getattr(sys, "frozen", False):
mp.freeze_support()
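# Usage sketch: pop up a notification for three seconds in its own process.
# (Illustrative only; it needs a display, and on non-Windows systems the
# lock-screen check is skipped because User32 is None.)
if __name__ == "__main__":
    show_notification("Reminder", "Stand up and stretch!", SECONDS=3)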
|
interprocess.py
|
'''
this is to make sure our design for interprocess communication will work
we want to have multiple threads modifying and reading the same objects...
if this works well we could perhaps replace the loops with listeners and
behavior subjects as streams.
'''
import threading
import time
import datetime as dt
class DataManager:
def __init__(self):
self.updates = {}
self.availableInputs = [0,1,2,3,4,5,6,7,8,9]
self.helperTextCounter = 0
def runSubscriber(self, models):
self.helperTextCounter += 1 # only one stream updates
if self.helperTextCounter not in self.availableInputs:
self.helperTextCounter = 1
self.updates[self.helperTextCounter] = str(dt.datetime.utcnow().second)
for model in models:
if self.helperTextCounter in model.inputs:
model.inputsUpdated = True
def runPublisher(self, models):
for model in models:
if model.predictionUpdated == True:
model.predictionUpdated = False
print(f'Publishing: {model.name()}: {model.prediction}')
def runScholar(self, models):
newInput = self.availableInputs[-1] + 1
self.availableInputs.append(newInput)
for model in models:
model.newAvailableInputs.append(newInput)
print(f'runScholar - {model.name()}: {model.inputs}')
class ModelManager:
def __init__(self, name, inputs):
self.targetKey = name
self.inputs = inputs
self.model = None
self.prediction = None
self.updates = {}
# flags
self.modelUpdated = False
self.inputsUpdated = False
self.predictionUpdated = False
self.newAvailableInputs = []
def name(self):
return self.targetKey
def runPredictor(self, data):
# this would avoid two things writing at the same time maybe? replace flags with logic like this if its a problem.
# all([True if data.updates[i] == self.lastUpdates[i] else False for i in self.inputs])
# or we could have two flags - write on our own system read on the other
if self.model != None and (self.modelUpdated or self.inputsUpdated):
if self.modelUpdated:
self.modelUpdated = False
if self.inputsUpdated:
self.inputsUpdated = False
self.prediction = str(dt.datetime.utcnow().second)
self.predictionUpdated = True
for i in self.inputs:
self.updates[i] = data.updates.get(i)
print(f'{self.targetKey} using: {self.model} with: {self.updates} prediction: {self.prediction}')
def runExplorer(self, data):
if self.newAvailableInputs != []:
self.inputs = self.inputs + self.newAvailableInputs
self.newAvailableInputs = []
self.model = str(dt.datetime.utcnow())
self.modelUpdated = True
print(f'{self.targetKey} runExplorer')
class Learner:
def __init__(
self,
data:DataManager=None,
model:ModelManager=None,
models:'set(ModelManager)'=None,
):
'''
data - a DataManager for the data
model - a ModelManager for the model
models - a list of ModelManagers
'''
self.data = data
self.models = models
if model is not None:
self.models = (self.models or set()) | {model}
def run(self):
'''
Main Loops - one for each model and one for the data manager.
'''
def subscriber():
''' loop for data '''
def rest():
x = 9
if x == -1:
while True:
time.sleep(60*60)
time.sleep(x)
while True:
#rest()
self.data.runSubscriber(self.models)
def publisher():
''' loop for data '''
def rest():
x = 1
if x == -1:
while True:
time.sleep(60*60)
time.sleep(x)
while True:
#rest()
self.data.runPublisher(self.models)
def scholar():
''' loop for data '''
def rest():
x = 30
if x == -1:
while True:
time.sleep(60*60)
time.sleep(x)
while True:
#rest()
self.data.runScholar(self.models)
def predictor(model:ModelManager):
''' loop for producing predictions '''
def rest():
x = 4
if x == -1:
while True:
time.sleep(60*60)
time.sleep(x)
while True:
#rest()
model.runPredictor(self.data)
def explorer(model:ModelManager):
''' loop for producing models '''
def rest():
x = 13
if x == -1:
while True:
time.sleep(60*60)
time.sleep(x)
while True:
#rest()
model.runExplorer(self.data)
threads = {}
threads['subscriber'] = threading.Thread(target=subscriber, daemon=True)
threads['publisher'] = threading.Thread(target=publisher, daemon=True)
threads['scholar'] = threading.Thread(target=scholar, daemon=True)
predictions = {}
scores = {}
inputs = {}
for model in self.models:
threads[f'{model.targetKey}.predictor'] = threading.Thread(target=predictor, args=[model], daemon=True)
threads[f'{model.targetKey}.explorer'] = threading.Thread(target=explorer, args=[model], daemon=True)
predictions[model.targetKey] = ''
scores[model.targetKey] = ''
inputs[model.targetKey] = []
for thread in threads.values():
print('starting')
thread.start()
while threading.active_count() > 0:
time.sleep(1)  # keep the main thread alive without busy-waiting
# python .\tests\scratch\interprocess.py
learner = Learner(
data=DataManager(),
models={
ModelManager(name='A', inputs=[1,2,3]),
ModelManager(name='B', inputs=[2,3,4]),
ModelManager(name='C', inputs=[3,5,6])
}
)
learner.run()
|
library.py
|
import glob
import os
import random
import time
from threading import Thread
from itertools import chain
from more_itertools import peekable
import re
from typing import List, Iterable
class ClipLibrary:
def __init__(self, folder: str, log: bool = True, auto_update: bool = True):
if log:
print("Building clip library...")
self.hosts = ClipPool(os.path.join(folder, "hosts"))
self.music = ClipPool(os.path.join(folder, "music"))
self.night = ClipPool(os.path.join(folder, "night"))
self.other = ClipPool(folder)
self.folder = folder
self.abs_path = os.path.abspath(folder)
if log:
print(" ->", self.music.size() + self.night.size(), "songs")
print(" ->", self.hosts.size(), "host clips")
if auto_update:
Thread(target=self._update_thread, name="LibUpdateThread", daemon=True).start()
def update(self) -> None:
print("Updating library...")
self.hosts.scan()
self.music.scan()
self.night.scan()
self.other.scan()
def _update_thread(self) -> None:
while True:
# wait 30min
time.sleep(30 * 60)
# update library
self.update()
def _filter(self, search: str) -> Iterable[str]:
return chain(
self.music.filter(search),
self.night.filter(search),
self.other.filter(search)
)
def search_clips(self, search: str, short_path: bool = False) -> List[str]:
# get all paths matching the search term
raw_results = peekable(self._filter(search))
# do extended search if there are no matches
if raw_results.peek(None) is None:
delimiters = [".", " ", "-", "_"]
search_parts = list(filter(
lambda s: len(s.strip()) > 0,
re.split("|".join(map(re.escape, delimiters)), search)
))
if len(search_parts) > 0 and search != search_parts[0]:
parts = iter(search_parts)
results = self._filter(next(parts))
for search_part in parts:
results = filter(
lambda x: search_part in x.lower(),
results
)
raw_results = peekable(results)
# return only relative paths if short_path is true
n = 0
if short_path:
n = len(self.folder)
# also remove /
if not self.folder[-1] == os.sep:
n += 1
clean_results = map(lambda x: x[n:], raw_results)
return list(clean_results)
class ClipPool:
def __init__(self, folder: str):
assert os.path.exists(folder), "The folder for this ClipPool does not exist"
self.clips: List[str] = []
self._history: List[int] = []
self._history_len: int = 0
self.folder = folder
self.scan()
def empty(self) -> bool:
return len(self.clips) == 0
def next(self) -> str:
assert not self.empty(), "Cannot pick clip from empty pool"
# find a clip that is not in the recent history
idx = random.randrange(0, len(self.clips))
while idx in self._history:
idx = random.randrange(0, len(self.clips))
# add to recent history
self._history.append(idx)
if len(self._history) > self._history_len:
del self._history[0]
return self.clips[idx]
def filter(self, search: str) -> Iterable[str]:
ls = search.lower()
return filter(
lambda x: ls in x.lower(),
self.clips
)
def size(self) -> int:
return len(self.clips)
def scan(self) -> None:
self.clips = glob.glob(os.path.join(self.folder, "*.*"))
size = len(self.clips)
self._history_len = min(size - 1, min(max(size//10, 10), 42))
self._history = []
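# Usage sketch (assumption: a "clips" directory exists with the "hosts",
# "music" and "night" sub-folders that ClipLibrary/ClipPool expect).
def example_search(term: str = "sunrise") -> List[str]:
    library = ClipLibrary("clips", log=False, auto_update=False)
    return library.search_clips(term, short_path=True)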
|
handythread.py
|
import sys
import time
import threading
import itertools
zip = getattr(itertools, 'izip', zip)
#from itertools import izip, count
#http://wiki.scipy.org/Cookbook/Multithreading
#https://github.com/scipy/scipy-cookbook/blob/master/ipython/attachments/Multithreading/handythread.py
def foreach(f,l,threads=3,return_=False):
"""
Apply f to each element of l, in parallel
"""
if threads>1:
iteratorlock = threading.Lock()
exceptions = []
if return_:
n = 0
d = {}
i = zip(itertools.count(),l.__iter__())
else:
i = l.__iter__()
def runall():
while True:
iteratorlock.acquire()
try:
try:
if exceptions:
return
v = next(i)#.next()
finally:
iteratorlock.release()
except StopIteration:
return
try:
if return_:
n,x = v
d[n] = f(x)
else:
f(v)
except:
e = sys.exc_info()
iteratorlock.acquire()
try:
print(e)
exceptions.append(e)
finally:
iteratorlock.release()
threadlist = [threading.Thread(target=runall) for j in range(threads)]
for t in threadlist:
t.start()
for t in threadlist:
t.join()
if exceptions:
exc_type, exc_value, exc_tb = exceptions[0]
raise exc_value.with_traceback(exc_tb)  # re-raise the first worker exception with its traceback
if return_:
r = sorted(d.items())
# r.sort()
return [v for (n,v) in r]
else:
if return_:
return [f(v) for v in l]
else:
for v in l:
f(v)
return
def parallel_map(f,l,threads=3):
return foreach(f,l,threads=threads,return_=True)
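# Usage sketch: map a function over a range with three worker threads;
# results are returned in input order.
if __name__ == "__main__":
    print(parallel_map(lambda x: x * x, range(10), threads=3))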
|
decorators.py
|
import functools
import resource
from matplotlib import pyplot as pd
from threading import Thread
from datetime import datetime
from time import sleep
from .templates import time_result
def monitor(measure):
def time_monitor(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
start_time = datetime.now()
func(*args, **kwargs)
end_time = datetime.now()
print(time_result.format(
start_time=start_time.time(),
end_time=end_time.time(),
process_time=end_time - start_time,
))
return wrapper
def cpu_monitor(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
thread = Thread(target=func, args=args, kwargs=kwargs)  # forward the wrapped call's arguments
thread.start()
# TODO: override if provided
refresh_period = 0.1 # Seconds
cpu_usage = dict()
while thread.is_alive():
sleep(refresh_period)
cpu_usage[datetime.now()] = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
print("======================================")
for time, usage in cpu_usage.items():
print(time.time(), ": ", usage)
pd.plot(cpu_usage.keys(), cpu_usage.values())
try:
pd.savefig("figures/figure.png")
except FileNotFoundError:
import os
os.mkdir("figures")
pd.savefig("figures/figure.png")
return wrapper
monitor_map = dict(
time=time_monitor,
cpu=cpu_monitor,
)
if measure not in monitor_map.keys():
raise AssertionError(
f"'{measure}' is not a valid measure! "
f"Please choose a correct measure: {list(monitor_map.keys())}"
)
return monitor_map[measure]
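# Usage sketch (assumption: this module is imported as part of its package so the
# relative `.templates` import resolves). monitor('time') prints wall-clock timings;
# monitor('cpu') samples resource.getrusage while the call runs, and note that
# ru_maxrss is actually peak resident memory rather than CPU time.
def _example():
    @monitor('time')
    def crunch():
        sleep(1.5)
    crunch()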
|
multi_echo_server.py
|
import socket
from multiprocessing import Process
HOST = ""
PORT = 8001
BUFFER_SIZE = 1024
def main():
with socket.socket(socket.AF_INET,socket.SOCK_STREAM) as s:
s.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
s.bind((HOST,PORT))
s.listen(10)
while True:
conn, addr = s.accept()
p = Process(target = handle_echo, args = (addr, conn))
p.daemon = True
p.start()
print("Started process ", p)
def handle_echo(addr, conn):
print("Connected by" , addr)
full_data = conn.recv(BUFFER_SIZE)
conn.sendall(full_data)
conn.shutdown(socket.SHUT_WR)
conn.close()
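# Client sketch for manual testing (assumption: the server above is already
# running on localhost). Sends one message and prints the echoed reply.
def echo_client(message=b"hello"):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as c:
        c.connect(("127.0.0.1", PORT))
        c.sendall(message)
        print(c.recv(BUFFER_SIZE))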
if __name__ == "__main__":
main()
|
server.py
|
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import random
import threading
import os
from collections import defaultdict
from bokeh.application import Application
from bokeh.application.handlers import FunctionHandler
from bokeh.server.server import Server
import jinja2
from tornado import web, ioloop
from .. import kvstore
from ..compat import six
from ..utils import get_next_port
from ..config import options
from ..scheduler import GraphActor, ResourceActor
from ..api import MarsAPI
logger = logging.getLogger(__name__)
def get_jinja_env():
from datetime import datetime
from ..utils import readable_size
_jinja_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
)
def format_ts(value):
if value is None:
return None
return datetime.fromtimestamp(value).strftime('%Y-%m-%d %H:%M:%S')
_jinja_env.filters['format_ts'] = format_ts
_jinja_env.filters['readable_size'] = readable_size
return _jinja_env
class BokehStaticFileHandler(web.StaticFileHandler):
@classmethod
def get_absolute_path(cls, root, path):
from bokeh import server
path_parts = path.rsplit('/', 1)
if 'bokeh' in path_parts[-1]:
root = os.path.join(os.path.dirname(server.__file__), "static")
return super(BokehStaticFileHandler, cls).get_absolute_path(root, path)
def validate_absolute_path(self, root, absolute_path):
from bokeh import server
path_parts = absolute_path.rsplit('/', 1)
if 'bokeh' in path_parts[-1]:
root = os.path.join(os.path.dirname(server.__file__), "static")
return super(BokehStaticFileHandler, self).validate_absolute_path(root, absolute_path)
class MarsWebAPI(MarsAPI):
def __init__(self, scheduler_ip):
super(MarsWebAPI, self).__init__(scheduler_ip)
def get_tasks_info(self):
sessions = defaultdict(dict)
for session_id, session_ref in six.iteritems(self.session_manager.get_sessions()):
session_desc = sessions[session_id]
session_desc['id'] = session_id
session_desc['name'] = session_id
session_desc['tasks'] = dict()
session_ref = self.actor_client.actor_ref(session_ref)
for graph_key, graph_ref in six.iteritems(session_ref.get_graph_refs()):
task_desc = dict()
state = self.kv_store.read(
'/sessions/%s/graph/%s/state' % (session_id, graph_key)).value
if state == 'PREPARING':
task_desc['state'] = state.lower()
session_desc['tasks'][graph_key] = task_desc
continue
graph_ref = self.actor_client.actor_ref(graph_ref)
task_desc['id'] = graph_key
task_desc['state'] = graph_ref.get_state().value
start_time, end_time, graph_size = graph_ref.get_graph_info()
task_desc['start_time'] = start_time
task_desc['end_time'] = end_time or 'N/A'
task_desc['graph_size'] = graph_size or 'N/A'
session_desc['tasks'][graph_key] = task_desc
return sessions
def get_task_detail(self, session_id, task_id):
graph_uid = GraphActor.gen_name(session_id, task_id)
graph_ref = self.get_actor_ref(graph_uid)
return graph_ref.calc_stats()
def get_workers_meta(self):
resource_uid = ResourceActor.default_name()
resource_ref = self.get_actor_ref(resource_uid)
return resource_ref.get_workers_meta()
class MarsWeb(object):
def __init__(self, port=None, scheduler_ip=None):
self._port = port
self._scheduler_ip = scheduler_ip
self._server = None
self._server_thread = None
@property
def port(self):
return self._port
def start(self, event=None, block=False):
try:
ioloop.IOLoop.current()
except RuntimeError:
if six.PY3:
import asyncio
asyncio.set_event_loop(asyncio.new_event_loop())
loop = None
try:
loop = ioloop.IOLoop.current()
except:
pass
if loop is None:
raise
else:
raise
if self._scheduler_ip is None:
kv_store = kvstore.get(options.kv_store)
try:
schedulers = [s.key.rsplit('/', 1)[1] for s in kv_store.read('/schedulers').children]
self._scheduler_ip = random.choice(schedulers)
except KeyError:
raise KeyError('No scheduler is available')
static_path = os.path.join(os.path.dirname(__file__), 'static')
handlers = dict()
for p, h in _ui_handlers.items():
handlers[p] = Application(FunctionHandler(functools.partial(h, self._scheduler_ip)))
extra_patterns = [
('/static/(.*)', BokehStaticFileHandler, {'path': static_path})
]
for p, h in _api_handlers.items():
extra_patterns.append((p, h, {'scheduler_ip': self._scheduler_ip}))
retrial = 5
while retrial:
try:
if self._port is None:
use_port = get_next_port()
else:
use_port = self._port
self._server = Server(
handlers, allow_websocket_origin=['*'],
address='0.0.0.0', port=use_port,
extra_patterns=extra_patterns,
)
self._server.start()
self._port = use_port
logger.info('Mars UI started at 0.0.0.0:%d', self._port)
break
except:
if self._port is not None:
raise
retrial -= 1
if retrial == 0:
raise
if not block:
self._server_thread = threading.Thread(target=self._server.io_loop.start)
self._server_thread.daemon = True
self._server_thread.start()
if event:
event.set()
else:
if event:
event.set()
self._server.io_loop.start()
def stop(self):
if self._server is not None:
self._server.io_loop.stop()
self._server.stop()
_ui_handlers = dict()
_api_handlers = dict()
def register_ui_handler(pattern, handler):
_ui_handlers[pattern] = handler
def register_api_handler(pattern, handler):
_api_handlers[pattern] = handler
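# Illustrative sketch (not part of the original module; the names below are
# placeholders): how a Bokeh page handler and the web server are typically
# wired together. A Mars scheduler must already be reachable at scheduler_ip.
def _example_start_web(scheduler_ip):
    def blank_page(scheduler_ip, doc):  # FunctionHandler calls h(scheduler_ip, doc)
        doc.title = 'Mars UI'
    register_ui_handler('/example', blank_page)
    web = MarsWeb(port=None, scheduler_ip=scheduler_ip)
    web.start(block=True)  # use block=False to serve from a daemon thread instead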
|
clientserver.py
|
# -*- coding: UTF-8 -*-
"""Module that implements a different threading model between
a Java Virtual Machine and a Python interpreter.
In this model, Java and Python can exchange requests and responses in the same
thread. For example, if a request is started in a Java UI thread and the Python
code calls some Java code, the Java code will be executed in the UI thread.
"""
from __future__ import unicode_literals, absolute_import
from collections import deque
import logging
import socket
from threading import local, Thread
import time
import traceback
import weakref
from py4j.java_gateway import (
quiet_close, quiet_shutdown,
set_linger, GatewayClient, JavaGateway,
CallbackServerParameters, GatewayParameters, CallbackServer,
GatewayConnectionGuard, DEFAULT_ADDRESS, DEFAULT_PORT,
DEFAULT_PYTHON_PROXY_PORT, DEFAULT_ACCEPT_TIMEOUT_PLACEHOLDER,
server_connection_stopped, do_client_auth, _garbage_collect_proxy)
from py4j import protocol as proto
from py4j.protocol import (
Py4JError, Py4JNetworkError, smart_decode, get_command_part,
get_return_value, Py4JAuthenticationError)
logger = logging.getLogger("py4j.clientserver")
SHUTDOWN_FINALIZER_WORKER = "__shutdown__"
DEFAULT_WORKER_SLEEP_TIME = 1
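# FinalizerWorker drains queued garbage-collection requests (java_client,
# target_id) from a shared deque and forwards them to the Java side, until it
# pops the SHUTDOWN_FINALIZER_WORKER sentinel.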
class FinalizerWorker(Thread):
def __init__(self, deque):
self.deque = deque
super(FinalizerWorker, self).__init__()
def run(self):
while(True):
try:
task = self.deque.pop()
if task == SHUTDOWN_FINALIZER_WORKER:
break
else:
(java_client, target_id) = task
java_client.garbage_collect_object(
target_id, False)
except IndexError:
time.sleep(DEFAULT_WORKER_SLEEP_TIME)
class JavaParameters(GatewayParameters):
"""Wrapper class that contains all parameters that can be passed to
    configure a `ClientServer`.
"""
def __init__(
self, address=DEFAULT_ADDRESS, port=DEFAULT_PORT, auto_field=False,
auto_close=True, auto_convert=False, eager_load=False,
ssl_context=None, enable_memory_management=True, auto_gc=False,
read_timeout=None, daemonize_memory_management=True,
auth_token=None):
"""
:param address: the address to which the client will request a
            connection. If you're passing an `SSLContext` with
`check_hostname=True` then this address must match
(one of) the hostname(s) in the certificate the gateway
server presents.
:param port: the port to which the client will request a connection.
Default is 25333.
:param auto_field: if `False`, each object accessed through this
            gateway won't try to look up fields (they will be accessible only by
calling get_field). If `True`, fields will be automatically looked
up, possibly hiding methods of the same name and making method
calls less efficient.
:param auto_close: if `True`, the connections created by the client
close the socket when they are garbage collected.
:param auto_convert: if `True`, try to automatically convert Python
objects like sequences and maps to Java Objects. Default value is
`False` to improve performance and because it is still possible to
explicitly perform this conversion.
:param eager_load: if `True`, the gateway tries to connect to the JVM
by calling System.currentTimeMillis. If the gateway cannot connect
to the JVM, it shuts down itself and raises an exception.
:param ssl_context: if not None, SSL connections will be made using
this SSLContext
:param enable_memory_management: if True, tells the Java side when a
JavaObject (reference to an object on the Java side) is garbage
collected on the Python side.
:param auto_gc: if True, call gc.collect() before sending a command to
the Java side. This should prevent the gc from running between
            sending the command and waiting for an answer. False by default
because this case is extremely unlikely. Legacy option no longer
used.
:param read_timeout: if > 0, sets a timeout in seconds after
which the socket stops waiting for a response from the Java side.
:param daemonize_memory_management: if True, the worker Thread making
the garbage collection requests will be daemonized. This means that
the Python side might not send all garbage collection requests if
it exits. If False, memory management will block the Python program
exit until all requests are sent.
        :param auth_token: if provided, an authentication token that clients
must provide to the server when connecting.
"""
super(JavaParameters, self).__init__(
address, port, auto_field, auto_close, auto_convert, eager_load,
ssl_context, enable_memory_management, read_timeout, auth_token)
self.auto_gc = auto_gc
self.daemonize_memory_management = daemonize_memory_management
class PythonParameters(CallbackServerParameters):
"""Wrapper class that contains all parameters that can be passed to
configure a `ClientServer`
"""
def __init__(
self, address=DEFAULT_ADDRESS, port=DEFAULT_PYTHON_PROXY_PORT,
daemonize=False, daemonize_connections=False, eager_load=True,
ssl_context=None, auto_gc=False,
accept_timeout=DEFAULT_ACCEPT_TIMEOUT_PLACEHOLDER,
read_timeout=None, propagate_java_exceptions=False,
auth_token=None):
"""
:param address: the address to which the client will request a
connection
:param port: the port to which the client will request a connection.
Default is 25334.
:param daemonize: If `True`, will set the daemon property of the server
thread to True. The callback server will exit automatically if all
the other threads exit.
:param daemonize_connections: If `True`, callback server connections
are executed in daemonized threads and will not block the exit of a
program if non daemonized threads are finished.
:param eager_load: If `True`, the callback server is automatically
started when the JavaGateway is created.
:param ssl_context: if not None, the SSLContext's certificate will be
presented to callback connections.
:param auto_gc: if True, call gc.collect() before returning a response
to the Java side. This should prevent the gc from running between
sending the response and waiting for a new command. False by
default because this case is extremely unlikely but could break
communication. Legacy option no longer used.
:param accept_timeout: if > 0, sets a timeout in seconds after which
the callbackserver stops waiting for a connection, sees if the
callback server should shut down, and if not, wait again for a
connection. The default is 5 seconds: this roughly means that
            it can take up to 5 seconds to shut down the callback server.
:param read_timeout: if > 0, sets a timeout in seconds after
which the socket stops waiting for a call or command from the
Java side.
:param propagate_java_exceptions: if `True`, any `Py4JJavaError` raised
by a Python callback will cause the nested `java_exception` to be
thrown on the Java side. If `False`, the `Py4JJavaError` will
manifest as a `Py4JException` on the Java side, just as with any
other kind of Python exception. Setting this option is useful if
you need to implement a Java interface where the user of the
interface has special handling for specific Java exception types.
:param auth_token: if provided, an authentication token that clients
must provide to the server when connecting.
"""
super(PythonParameters, self).__init__(
address, port, daemonize, daemonize_connections, eager_load,
ssl_context, accept_timeout, read_timeout,
propagate_java_exceptions, auth_token)
self.auto_gc = auto_gc
class JavaClient(GatewayClient):
"""Responsible for managing requests from Python to Java.
    This implementation is thread-safe because it always uses only one
ClientServerConnection per thread.
"""
def __init__(
self, java_parameters, python_parameters, gateway_property=None,
finalizer_deque=None):
"""
:param java_parameters: collection of parameters and flags used to
configure the JavaGateway (Java client)
:param python_parameters: collection of parameters and flags used to
configure the CallbackServer (Python server)
:param gateway_property: used to keep gateway preferences without a
cycle with the JavaGateway
:param finalizer_deque: deque used to manage garbage collection
requests.
"""
super(JavaClient, self).__init__(
java_parameters,
gateway_property=gateway_property)
self.java_parameters = java_parameters
self.python_parameters = python_parameters
self.thread_connection = local()
self.finalizer_deque = finalizer_deque
def garbage_collect_object(self, target_id, enqueue=True):
"""Tells the Java side that there is no longer a reference to this
JavaObject on the Python side. If enqueue is True, sends the request
to the FinalizerWorker deque. Otherwise, sends the request to the Java
side.
"""
if enqueue:
self.finalizer_deque.appendleft((self, target_id))
else:
super(JavaClient, self).garbage_collect_object(target_id)
def set_thread_connection(self, connection):
"""Associates a ClientServerConnection with the current thread.
:param connection: The ClientServerConnection to associate with the
current thread.
"""
self.thread_connection.connection = weakref.ref(connection)
def shutdown_gateway(self):
try:
super(JavaClient, self).shutdown_gateway()
finally:
self.finalizer_deque.appendleft(SHUTDOWN_FINALIZER_WORKER)
def get_thread_connection(self):
"""Returns the ClientServerConnection associated with this thread. Can
be None.
"""
connection = None
try:
connection_wr = self.thread_connection.connection
if connection_wr:
connection = connection_wr()
except AttributeError:
pass
return connection
def _get_connection(self):
connection = self.get_thread_connection()
try:
if connection is not None:
# Remove the strong reference to the connection
# It will be re-added after the command is sent.
self.deque.remove(connection)
except ValueError:
# Should never reach this point
pass
if connection is None or connection.socket is None:
connection = self._create_new_connection()
return connection
def _create_new_connection(self):
connection = ClientServerConnection(
self.java_parameters, self.python_parameters,
self.gateway_property, self)
connection.connect_to_java_server()
self.set_thread_connection(connection)
return connection
def _should_retry(self, retry, connection, pne=None):
# Only retry if Python was driving the communication.
parent_retry = super(JavaClient, self)._should_retry(
retry, connection, pne)
return parent_retry and retry and connection and\
connection.initiated_from_client
def _create_connection_guard(self, connection):
return ClientServerConnectionGuard(self, connection)
class ClientServerConnectionGuard(GatewayConnectionGuard):
"""Connection guard that does nothing on exit because there is no need to
close or give back a connection.
"""
def __exit__(self, type, value, traceback):
pass
class PythonServer(CallbackServer):
"""Responsible for managing requests from Java to Python.
"""
def __init__(
self, java_client, java_parameters, python_parameters,
gateway_property):
"""
:param java_client: the gateway client used to call Java objects.
:param java_parameters: collection of parameters and flags used to
configure the JavaGateway (Java client)
:param python_parameters: collection of parameters and flags used to
configure the CallbackServer (Python server)
:param gateway_property: used to keep gateway preferences.
"""
super(PythonServer, self).__init__(
pool=gateway_property.pool,
gateway_client=java_client,
callback_server_parameters=python_parameters)
self.java_parameters = java_parameters
self.python_parameters = python_parameters
self.gateway_property = gateway_property
def _create_connection(self, socket, stream):
connection = ClientServerConnection(
self.java_parameters, self.python_parameters,
self.gateway_property, self.gateway_client, python_server=self)
connection.init_socket_from_python_server(socket, stream)
return connection
class ClientServerConnection(object):
"""Default connection for a ClientServer instance
(socket-based, one per thread) responsible for communicating
with the Java Virtual Machine.
"""
def __init__(
self, java_parameters, python_parameters, gateway_property,
java_client, python_server=None):
"""
:param java_parameters: collection of parameters and flags used to
configure the JavaGateway (Java client)
:param python_parameters: collection of parameters and flags used to
configure the CallbackServer (Python server)
:param gateway_property: used to keep gateway preferences.
:param java_client: the gateway client used to call Java objects.
:param python_server: the Python server used to receive commands from
Java. Only provided if created from Python server.
"""
self.java_parameters = java_parameters
self.python_parameters = python_parameters
# For backward compatibility
self.address = self.java_parameters.address
self.port = self.java_parameters.port
self.java_address = self.java_parameters.address
self.java_port = self.java_parameters.port
self.python_address = self.python_parameters.address
self.python_port = self.python_parameters.port
self.ssl_context = self.java_parameters.ssl_context
self.socket = None
self.stream = None
self.gateway_property = gateway_property
self.pool = gateway_property.pool
self._listening_address = self._listening_port = None
self.is_connected = False
self.java_client = java_client
self.python_server = python_server
self.initiated_from_client = False
def connect_to_java_server(self):
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.java_parameters.read_timeout:
self.socket.settimeout(self.java_parameters.read_timeout)
if self.ssl_context:
self.socket = self.ssl_context.wrap_socket(
self.socket, server_hostname=self.java_address)
self.socket.connect((self.java_address, self.java_port))
self.stream = self.socket.makefile("rb")
self.is_connected = True
self.initiated_from_client = True
self._authenticate_connection()
except Py4JAuthenticationError:
self.close(reset=True)
self.is_connected = False
raise
except Exception:
self.close()
self.is_connected = False
raise
def _authenticate_connection(self):
if self.java_parameters.auth_token:
cmd = "{0}\n{1}\n".format(
proto.AUTH_COMMAND_NAME,
self.java_parameters.auth_token
)
answer = self.send_command(cmd)
error, _ = proto.is_error(answer)
if error:
raise Py4JAuthenticationError(
"Failed to authenticate with gateway server.")
def init_socket_from_python_server(self, socket, stream):
self.socket = socket
self.stream = stream
self.is_connected = True
def shutdown_gateway(self):
"""Sends a shutdown command to the Java side.
This will close the ClientServer on the Java side: all active
connections will be closed. This may be useful if the lifecycle
of the Java program must be tied to the Python program.
"""
if not self.is_connected:
raise Py4JError("Gateway must be connected to send shutdown cmd.")
try:
quiet_close(self.stream)
self.socket.sendall(
proto.SHUTDOWN_GATEWAY_COMMAND_NAME.encode("utf-8"))
self.close()
except Exception:
# Do nothing! Exceptions might occur anyway.
logger.debug("Exception occurred while shutting down gateway",
exc_info=True)
def start(self):
t = Thread(target=self.run)
t.daemon = self.python_parameters.daemonize_connections
t.start()
def run(self):
self.java_client.set_thread_connection(self)
self.wait_for_commands()
def send_command(self, command):
# TODO At some point extract common code from wait_for_commands
logger.debug("Command to send: {0}".format(command))
try:
self.socket.sendall(command.encode("utf-8"))
except Exception as e:
logger.info("Error while sending or receiving.", exc_info=True)
raise Py4JNetworkError(
"Error while sending", e, proto.ERROR_ON_SEND)
try:
while True:
answer = smart_decode(self.stream.readline()[:-1])
logger.debug("Answer received: {0}".format(answer))
                # Happens when the other end is dead. There might be an empty
# answer before the socket raises an error.
if answer.strip() == "":
raise Py4JNetworkError("Answer from Java side is empty")
if answer.startswith(proto.RETURN_MESSAGE):
return answer[1:]
else:
command = answer
obj_id = smart_decode(self.stream.readline())[:-1]
if command == proto.CALL_PROXY_COMMAND_NAME:
return_message = self._call_proxy(obj_id, self.stream)
self.socket.sendall(return_message.encode("utf-8"))
elif command == proto.GARBAGE_COLLECT_PROXY_COMMAND_NAME:
self.stream.readline()
_garbage_collect_proxy(self.pool, obj_id)
self.socket.sendall(
proto.SUCCESS_RETURN_MESSAGE.encode("utf-8"))
else:
logger.error("Unknown command {0}".format(command))
# We're sending something to prevent blocking,
# but at this point, the protocol is broken.
self.socket.sendall(
proto.ERROR_RETURN_MESSAGE.encode("utf-8"))
except Exception as e:
logger.info("Error while receiving.", exc_info=True)
raise Py4JNetworkError(
"Error while sending or receiving", e, proto.ERROR_ON_RECEIVE)
def close(self, reset=False):
logger.info("Closing down clientserver connection")
if not self.socket:
return
if reset:
set_linger(self.socket)
quiet_close(self.stream)
if not reset:
quiet_shutdown(self.socket)
quiet_close(self.socket)
already_closed = self.socket is None
self.socket = None
self.stream = None
if not self.initiated_from_client and self.python_server and\
not already_closed:
server_connection_stopped.send(
self.python_server, connection=self)
def wait_for_commands(self):
logger.info("Python Server ready to receive messages")
reset = False
authenticated = self.python_parameters.auth_token is None
try:
while True:
command = smart_decode(self.stream.readline())[:-1]
if not authenticated:
# Will raise an exception if auth fails in any way.
authenticated = do_client_auth(
command, self.stream, self.socket,
self.python_parameters.auth_token)
continue
obj_id = smart_decode(self.stream.readline())[:-1]
logger.info(
"Received command {0} on object id {1}".
format(command, obj_id))
if obj_id is None or len(obj_id.strip()) == 0:
break
if command == proto.CALL_PROXY_COMMAND_NAME:
return_message = self._call_proxy(obj_id, self.stream)
self.socket.sendall(return_message.encode("utf-8"))
elif command == proto.GARBAGE_COLLECT_PROXY_COMMAND_NAME:
self.stream.readline()
_garbage_collect_proxy(self.pool, obj_id)
self.socket.sendall(
proto.SUCCESS_RETURN_MESSAGE.encode("utf-8"))
else:
logger.error("Unknown command {0}".format(command))
# We're sending something to prevent blocking, but at this
# point, the protocol is broken.
self.socket.sendall(
proto.ERROR_RETURN_MESSAGE.encode("utf-8"))
except Py4JAuthenticationError:
reset = True
logger.exception("Could not authenticate connection.")
except socket.timeout:
reset = True
            logger.info(
                "Timeout while python server was waiting for "
                "a message", exc_info=True)
except Exception:
# This is a normal exception...
            logger.info(
                "Error while python server was waiting for "
                "a message", exc_info=True)
self.close(reset)
def _call_proxy(self, obj_id, input):
if obj_id not in self.pool:
return proto.RETURN_MESSAGE + proto.ERROR +\
get_command_part('Object ID unknown', self.pool)
try:
method = smart_decode(input.readline())[:-1]
params = self._get_params(input)
return_value = getattr(self.pool[obj_id], method)(*params)
return proto.RETURN_MESSAGE + proto.SUCCESS +\
get_command_part(return_value, self.pool)
except Exception as e:
logger.exception("There was an exception while executing the "
"Python Proxy on the Python Side.")
if self.python_parameters.propagate_java_exceptions and\
isinstance(e, proto.Py4JJavaError):
java_exception = e.java_exception
else:
java_exception = traceback.format_exc()
return proto.RETURN_MESSAGE + proto.ERROR +\
get_command_part(java_exception, self.pool)
def _get_params(self, input):
params = []
temp = smart_decode(input.readline())[:-1]
while temp != proto.END:
param = get_return_value("y" + temp, self.java_client)
params.append(param)
temp = smart_decode(input.readline())[:-1]
return params
class ClientServer(JavaGateway):
"""Subclass of JavaGateway that implements a different threading model: a
    thread always uses the same connection to the other side so callbacks are
executed in the calling thread.
For example, if Python thread 1 calls Java, and Java calls Python, the
callback (from Java to Python) will be executed in Python thread 1.
    Note about authentication: to enable authentication, pass an `auth_token`
    to both the JavaParameters and PythonParameters used to construct this
    class.
"""
def __init__(
self, java_parameters=None, python_parameters=None,
python_server_entry_point=None):
"""
:param java_parameters: collection of parameters and flags used to
configure the JavaGateway (Java client)
:param python_parameters: collection of parameters and flags used to
configure the CallbackServer (Python server)
:param python_server_entry_point: can be requested by the Java side if
Java is driving the communication.
"""
if not java_parameters:
java_parameters = JavaParameters()
if not python_parameters:
python_parameters = PythonParameters()
self.java_parameters = java_parameters
self.python_parameters = python_parameters
super(ClientServer, self).__init__(
gateway_parameters=java_parameters,
callback_server_parameters=python_parameters,
python_server_entry_point=python_server_entry_point
)
def _create_finalizer_worker(self):
worker_deque = deque()
worker = FinalizerWorker(worker_deque)
worker.daemon = self.java_parameters.daemonize_memory_management
worker.start()
return worker_deque
def _create_gateway_client(self):
worker_deque = self._create_finalizer_worker()
java_client = JavaClient(
self.java_parameters, self.python_parameters,
finalizer_deque=worker_deque)
return java_client
def _create_callback_server(self, callback_server_parameters):
callback_server = PythonServer(
self._gateway_client, self.java_parameters, self.python_parameters,
self.gateway_property)
return callback_server
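# Illustrative usage sketch (not part of the original module): connecting to a
# Java side that is already running its own ClientServer on the default ports.
# The commented entry-point call is a placeholder for whatever the Java side
# actually exposes.
def _example_client_server():
    gateway = ClientServer(
        java_parameters=JavaParameters(),
        python_parameters=PythonParameters(daemonize=True))
    # entry = gateway.entry_point        # proxy for the Java entry point
    # result = entry.someJavaMethod()    # callbacks run on the calling thread
    gateway.shutdown()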
|
listen.py
|
from __future__ import absolute_import
from __future__ import division
import errno
import socket
from pwnlib.context import context
from pwnlib.log import getLogger
from pwnlib.timeout import Timeout
from pwnlib.tubes.sock import sock
log = getLogger(__name__)
class listen(sock):
    r"""Creates a TCP or UDP socket to receive data on. It supports
both IPv4 and IPv6.
The returned object supports all the methods from
:class:`pwnlib.tubes.sock` and :class:`pwnlib.tubes.tube`.
Arguments:
port(int): The port to connect to.
Defaults to a port auto-selected by the operating system.
bindaddr(str): The address to bind to.
Defaults to ``0.0.0.0`` / `::`.
fam: The string "any", "ipv4" or "ipv6" or an integer to pass to :func:`socket.getaddrinfo`.
typ: The string "tcp" or "udp" or an integer to pass to :func:`socket.getaddrinfo`.
Examples:
>>> l = listen(1234)
>>> r = remote('localhost', l.lport)
>>> _ = l.wait_for_connection()
>>> l.sendline(b'Hello')
>>> r.recvline()
b'Hello\n'
>>> # It works with ipv4 by default
>>> l = listen()
>>> l.spawn_process('/bin/sh')
>>> r = remote('127.0.0.1', l.lport)
>>> r.sendline(b'echo Goodbye')
>>> r.recvline()
b'Goodbye\n'
        >>> # and it works with ipv6 by default, too!
>>> l = listen()
>>> r = remote('::1', l.lport)
>>> r.sendline(b'Bye-bye')
>>> l.recvline()
b'Bye-bye\n'
"""
#: Local port
lport = 0
#: Local host
lhost = None
#: Socket type (e.g. socket.SOCK_STREAM)
type = None
#: Socket family
family = None
#: Socket protocol
protocol = None
#: Canonical name of the listening interface
canonname = None
#: Sockaddr structure that is being listened on
sockaddr = None
_accepter = None
def __init__(self, port=0, bindaddr='::',
fam='any', typ='tcp', *args, **kwargs):
super(listen, self).__init__(*args, **kwargs)
port = int(port)
fam = self._get_family(fam)
typ = self._get_type(typ)
if fam == socket.AF_INET and bindaddr == '::':
bindaddr = '0.0.0.0'
h = self.waitfor('Trying to bind to %s on port %d' % (bindaddr, port))
for res in socket.getaddrinfo(bindaddr, port, fam, typ, 0, socket.AI_PASSIVE):
self.family, self.type, self.proto, self.canonname, self.sockaddr = res
if self.type not in [socket.SOCK_STREAM, socket.SOCK_DGRAM]:
continue
h.status("Trying %s" % self.sockaddr[0])
listen_sock = socket.socket(self.family, self.type, self.proto)
listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if self.family == socket.AF_INET6:
try:
listen_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, fam == socket.AF_INET6)
except (socket.error, AttributeError):
self.warn("could not set socket to accept also IPV4")
listen_sock.bind(self.sockaddr)
self.lhost, self.lport = listen_sock.getsockname()[:2]
if self.type == socket.SOCK_STREAM:
listen_sock.listen(1)
break
else:
h.failure()
self.error("Could not bind to %s on port %d" % (bindaddr, port))
h.success()
h = self.waitfor('Waiting for connections on %s:%s' % (self.lhost, self.lport))
def accepter():
while True:
try:
if self.type == socket.SOCK_STREAM:
self.sock, rhost = listen_sock.accept()
listen_sock.close()
else:
data, rhost = listen_sock.recvfrom(4096)
listen_sock.connect(rhost)
self.sock = listen_sock
self.unrecv(data)
self.settimeout(self.timeout)
break
except socket.error as e:
if e.errno == errno.EINTR:
continue
h.failure()
self.exception("Socket failure while waiting for connection")
self.sock = None
return
self.rhost, self.rport = rhost[:2]
h.success('Got connection from %s on port %d' % (self.rhost, self.rport))
self._accepter = context.Thread(target = accepter)
self._accepter.daemon = True
self._accepter.start()
def spawn_process(self, *args, **kwargs):
def accepter():
self.wait_for_connection()
self.sock.setblocking(1)
p = super(listen, self).spawn_process(*args, **kwargs)
p.wait()
self.close()
t = context.Thread(target = accepter)
t.daemon = True
t.start()
def wait_for_connection(self):
"""Blocks until a connection has been established."""
self.sock
return self
def __getattr__(self, key):
if key == 'sock':
self._accepter.join(timeout = self.timeout)
if 'sock' in self.__dict__:
return self.sock
else:
return None
else:
return getattr(super(listen, self), key)
def close(self):
# since `close` is scheduled to run on exit we must check that we got
# a connection or the program will hang in the `join` call above
if self._accepter and self._accepter.is_alive():
return
super(listen, self).close()
|
fs.py
|
import os,socket,threading,sys,ast, queue, shutil, xattr
from pathlib import Path
lock = threading.Lock()
Q = queue.Queue()
#
'''
To do:
3) Deleting Replicated Files
4) Connecting using hostname
5) Smart client and file transfer with data server direct
6) Making directory and file
7) Removing from list
'''
MAX_MSG = 1024
START_PORT = 7777
MAX_SERVS = 3
SERVER_ID = 7777
DFSOnline = 0
RootDir = 'root'
localfilelist = []
localreplicalist = []
serverlist={}
clientlist=[]
class bcolors:
HEADER = '\033[95m'#PURPLE
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'#YELLOW
FAIL = '\033[91m'#RED
ENDC = '\033[0m'#WHITE
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class serverContents:
def __init__(self):
self.fd = None
self.filelist = None
self.count = 99999
def globalListGenerator(ign_port):
globalfilelist=[]
for ports in serverlist:
if ports ==ign_port:
continue
if(serverlist[ports].filelist!=None):
for files in serverlist[ports].filelist:
globalfilelist.append(files[0])
return globalfilelist
def generateList():
del localfilelist[:]
del localreplicalist[:]
for root, dirs, files in os.walk(RootDir):
#localfilelist.append(os.path.relpath(root,RootDir))
prefx = os.path.relpath(root,RootDir)
if (prefx != '.'):
prefx = '/'+prefx
else:
prefx = ''
for f in files:
try:
att = (xattr.get(RootDir+prefx+'/'+f,'user.comment')).decode()
except OSError:
att = 0
if(f.count('%')>0):
fname = prefx+f.replace('%','/')
localfilelist.append([fname, str(att)])
localreplicalist.append(fname)
else:
localfilelist.append([prefx+'/'+f, str(att)])
for d in dirs:
localfilelist.append([prefx+'/'+d+'/',str(-1)])
serverlist[SERVER_ID].filelist = localfilelist
for files in localfilelist:
print(files)
def fileExists(name):
T = globalListGenerator(-1)
if('/' not in name[:1]):
name = '/'+name
for fil in T:
if fil == name:
return True
return False
def fileExistsLoc(name):
if('/' not in name[:1]):
name = '/'+name
for fil in localfilelist:
if fil[0] == name:
return True
return False
def repExistsLoc(name):
if('/' not in name[:1]):
name='/'+name
for file in localreplicalist:
if name == file:
return True
return False
def locFileLocator(name):
if('/' not in name[:1]):
name = '/'+name
for ind, files in enumerate(localfilelist):
if files[0]==name:
return ind
def custFileLocator(serv, name):
if('/' not in name[:1]):
name = '/'+name
for ind, files in enumerate(serverlist[serv].filelist):
if files[0]==name:
return ind
def fileLocator(name, ign_port):#return address of server with file
if('/' not in name[:1]):
name = '/'+name
globalfilelist=[]
gfl=[]
for ports in serverlist:
if ports == ign_port:
continue
if(serverlist[ports].filelist!=None):
globalfilelist.append(serverlist[ports].filelist)
gfl.append(ports)
for x,filelist in enumerate(globalfilelist):
for fil in filelist:
if fil[0] == name:
return gfl[x]
return -1
def broadcast(msg):
for port in serverlist:
if serverlist[port].fd!=None and port != SERVER_ID:
try:
serverlist[port].fd.sendall(msg)
except:
continue
def costCreationFunc(cost, ign_port):
if(ign_port ==0):
port=SERVER_ID
for sport in (serverlist):
if ign_port == sport:
continue
if serverlist[sport].fd!=None:
if len(serverlist[sport].filelist) < cost:
cost = len(serverlist[sport].filelist)
port = sport
return port
def syncFiles(serverid):
for files in serverlist[serverid].filelist:
for files2 in localfilelist:
if files[0] == files2[0]:
if int(files[1]) < int(files2[1]):
if(repExistsLoc(files2[0])):
files2[0] =files2[0].replace('/','%')
name = RootDir+'/'+files2[0]
serverlist[serverid].fd.sendall(('fil_up'+files[0]+';'+files2[1]+';'+Path(name).read_text() +'Āā').encode())
                    files[1] = files2[1]  # record the newer version for the peer's entry
break
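# cmdParse() implements the user-facing command set (peek/dir, read, writ,
# updt, make, remv, rmdr, apen, open, cons, exis, help). Multi-part payloads
# use ';' as a field separator and the sentinel 'Āā' as an end-of-message
# marker; server-to-server notifications are separated with 'Ĕ'.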
def cmdParse(cmd):
#filelist = os.listdir("root")
ret_msg = ''
if cmd == 'peek'or cmd == 'dir':
T = globalListGenerator(-1)
#T.extend(globalReplicGenerator())
T = set(T)
ret_msg = '-'*10 +'File Directory' + '-'*10 +'\n'
ret_msg +=RootDir+'/\n'
filelists = sorted(T)
for ind, files in enumerate(filelists):
lvl = files[:-1].count('/')
name = files[:-1].rfind('/')
prev_lvl =filelists[ind-1][:-1].count('/')
ret_msg += ' '*lvl
if(ind == len(filelists)-1):
ret_msg += '└'+files[name:]+'\n'
continue
else:
nxt_lvl = filelists[ind+1][:-1].count('/')
if(lvl>prev_lvl and nxt_lvl == lvl):
ret_msg += '┌'
elif( lvl == nxt_lvl):
ret_msg += '├'
else:
ret_msg += '└'
ret_msg += files[name:]+'\n'
elif cmd[:5] == 'read ':
path = cmd[5:]
exe_str = cmd[-(len(path)-(path.rfind('.'))):]
extensions = ['.txt','.c','.py']
if not(any(x in exe_str for x in extensions)):
ret_msg ='File not readable'
elif not(fileExists(path)):
            ret_msg = 'File Does Not Exist'
elif (fileExistsLoc(path)):
if(repExistsLoc(path)):
path =path.replace('/','%')
if('%' not in path[:1]):
path='%'+path
try:
ret_msg =('_'*40+'\n' +Path(RootDir+"/"+path).read_text()+'_'*40)
except Exception as e:
ret_msg = str(e)
else:
port = fileLocator(path, -1)
#serverlist[port].fd.sendall(('give'+path).encode())
#ret_msg ='_'*40+'\n' + Q.get()+'\n'+'_'*40
ret_msg = 'con'+repr(serverlist[port].fd.getpeername()[0])
elif cmd[:5] == 'writ ':
fil_path = cmd[5:]
tpath = '%' + fil_path.replace('/','%')
exe_str = cmd[-(len(fil_path)-(fil_path.rfind('.'))):]
extensions = ['.txt','.c','.py']
ret_msg = 'wr'+tpath+';'
if not(any(x in exe_str for x in extensions)):
ret_msg ='File Cannot Be Opened'
elif not(fileExists(fil_path)):
            ret_msg = 'File Does Not Exist'
elif (fileExistsLoc(fil_path)):
if(repExistsLoc(fil_path)):
fil_path =fil_path.replace('/','%')
if('%' not in fil_path[:1]):
fil_path='%'+fil_path
try:
ret_msg+= (Path(RootDir+"/"+fil_path).read_text() +'Āā')
except Exception as e:
ret_msg = str(e)
else:
port = fileLocator(fil_path, -1)
#serverlist[port].fd.sendall(('give'+fil_path).encode())
#ret_msg+= (Q.get()+'Āā')
ret_msg = 'con'+repr(serverlist[port].fd.getpeername()[0])
elif cmd[:5] == 'updt ':
fil_path = cmd[5:cmd.find(';')].replace('%','/')
fil_path = fil_path[1:]
if (not fileExists(fil_path)):
            ret_msg = 'File Does Not Exist'
elif (fileExistsLoc(fil_path)):
name = RootDir+'/'+fil_path
if(repExistsLoc(fil_path)):
fil_path =fil_path.replace('/','%')
if('%' not in fil_path[:1]):
name = RootDir+'/%'+fil_path
try:
with open(name,'w') as f:
f.write(cmd[cmd.find(';')+1:])
ret_msg = 'Written To File'
att =int( localfilelist[locFileLocator(fil_path)][1])+1
localfilelist[locFileLocator(fil_path)][1] = str(att)
xattr.set(name, 'user.comment', str(att))
#broadcast(('ver_up'+str(att)+';/'+fil_path+'Ĕ').encode())
ind = fileLocator(fil_path, SERVER_ID)
if(ind!=-1):
if(int(serverlist[ind].filelist[custFileLocator(ind, fil_path)][1]) < att):
serverlist[ind].fd.sendall(('ver_up'+str(att)+';/'+fil_path+'Ĕ'+cmd+'Āā'+'Ĕ').encode())
except Exception as e:
ret_msg = str(e)
else:
serverlist[fileLocator(fil_path,-1)].fd.sendall((cmd+'Āā').encode())
ret_msg = 'Written To File'
elif cmd[:5] == 'make ':
if (fileExists(cmd[5:])):
ret_msg='File Already Exists'
else:
cost = len(localfilelist)
port = SERVER_ID
if(cmd.rfind('/')!=-1):
if (fileExistsLoc(cmd[5:cmd.rfind('/')])):
cost=0
elif fileExists(cmd[5:cmd.rfind('/')]):
cost=0
                    port = fileLocator(cmd[5:cmd.rfind('/')], -1)
if cost !=0:
port = costCreationFunc(cost,0)
if(port==SERVER_ID):
flag = True
ret_msg ='File Created'
try:
file1 = open(RootDir+"/"+cmd[5:],'w+')
file1.close()
except IsADirectoryError:
os.makedirs(RootDir+"/"+cmd[5:cmd.rfind('/')])
flag = False
ret_msg = 'Directory Created'
except FileNotFoundError:
os.makedirs(RootDir+"/"+cmd[5:cmd.rfind('/')])
file1 = open(RootDir+"/"+cmd[5:],'w+')
file1.close()
broadcast(('dir_ap'+"/"+cmd[5:]+'Ĕ').encode())
if(flag):
lock.acquire()
localfilelist.append(["/"+cmd[5:],str(0)])
lock.release()
xattr.set(RootDir+"/"+cmd[5:],"user.comment", str(0))
if(DFSOnline!=0):
replic_serv = costCreationFunc(9999,SERVER_ID)
serverlist[replic_serv].fd.sendall(('rep%'+cmd[5:].replace('/','%')).encode())
else:
lock.acquire()
localfilelist.append(["/"+cmd[5:],str(-1)])
lock.release()
else:
serverlist[port].fd.sendall((cmd).encode())
ret_msg ='File Created'
elif cmd[:5] == 'remv ':
if not(fileExists(cmd[5:])):
            ret_msg = 'File Does Not Exist'
else:
ret_msg ='File Deleted'
if (fileExistsLoc(cmd[5:])):
name = cmd[5:]
if('/' not in name[:1]):
name = '/'+name
if(cmd[-1:]!='/'):
os.remove(RootDir+name)
else:
try:
os.rmdir(RootDir+"/"+cmd[5:-1])
ret_msg = "Directory Deleted"
except OSError:
ret_msg = "To Delete Non-empty Directories, use rmdr"
lock.acquire()
del localfilelist[locFileLocator(name)]
lock.release()
broadcast(('dir_dl'+name).encode())
else:
                serverlist[fileLocator(cmd[5:], -1)].fd.sendall(cmd.encode())
elif cmd[:5] == 'rmdr ':
if not(fileExists(cmd[5:])):
            ret_msg = 'Directory Does Not Exist'
else:
ret_msg ='Directory Deleted'
if (fileExistsLoc(cmd[5:])):
shutil.rmtree((RootDir+"/"+cmd[5:]))
lock.acquire()
generateList()
lock.release()
broadcast(('dir_up'+repr(localfilelist)).encode())
else:
                serverlist[fileLocator(cmd[5:], -1)].fd.sendall(cmd.encode())
elif cmd[:5] == 'apen ':
text = cmd.split(' ',2)
exe_str = cmd[(text[1]).rfind('.'):]
extensions = ['.txt','.c','.py']
if not(any(x in exe_str for x in extensions)):
ret_msg ='File not readable'
elif not(fileExists(text[1])):
            ret_msg = 'File Does Not Exist'
elif (fileExistsLoc(text[1])):
try:
with open("root/"+text[1], 'a+') as f:
f.write(text[2]+'\n')
ret_msg = 'appended to file'
except Exception as e:
ret_msg = str(e)
else:
port = fileLocator(text[1], -1)
serverlist[port].fd.sendall((cmd).encode())
ret_msg = 'appended to file'
elif cmd[:5] == "open ":
path = cmd[5:]
exe_str = cmd[-(len(path)-(path.rfind('.'))):]
extensions = ['.txt','.c','.py']
if not(any(x in exe_str for x in extensions)):
ret_msg ='File Cannot Be Opened'
elif not(fileExists(path)):
            ret_msg = 'File Does Not Exist'
elif (fileExistsLoc(path)):
if(os.fork()==0):
ret_msg = 'File Opened'
os.execvp('gedit',['gedit', './'+RootDir+'/'+path])
else:
port = fileLocator(path, -1)
serverlist[port].fd.sendall(('give'+path).encode())
ret_msg = 'File Opened'
if path.rfind('/') !=-1:
tpath = '%'+path.replace('/', '%')#path[path.rfind('/')+1:]
else:
tpath = '%'+path
with open(tpath, 'x') as f:
f.write(Q.get())
ret_msg = 'File Opened'
if(os.fork()==0):
os.execvp('gedit',['gedit', tpath])
elif cmd == 'cons':
ret_msg ='_'*40
for servers in serverlist:
if serverlist[servers].fd!=None:
ret_msg +='\n'+repr(serverlist[servers].fd) + ' ' +str(servers)
ret_msg +='_'*40
elif cmd[:5] == 'exis ':
if (fileExists(cmd[5:])):
ret_msg = 'File Present'
else:
ret_msg = 'File Absent'
#elif cmd == 'repl':
# for serports in serverlist:
# ret_msg += repr(serverlist[serports].replicalist)
elif ('help'in cmd or 'cmd' in cmd):
ret_msg ='_'*30 + "\nList Of Possible Commands:\n" + '-'*30+"\npeek View File Directory ..\ncons View Connections ..\nmake [file] ..\nremv [file] ..\nexis [file] ..\nread [file] ..\nwrit [file] ..\nupdt [file] ..\nexit Close Program ..\n"+'-'*30 #\napen [file] [text] ..
else:
ret_msg ='Invalid Command. Use help.'
return ('\n' + ret_msg)
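# recServMsg() handles traffic from peer servers: dir_up/dir_ap/dir_dl keep the
# cached remote file lists in sync, ver_up records a newer file version, rep%
# creates a local replica, give/fil_msg/fil_up transfer file contents, and an
# empty read means the peer disconnected.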
def recServMsg(fd):
while(True):
data = fd.recv(MAX_MSG).decode()
port = 0
for ports in serverlist:
if serverlist[ports].fd == fd:
port = ports
break
if len(data) >0:
            print('\nMsg Received from Server: ', port,' : ',data, '\n<cmd>: ', end='',flush=True)
all_data = data.split('Ĕ')
for data in all_data:
if(len(data)<1):
continue
if data[:6] == 'dir_up':
serverlist[port].filelist = ast.literal_eval(data[6:])
elif data[:6] == 'dir_ap':
serverlist[port].filelist.append([data[6:], str(0)])
elif data[:6] == 'dir_dl':
for files in serverlist[port].filelist:
if data[6:] == files[0]:
serverlist[port].filelist.remove(files)
break
elif data[:6] == 'ver_up':
data = data.split(';')
serverlist[port].filelist[custFileLocator(port, data[1])][1] = data[0][6:]
elif data[:4] == 'rep%':
file1 = open(RootDir+"/"+data[3:],'w+')
file1.close()
fname = data[3:].replace('%','/')
xattr.set(RootDir+"/"+data[3:],"user.comment", str(0))
lock.acquire()
localfilelist.append([fname, str(0)])
localreplicalist.append(fname)
lock.release()
broadcast(('dir_ap'+fname).encode())
elif data[:4] == 'give':
try:
file_content = Path("root/"+data[4:]).read_text()
fd.sendall(('fil_msg'+';'+file_content+'Āā').encode())
except Exception as e:
                        fd.sendall(str(e).encode())
elif(data[:5] == 'updt '):
while(data[-2:]!='Āā'):
data +=fd.recv(MAX_MSG).decode()
reply = cmdParse(data[:-2])
fd.sendall(reply.encode())
elif(data[:6] == 'fil_up'):
file_data = data.split(';')
while(file_data[2][-2:]!='Āā'):
file_data[2] += fd.recv(MAX_MSG).decode()
fil_path = file_data[0][6:]
name = RootDir+"/"+fil_path
if(repExistsLoc(fil_path)):
name = RootDir+ '/'+fil_path.replace('/','%')
with open(name,'w') as f:
f.write(file_data[2][:-2])
localfilelist[locFileLocator(fil_path)][1] = file_data[1]
xattr.set(name, 'user.comment', file_data[1])
elif data[:7] == 'fil_msg':
file_data = data.split(';')
while(file_data[1][-2:]!='Āā'):
file_data[1] += fd.recv(MAX_MSG).decode()
Q.put(file_data[1][:-2])
#print(result + '\n<cmd>: ', end='',flush=True)
elif data[:4] == 'apen':
cmdParse(data)
elif data[:5] == 'remv ':
print(cmdParse(data))
elif data[:5] == 'make ':
print(cmdParse(data))
else:
print('\nTerminating Connection:', port,fd.getpeername(),'\n<cmd>: ', end='',flush=True)
fd.close()
serverlist[port].fd = None
serverlist[port].filelist = None
global DFSOnline
DFSOnline-=1
break
def recCliMsg(fd):
while(True):
data = (fd.recv(MAX_MSG)).decode()
if len(data) >0:
            print('\nMsg Received from Client: ', repr(fd.getpeername()),' : ',data, '\n<cmd>: ', end='',flush=True)
if(data[:5] == 'updt '):
while(data[-2:]!='Āā'):
data +=fd.recv(MAX_MSG).decode()
data = data[:-2]
reply = cmdParse(data)
fd.sendall(reply.encode())
else:
print('\nTerminating Connection with Client:', fd.getpeername(),'\n<cmd>: ', end='',flush=True)
clientlist.remove(fd)
fd.close()
break
def sockListen(sock):
#print('Thread SockBind')
sock.listen()
while(True):
conn, addr = sock.accept()
#print('Conn Accept Loop')
if(sock.getsockname()[1]>=START_PORT):
data = conn.recv(MAX_MSG).decode()
while(data[-2:]!='Āā'):
data +=conn.recv(MAX_MSG).decode()
data = data.split(';')
server_port=int(data[0])
serverlist[server_port].fd= conn
conn.sendall((repr(localfilelist)+'Āā').encode())#+';'+repr(localreplicalist)
#data = data.split(';')
lock.acquire()
serverlist[server_port].filelist = ast.literal_eval(data[1][:-2])
syncFiles(server_port)
lock.release()
#serverlist[server_port].replicalist = ast.literal_eval(data[1][:-2])
#print('\nMsg:'+repr(serverlist[server_port].filelist))
print('\nIncoming Server Connection:', server_port, addr,'\n<cmd>: ', end='',flush=True)
global DFSOnline
lock.acquire()
DFSOnline+=1
lock.release()
threading.Thread(target=recServMsg, kwargs={'fd':conn}).start()
else:
clientlist.append(conn)
#print(clientlist)
print('\nIncoming Client Connection:', addr,'\n<cmd>: ', end='',flush=True)
threading.Thread(target=recCliMsg, kwargs={'fd':conn}).start()
def main():
for x in range(MAX_SERVS):
serverlist[START_PORT+x]=serverContents()
print('Available ports: ',list(serverlist.keys()))
serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
inc=0
while True:
try:
#arg = int(input('Select Server Port: '))
arg = START_PORT+inc
if arg not in serverlist.keys():
raise ValueError
serv.bind(('127.0.0.1', arg))
break
except ValueError:
print('Error: Incorrect Port Number')
except OSError:
print('Port Already In Use')
finally:
inc+=1
global SERVER_ID, DFSOnline #, localreplicalist
SERVER_ID = arg
generateList()
#generateRepList()
serverlist[SERVER_ID].fd=serv
t = threading.Thread(target=sockListen, kwargs={"sock": serv})
t.daemon = True
t.start()
onlineServs = []
offlineServers=[]
i=0
for servers in serverlist:
if servers == int(SERVER_ID):
continue
onlineServs.append(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
try:
onlineServs[i].connect(('127.0.0.1', servers))
lock.acquire()
DFSOnline+=1
lock.release()
serverlist[servers].fd = onlineServs[i]
print('Connected to Server: ', servers)
onlineServs[i].sendall((str(SERVER_ID)+';'+repr(localfilelist)+'Āā').encode())
data = onlineServs[i].recv(MAX_MSG).decode()
while(data[-2:]!='Āā'):
data +=onlineServs[i].recv(MAX_MSG).decode()
#data = data.split(';')
#lock.acquire()
#serverlist[servers].filelist = ast.literal_eval(data[:-2])
#syncFiles(servers)
#lock.release()
#serverlist[servers].replicalist = ast.literal_eval(data[1][:-2])
#onlineServs[i].sendall().encode())#+';'+repr(localreplicalist)
lock.acquire()
serverlist[servers].filelist = ast.literal_eval(data[:-2])
syncFiles(servers)
lock.release()
t = threading.Thread(target=recServMsg, kwargs={'fd':onlineServs[i]})
t.daemon = True
t.start()
except ConnectionRefusedError:
offlineServers.append(servers)
i+=1
print('Offline Servers: ', offlineServers)
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while(True):
try:
cli.bind(('127.0.0.1', START_PORT-int(input('no:'))))
break
except OSError:
print('try another')
ct = threading.Thread(target = sockListen, kwargs={'sock':cli})
ct.daemon = True
ct.start()
while(True):
cmd=input('<cmd>: ')
if(cmd=='close' or cmd == 'exit'):
sys.exit()
print(cmdParse(cmd))
if __name__ == "__main__":
main()
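# Illustrative sketch (not part of the original file): a minimal client for a
# running fs.py server. The port argument is an assumption -- it must match the
# client port the server bound at startup (START_PORT minus the number entered).
def _example_fs_client(port, command='peek'):
    c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    c.connect(('127.0.0.1', port))
    c.sendall(command.encode())       # recCliMsg() replies with cmdParse() output
    reply = c.recv(MAX_MSG).decode()
    c.close()
    return reply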
|
stock_correlation.py
|
#!/usr/bin/env python3
""" Find and visualize correlation between various equities.
Takes ticker symbols as parameters
"""
import argparse
import os
import sys
import re
import time
import math
import urllib.request
import platform
import threading
import queue
import svgwrite # pip install svgwrite
QUOTE_API = "https://query1.finance.yahoo.com/v7/finance/download/"
# 2000000000 means this will work until May 17, 2033
QUOTE_URL = (
QUOTE_API
+ "%(symbol)s?period1=0&period2=2000000000&interval=1d"
+ "&events=history&includeAdjustedClose=true"
)
USER_AGENT = (
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) "
+ "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36"
)
STATS_URL = "https://finance.yahoo.com/quote/%(symbol)s"
YIELD_PATTERN = re.compile(r""""(dividendYield|yield)":{"raw":([0-9.]+),""")
EXPENSE_PATTERN = re.compile(r""""annualReportExpenseRatio":{"raw":([0-9.]+),""")
NET_ASSETS = re.compile(r"""(totalAssets|marketCap)":{"raw":([0-9.]+),""")
MAX_CIRCLE_RADIANS = 2.0 * 3.14159265
def start_thread(target, *args):
t = threading.Thread(target=target, args=args)
    t.daemon = True
t.start()
return t
def cache_file_path(*parts):
"""creates a path to a temporary file that can be created."""
(tm_year, tm_mon, tm_day, _, _, _, _, _, _) = time.localtime()
parts = list(parts)
parts.extend((tm_year, tm_mon, tm_day))
if platform.system() == "Darwin":
cache_dir = os.path.join(
os.environ["HOME"], "Library", "Caches", os.path.split(__file__)[1]
)
elif platform.system() == "Linux":
cache_dir = os.path.join(os.environ["HOME"], "." + os.path.split(__file__)[1])
else:
cache_dir = os.path.join(os.environ["TMP"], os.path.split(__file__)[1])
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir)
return os.path.join(cache_dir, "_".join([str(x) for x in parts]))
def get_url_contents(url, *name_parts):
"""Get the contents of a web page"""
cache_path = cache_file_path(*name_parts)
try:
with open(cache_path, "rb") as cache_file:
contents = cache_file.read()
except FileNotFoundError:
request = urllib.request.Request(
url, data=None, headers={"User-Agent": USER_AGENT}
)
with urllib.request.urlopen(request) as connection:
contents = connection.read()
with open(cache_path + ".tmp", "wb") as cache_file:
cache_file.write(contents)
try:
os.rename(cache_path + ".tmp", cache_path)
except:
pass
return contents
def get_symbol_history(symbol):
"""Get the history of an equity symbol"""
return get_url_contents(QUOTE_URL % {"symbol": symbol}, "history", symbol).decode(
"utf-8"
)
def get_symbol_stats(symbol):
"""Get expense ratio and yield of an equity"""
contents = get_url_contents(STATS_URL % {"symbol": symbol}, "stats", symbol).decode(
"utf-8"
)
has_expense_ratio = EXPENSE_PATTERN.search(contents)
return {
"yield": float(YIELD_PATTERN.search(contents).group(2)),
"expense_ratio": float(has_expense_ratio.group(1) if has_expense_ratio else 0.0),
"total_value": float(NET_ASSETS.search(contents).group(2)),
}
def load_history(symbol):
"""Get this history as a dictionary of date to information on that date"""
contents = get_symbol_history(symbol)
lines = contents.replace("\r\n", "\n").replace("\r", "\n").strip().split("\n")
fields = lines.pop(0).split(",")
dates = [dict(zip(fields, x.split(","))) for x in lines]
return {x["Date"]: x for x in dates}
def date_to_seconds(date_str):
"""Convert date to seconds"""
return time.mktime(time.strptime(date_str, "%Y-%m-%d"))
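# calculate_variance() (below) fits an ordinary least-squares line of adjusted
# close against time:
#   slope = sum((t_i - mean_t) * y_i) / sum((t_i - mean_t)^2)
#   intercept = mean_y - slope * mean_t
# and stores each day's relative deviation from that line as "variance", plus a
# 0-1 normalized copy as "std_variance".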
def calculate_variance(history, stats):
"""Compare the histories of all symbols and get their variance from line fit"""
mean_date = sum([date_to_seconds(d) for d in history]) / len(history)
mean_adj_close = sum([float(history[d]["Adj Close"]) for d in history]) / len(
history
)
product_sum = sum(
[
(date_to_seconds(d) - mean_date) * (float(history[d]["Adj Close"]))
for d in history
]
)
date_square_sum = sum([(date_to_seconds(d) - mean_date) ** 2 for d in history])
slope = product_sum / date_square_sum
y_intercept = mean_adj_close - slope * mean_date
for date in history:
expected_adj_close = slope * date_to_seconds(date) + y_intercept
actual_value = float(history[date]["Adj Close"])
history[date]["variance"] = (
actual_value - expected_adj_close
) / expected_adj_close
# normalize variances (0% to 100%)
min_variance = min([history[d]["variance"] for d in history])
max_variance = max([history[d]["variance"] for d in history])
for date in history:
history[date]["std_variance"] = (history[date]["variance"] - min_variance) / (
max_variance - min_variance
)
result = {'history': history, 'slope': slope * 60 * 60 * 24 * 365 / mean_adj_close}
result.update({'stats': stats})
return result
def calculate_distance(history1, history2, key="variance"):
    """Determine how much two histories vary"""
    overlapping_dates = [d for d in history1 if d in history2]
    square_sum = 0.0
    for date in overlapping_dates:
square_sum += (history1[date][key] - history2[date][key]) ** 2
return math.sqrt(square_sum)
class Point:
"""A point in 2D space"""
def __init__(self, x, y):
"""create a new point"""
(self.__x, self.__y) = (
x,
y,
)
def __add__(self, vector):
"""Add a vector onto a point"""
return Point(vector.get_dx() + self.__x, vector.get_dy() + self.__y)
def __sub__(self, point):
"""Find a vector between two points"""
return Vector(self.get_x() - point.get_x(), self.get_y() - point.get_y())
def __str__(self):
"""Display the point"""
return "(%0.2f, %0.2f)" % (self.__x, self.__y)
def __repr__(self):
"""display the point"""
return str(self)
def get_x(self):
"""Get X coordinate"""
return self.__x
def get_y(self):
"""Get Y coordinate"""
return self.__y
class Vector:
"""A vector in 2D space"""
def __init__(self, dx, dy):
"""create a vector"""
(self.__dx, self.__dy) = (dx, dy)
def __add__(self, vector):
"""Add two vectors"""
return Vector(self.get_dx() + vector.get_dx(), self.get_dy() + vector.get_dy())
def __str__(self):
"""display the vector"""
return "[%0.2f, %0.2f]" % (self.__dx, self.__dy)
def __repr__(self):
"""display the vector"""
return str(self)
def get_dx(self):
"""Get the change in X direction"""
return self.__dx
def get_dy(self):
"""Get the change in Y direction"""
return self.__dy
def magnitude(self):
"""Get the magnitude of the vector"""
return math.sqrt(self.__dx ** 2 + self.__dy ** 2)
def scaled(self, factor):
"""Scale the vector"""
return Vector(factor * self.__dx, factor * self.__dy)
def add_distances(histories):
"""Calculate the distance (difference in variance) between all equities"""
for symbol in histories:
histories[symbol]["distance"] = {
s: calculate_distance(
histories[symbol]["history"], histories[s]["history"], "variance"
)
for s in histories
if s != symbol
}
histories[symbol]["std_distance"] = {
s: calculate_distance(
histories[symbol]["history"], histories[s]["history"], "std_variance"
)
for s in histories
if s != symbol
}
def movement(symbol1, symbol2, points, histories):
"""Move symbol1 towards the expected distance from symbol2"""
distance = points[symbol2] - points[symbol1]
distance_magnitude = distance.magnitude()
expected_distance = histories[symbol1]["std_distance"][symbol2]
return (
distance.scaled((distance_magnitude - expected_distance) / distance_magnitude)
if distance_magnitude > 0
else Vector(0.0, 0.0)
)
def apply_gravity(points, histories, speed=0.10):
"""Move all points towards their expected distances from all other points"""
velocities = {s: Vector(0, 0) for s in histories}
largest_velocity = Vector(0, 0)
for symbol1 in histories:
for symbol2 in [s for s in histories if s != symbol1]:
distance_to_expected = movement(symbol1, symbol2, points, histories)
velocities[symbol1] += distance_to_expected.scaled(speed / 2.0)
for symbol in points:
points[symbol] = points[symbol] + velocities[symbol]
if velocities[symbol].magnitude() > largest_velocity.magnitude():
largest_velocity = velocities[symbol]
return largest_velocity.magnitude()
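# Illustrative sketch (not part of the original script): how apply_gravity() is
# typically iterated until the force-directed layout settles. `histories` must
# already carry "std_distance" entries (see add_distances); starting positions
# are random.
def _example_layout(histories, iterations=500, tolerance=1e-3):
    import random
    points = {s: Point(random.random(), random.random()) for s in histories}
    for _ in range(iterations):
        if apply_gravity(points, histories) < tolerance:
            break
    return points
# bubble_color() (below) maps the expense ratio to hue (green = cheap, red =
# expensive) and the annualized growth slope to saturation (faster growth gives
# a deeper, less washed-out color).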
def bubble_color(expense_ratio, min_expense_ratio, max_expense_ratio, slope, min_slope, max_slope):
min_saturation = 0.80
red = int(
255
* (expense_ratio - min_expense_ratio)
/ (max_expense_ratio - min_expense_ratio)
) if max_expense_ratio > min_expense_ratio else 128
green = int(
255
* (max_expense_ratio - expense_ratio)
/ (max_expense_ratio - min_expense_ratio)
) if max_expense_ratio > min_expense_ratio else 128
blue = 0
saturation = (
(slope - min_slope)
/ (max_slope - min_slope)) if max_slope > min_slope else 0.50
return "#%02x%02x%02x" % (
red + int((255 - red) * min_saturation * (1.00 - saturation)),
green + int((255 - green) * min_saturation * (1.00 - saturation)),
blue + int((255 - blue) * min_saturation * (1.00 - saturation))
)
def add_circle(drawing, main_drawing, location, radius, color):
drawing.add(
main_drawing.circle(
center=location,
r=radius,
fill=color,
)
)
def add_label(drawing, main_drawing, location, text, rotate=0, size="1px"):
drawing.add(
main_drawing.text(
text,
insert=location,
font_size=size,
transform='rotate(%d,%s, %s)' % (rotate, location[0], location[1]),
)
)
def add_rect(drawing, main_drawing, x, y, width, height, color):
drawing.add(
main_drawing.rect((x, y), (width, height), fill=color)
)
def graph_key(drawing, main_drawing, width, height, radius_info, color_info, saturation_info):
max_radius = radius_info['max']
min_radius = radius_info['min']
mid_radius = (max_radius + min_radius) / 2
max_yield = 100.0 * radius_info['max_value']
min_yield = 100.0 * radius_info['min_value']
mid_yield = (max_yield + min_yield) / 2
max_expense_ratio = color_info['max_value']
min_expense_ratio = color_info['min_value']
mid_expense_ratio = (max_expense_ratio + min_expense_ratio) / 2
max_slope= saturation_info['max_value']
min_slope = saturation_info['min_value']
mid_slope = (max_slope + min_slope) / 2
border = 0.5
cell_size = 3.0
color_table = [
[
bubble_color(max_expense_ratio, min_expense_ratio, max_expense_ratio, min_slope, min_slope, max_slope),
bubble_color(mid_expense_ratio, min_expense_ratio, max_expense_ratio, min_slope, min_slope, max_slope),
bubble_color(min_expense_ratio, min_expense_ratio, max_expense_ratio, min_slope, min_slope, max_slope),
],
[
bubble_color(max_expense_ratio, min_expense_ratio, max_expense_ratio, mid_slope, min_slope, max_slope),
bubble_color(mid_expense_ratio, min_expense_ratio, max_expense_ratio, mid_slope, min_slope, max_slope),
bubble_color(min_expense_ratio, min_expense_ratio, max_expense_ratio, mid_slope, min_slope, max_slope),
],
[
bubble_color(max_expense_ratio, min_expense_ratio, max_expense_ratio, max_slope, min_slope, max_slope),
bubble_color(mid_expense_ratio, min_expense_ratio, max_expense_ratio, max_slope, min_slope, max_slope),
bubble_color(min_expense_ratio, min_expense_ratio, max_expense_ratio, max_slope, min_slope, max_slope),
],
]
circle_center = (width - max_radius - border, height - max_radius - border)
add_circle(drawing, main_drawing, circle_center, max_radius, "#88AAFF")
add_label(drawing, main_drawing, circle_center, "%0.2f%%"%(max_yield))
circle_center = (width - 2 * max_radius - mid_radius - border, height - mid_radius - border)
add_circle(drawing, main_drawing, circle_center, mid_radius, "#88AAFF")
add_label(drawing, main_drawing, circle_center, "%0.2f%%"%(mid_yield))
circle_center = (width - 2 * max_radius - 2 * mid_radius - min_radius - border, height - min_radius - border)
add_circle(drawing, main_drawing, circle_center, min_radius, "#88AAFF")
add_label(drawing, main_drawing, circle_center, "%0.2f%%"%(min_yield))
circle_center = (width - 2 * max_radius - border, height - border)
add_label(drawing, main_drawing, circle_center, "Yield")
for row in range(0, len(color_table)):
for column in range(0, len(color_table[row])):
add_rect(drawing, main_drawing, border + column * cell_size, height - border - (row + 1) * cell_size, cell_size, cell_size, color_table[row][column])
circle_center = (border + cell_size * 0 + border, height - border - cell_size * 3 - border)
add_label(drawing, main_drawing, circle_center, "%0.2f%%"%(100.0 * max_expense_ratio), rotate=-30)
circle_center = (border + cell_size * 1 + border, height - border - cell_size * 3 - border)
add_label(drawing, main_drawing, circle_center, "%0.2f%%"%(100.0 * mid_expense_ratio), rotate=-30)
circle_center = (border + cell_size * 2 + border, height - border - cell_size * 3 - border)
add_label(drawing, main_drawing, circle_center, "%0.2f%%"%(100.0 * min_expense_ratio), rotate=-30)
circle_center = (border + cell_size * 0, height - border - cell_size * 4 - border)
add_label(drawing, main_drawing, circle_center, "Expense Ratio")
circle_center = (border + cell_size * 3 + border, height - border - cell_size * 0 - cell_size / 2)
add_label(drawing, main_drawing, circle_center, "%0.2f%%"%(100.0 * min_slope))
circle_center = (border + cell_size * 3 + border, height - border - cell_size * 1 - cell_size / 2)
add_label(drawing, main_drawing, circle_center, "%0.2f%%"%(100.0 * mid_slope))
circle_center = (border + cell_size * 3 + border, height - border - cell_size * 2 - cell_size / 2)
add_label(drawing, main_drawing, circle_center, "%0.2f%%"%(100.0 * max_slope))
circle_center = (border + cell_size * 4 + 2 * border, height - border - cell_size * 2 - cell_size / 2)
add_label(drawing, main_drawing, circle_center, "Growth Rate", rotate=90)
def graph_points(histories, points=None, scale=1):
"""Graph all the equities"""
# pylint: disable=too-many-locals
if points is None:
points = {s: Point(*histories[s]["std_location"]) for s in histories}
max_radius = min(
[
min(
[
histories[s1]["std_distance"][s2]
for s2 in histories[s1]["std_distance"]
]
)
for s1 in histories
]
)
# sqrt because yield is radius
min_yield = math.sqrt(min([histories[s]["stats"]["yield"] for s in histories]))
max_yield = math.sqrt(max([histories[s]["stats"]["yield"] for s in histories]))
min_expense_ratio = min([histories[s]["stats"]["expense_ratio"] for s in histories])
max_expense_ratio = max([histories[s]["stats"]["expense_ratio"] for s in histories])
min_slope = min([histories[s]["slope"] for s in histories])
max_slope = max([histories[s]["slope"] for s in histories])
min_radius = 0.25 * max_radius
min_x = min([points[p].get_x() for p in points]) - 2 * max_radius
max_x = max([points[p].get_x() for p in points]) + 2 * max_radius
min_y = min([points[p].get_y() for p in points]) - 2 * max_radius
max_y = max([points[p].get_y() for p in points]) + 2 * max_radius
footer = 15
right_margin = 5
main_drawing = svgwrite.Drawing(
size=(scale * (max_x - min_x + right_margin), scale * (max_y - min_y + footer))
)
drawing = main_drawing.g(transform="scale(%d)" % (scale))
add_rect(drawing, main_drawing, 0, 0, max_x - min_x + right_margin, max_y - min_y + footer, "lightgray")
graph_key(drawing, main_drawing, (max_x - min_x + right_margin), (max_y - min_y + footer),
{'min': min_radius, 'max': max_radius, 'min_value': min_yield**2, 'max_value': max_yield**2},
{'min_value': min_expense_ratio, 'max_value': max_expense_ratio},
{'min_value': min_slope, 'max_value': max_slope})
for symbol in points:
expense_ratio = histories[symbol]["stats"]["expense_ratio"]
slope = histories[symbol]["slope"]
color = bubble_color(expense_ratio, min_expense_ratio, max_expense_ratio, slope, min_slope, max_slope)
dividend = math.sqrt(histories[symbol]["stats"]["yield"])
radius = (max_radius - min_radius) * (dividend - min_yield) / (
max_yield - min_yield
) + min_radius
add_circle(drawing, main_drawing, (points[symbol].get_x() - min_x, points[symbol].get_y() - min_y), radius, color)
for symbol in points:
add_label(drawing, main_drawing, (points[symbol].get_x() - min_x, points[symbol].get_y() - min_y), symbol)
main_drawing.add(drawing)
return main_drawing.tostring()
def add_locations(histories):
"""Place the equities in the edge of a circle, close to their nearest equity"""
# pylint: disable=too-many-locals
max_distance = max(
[
max(
[
histories[s1]["std_distance"][s2]
for s2 in histories[s1]["std_distance"]
]
)
for s1 in histories
]
)
min_distance = min(
[
min(
[
histories[s1]["std_distance"][s2]
for s2 in histories[s1]["std_distance"]
]
)
for s1 in histories
]
)
circle_radius = max_distance * (len(histories) - 1) / 2.0
radians_per_point = MAX_CIRCLE_RADIANS / len(histories)
symbols = list(histories)
negative = True
index = 0
start_symbol = [
s1
for s1 in histories
if min_distance
== min(
[histories[s1]["std_distance"][s2] for s2 in histories[s1]["std_distance"]]
)
][0]
points = {
start_symbol: Point(
math.cos(index * radians_per_point) * circle_radius,
math.sin(index * radians_per_point) * circle_radius,
)
}
symbols.remove(start_symbol)
used_symbols = [start_symbol]
while symbols:
sign = -1 if negative else 1
if negative:
index += 1
near_symbol = used_symbols[0]
insert_location = 0
else:
near_symbol = used_symbols[-1]
insert_location = len(used_symbols)
next_symbol = sorted(
symbols,
key=lambda s: histories[near_symbol]["std_distance"][s],
)[0]
points[next_symbol] = Point(
math.cos(sign * index * radians_per_point) * circle_radius,
math.sin(sign * index * radians_per_point) * circle_radius,
)
negative = not negative
symbols.remove(next_symbol)
used_symbols.insert(insert_location, next_symbol)
change = 100
with open("log.html", "w") as log_file:
log_file.write("<html><body>\n")
while change > 0.001:
change = apply_gravity(points, histories, speed=0.050)
log_file.write(graph_points(histories, points) + "\n")
log_file.flush()
log_file.write("</body></html>\n")
min_x = min([points[p].get_x() for p in points])
min_y = min([points[p].get_y() for p in points])
for symbol in points:
histories[symbol]["std_location"] = (
points[symbol].get_x() - min_x,
points[symbol].get_y() - min_y,
)
def pre_fetch_symbols(symbol_queue):
while True:
symbol = symbol_queue.get()
if symbol is None:
symbol_queue.put(None)
break
try:
get_symbol_history(symbol)
get_symbol_stats(symbol)
except Exception:  # best-effort pre-fetch; ignore failures here
pass
def main(args):
"""Plot various equities"""
expense_ratio_high_limit = args.max_expense_ratio
symbols = args.symbols
histories = {
x: calculate_variance(load_history(x), get_symbol_stats(x))
for x in args.symbols
}
add_distances(histories)
add_locations(histories)
with open("plot.html", "w") as plot_file:
plot_file.write("<html>\n")
plot_file.write("<head><style>td { text-align: right; }</style>\n")
plot_file.write("<body>\n")
plot_file.write(graph_points(histories, scale=20) + "\n")
plot_file.write("<table>\n")
plot_file.write("<tr><th>Symbol</th><th>Yield</th><th>Expense Ratio</th><th>Total Assets / Market Cap</th><th>Percent of total</th><th>Growth Rate</th><tr>\n")
market_sum = sum([histories[x]['stats']['total_value'] for x in histories])
for symbol in sorted(histories, key=lambda x:histories[x]['stats']['total_value'], reverse=True):
plot_file.write("<tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>\n"%(
symbol,
"%0.2f%%"%(100.0 * histories[symbol]['stats']['yield']),
"%0.2f%%"%(100.0 * histories[symbol]['stats']['expense_ratio']),
"%0.0f"%(histories[symbol]['stats']['total_value']),
"%0.2f%%"%(100.0 * histories[symbol]['stats']['total_value'] / market_sum),
"%0.2f%%"%(100.0 * histories[symbol]['slope']),
))
plot_file.write("</table>\n")
plot_file.write("</body></html>\n")
def parse_arguments():
parser = argparse.ArgumentParser(description='Determine correlation between stock movements')
parser.add_argument('-e', '--max-expense-ratio', dest='max_expense_ratio', type=float, default=1.0, help="Maximum expense ratio (in percent) to allow")
parser.add_argument('symbols', nargs='+', help='Equity symbols')
args = parser.parse_args()
if len(args.symbols) <= 2:
parser.print_help()
print("You must specify at least two symbols")
sys.exit(1)
symbol_queue = queue.Queue()
fetchers = [start_thread(pre_fetch_symbols, symbol_queue) for _ in range(0, 8)]
for symbol in (args.symbols):
symbol_queue.put(symbol)
symbol_queue.put(None)
[t.join() for t in fetchers]
args.symbols = [s for s in args.symbols if get_symbol_stats(s)['expense_ratio'] * 100.0 <= args.max_expense_ratio]
if len(args.symbols) <= 2:
parser.print_help()
print("You must specify at least two symbols that have an expense ratio less than %0.2f%%"%(args.max_expense_ratio))
sys.exit(1)
print("Symbols less then expense ratio of %0.2f%%: %s"%(args.max_expense_ratio, ", ".join(args.symbols)))
return args
if __name__ == "__main__":
main(parse_arguments())
|
server.py
|
import backoff
import grpc
import logging
import queue
import redis
import threading
import time
import uuid
import log
from battleships_pb2 import Attack, Response, Status
from battleships_pb2_grpc import BattleshipsServicer
from game import Game
from message import Message
logger = log.get_logger(__name__)
logger.setLevel(logging.DEBUG)
class Battleship(BattleshipsServicer):
def __init__(self, redis_host, redis_port='6379', db=0):
"""Create a Battleship (server) instance.
:param redis_host: Hostname of Redis instance
:param redis_port: Port of Redis instance
:param db: Database to use within Redis instance
:raise ConnectionError: if connection to Redis fails
"""
logger.info('Starting Battleship. Connect to Redis '
f'at {redis_host}:{redis_port}.')
self.__r = redis.Redis(host=redis_host, port=redis_port, db=db)
if not self.ping_redis():
raise ConnectionError('Unable to connect to Redis server!')
else:
logger.info('Battleship server connected to Redis server.')
def Game(self, request_iterator, context):
"""This method is the implementation of the gRPC Game service.
When connected, this provides the main functionality of the
Battleship game.
:param request_iterator: iterator providing gRPC requests
:param context: a gRPC context object
:return: A generator providing gRPC responses
"""
server = _Server(self.__r)
with server:
yield from server.start(request_iterator, context)
def ping_redis(self):
"""Ping a Redis instance to see whether it's alive.
:return: True if connection to instance established, False otherwise
"""
@backoff.on_exception(backoff.expo,
redis.exceptions.ConnectionError,
max_time=60)
def __ping_redis():
"""Convenience function that does the actual Redis PING.
"""
logger.info('Pinging Redis server...')
return self.__r.ping()
try:
return __ping_redis()
except redis.exceptions.ConnectionError:
logger.error('Problem pinging Redis. Retry?')
return False
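# A minimal bootstrap sketch (not part of the original module) showing how this
# servicer might be attached to a gRPC server. The registration helper name
# add_BattleshipsServicer_to_server is assumed to be the generated function for
# this service; the Redis host, port, and worker count are placeholders.
#
#   from concurrent import futures
#   from battleships_pb2_grpc import add_BattleshipsServicer_to_server
#
#   grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
#   add_BattleshipsServicer_to_server(Battleship('localhost'), grpc_server)
#   grpc_server.add_insecure_port('[::]:50051')
#   grpc_server.start()
#   grpc_server.wait_for_termination()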
class _Server:
OpenGames = 'openGames'
def __init__(self, _redis):
self.__r = _redis
self.__q = queue.Queue()
self.__e = threading.Event()
self.__e.set()
self.__stream = None
self.__context = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def start(self, request_iterator, context):
"""Method that starts the actual server.
:param request_iterator: iterator that provides message
:param context: gRPC context object
"""
self.__stream = request_iterator
self.__context = context
while True:
request = self.recv()
if request is not None:
break
if not request.HasField('join'):
logger.error('Not a join message!')
return
player_id = request.join.id
if player_id == '':
logger.error('Player message ID is empty')
return
logger.info(f'Player {player_id} is attempting to join')
game, is_new = self.find_game_or_create()
logger.info(f'Connecting to game {game.id}. '
f'New? {"Yes" if is_new else "No"}')
logger.info('Setting up server to start receiving PubSub messages')
pubsub_thread = self.subscribe_redis(game, player_id)
if not self.connect_game(game, player_id, is_new):
logger.error('Unable to connect to a game!')
return
game_thread = self.subscribe_grpc(game, player_id)
yield from self.get()
logger.info('Stopping all threads')
game_thread.join()
pubsub_thread.stop()
self.close_open_game(game)
def stop(self):
"""Stop the game from running.
"""
self.__e.clear()
def connect_game(self, game, player_id, is_new):
"""Join an existing game or advertise this one as open if game
is not yet in progress.
:param game: Game
:param player_id: ID of player
:param is_new: True if game is new, False otherwise
"""
if is_new:
return self.add_open_game(game)
if not self.ensure_subscribers(game, 2):
return False
msg = Message(Message.BEGIN, player_id, '')
self.publish(game.id, msg)
return True
def recv(self):
"""Receive a gRPC message.
:return: gRPC message that was received
"""
try:
return next(self.__stream)
except grpc.RpcError:
logger.error('An RPC error occurred!')
self.stop()
except StopIteration:
logger.warning('recv() - iteration stopped')
self.stop()
def send(self, response):
"""Send a gRPC message.
:param response: Response to send to the client
"""
self.__q.put_nowait(response)
def get(self):
"""Get next message from the queue. It keeps running until it
sees that the is_running flag is False, then it returns.
:return: Next message in queue
"""
while self.is_running:
try:
yield self.__q.get(timeout=0.5)
except queue.Empty:
pass
@property
def is_running(self):
"""Is the game still running?
:return: True if running, False otherwise
"""
return self.__e.is_set()
def close(self):
"""Close connections, like the connection to the Redis instance.
"""
self.__r.close()
def subscribe_grpc(self, game, player_id):
"""Create a thread that handles incoming gRPC requests.
:param game: Game to handle requests for
:param player_id: Player this game server is handling
:return: Thread handling the gRPC requests
"""
game_thread = threading.Thread(
target=lambda: self.handle_grpc(game, player_id))
game_thread.daemon = True
game_thread.start()
return game_thread
def handle_grpc(self, game, player_id):
"""Handle actual gRPC requests.
:param game: Game to handle
:param player_id: Id of player this game server is handling
"""
while True:
request = self.recv()
if request is None:
return
if request.HasField('move'):
vector = request.move.vector
logger.info(f'({player_id}) - gRPC - {{Attack}} - '
f'{vector}')
# It must be my move if we have to handle an Attack
if game.my_turn:
msg = Message(Message.ATTACK, player_id, vector)
self.publish(game.id, msg)
else:
logger.error(f'({player_id}) - gRPC - '
'Got {Attack} request but not my turn!')
elif request.HasField('report'):
state = request.report.state
logger.info(f'({player_id}) - gRPC - {{Report}} - {state}. '
f'My Turn? {"Yes" if game.my_turn else "No"}.')
# It must not be my move if we have to handle a Report
if not game.my_turn:
if state == Status.State.DEFEAT:
msg = Message(Message.LOST, player_id, '')
else:
msg = Message(Message.STATUS, player_id, str(state))
self.publish(game.id, msg)
else:
logger.error(f'({player_id}) - gRPC - '
'Got {Report} request but my turn!')
else:
logger.error('Received an unknown message type!')
@property
def redis_conn(self):
"""Return Redis client as a property.
"""
return self.__r
def publish(self, channel, message):
"""Publish a message to Redis PubSub on a certain channel.
:param channel: Channel to use
:param message: Message to publish
"""
self.__r.publish(channel, message.dumps())
def subscribe_redis(self, game, player_id):
"""Subscribe to game.id channel but in a separate thread.
The handler that is used for the pubsub message is called
handle_pubsub, which is a method of this class.
:param game: Game of which the ID is used to subscribe
:param player_id: ID of player this game server is handling
:return: Thread that the handler is running in
"""
def get_pubsub_handler():
def handle_pubsub(msg):
return self.handle_pubsub(msg, game, player_id)
return handle_pubsub
logger.info(f'Subscribing to channel {game.id}')
p = self.__r.pubsub(ignore_subscribe_messages=True)
p.subscribe(**{game.id: get_pubsub_handler()})
thread = p.run_in_thread(sleep_time=0.001)
return thread
def handle_pubsub(self, msg, game, player_id):
"""Handle published messages from Redis PubSub.
:param msg: PubSub message to handle
:param game: Game for which to handle messages
:param player_id: Player for which we're receiving messages
"""
message = Message.recreate(msg['data'])
message_type = message.type
if message_type == Message.BEGIN:
response = Response(turn=Response.State.BEGIN)
self.send(response)
if message.player == player_id:
# Stop this player's turn (this will start other player's turn)
message = Message(Message.STOP_TURN, player_id, '')
self.publish(game.id, message)
elif message_type == Message.STOP_TURN:
logger.info(f'({player_id}) - pubsub - '
f'Received STOP_TURN from player {message.player}')
if message.player == player_id:
logger.info(f'({player_id}) - '
f'Ending turn for player {player_id}')
game.end_turn()
turn = Response.State.STOP_TURN
else:
logger.info(f'({player_id}) - '
f'Starting turn for player {player_id}')
game.start_turn()
turn = Response.State.START_TURN
self.send(Response(turn=turn))
elif message_type == Message.ATTACK:
logger.info(f'({player_id}) - pubsub - '
f'Received ATTACK from player {message.player} '
f'with vector {message.data}.')
if message.player != player_id:
self.send(Response(move=Attack(vector=message.data)))
elif message_type == Message.STATUS:
states = {
'0': ('MISS', Status.State.MISS),
'1': ('HIT', Status.State.HIT),
'2': ('DEFEAT', Status.State.DEFEAT),
'3': ('SUNK', Status.State.SUNK),
}
state = states[message.data][0]
logger.info(f'({player_id}) - pubsub - '
f'Received STATUS from player {message.player} with '
f'state {state}.')
if message.player != player_id:
state = states[message.data][1]
self.send(Response(report=Status(state=state)))
# Stop this player's turn (this will start other
# player's turn). Because the status comes from the
# other player, it means that this player is the one who
# attacked and hence whose turn it was).
message = Message(Message.STOP_TURN, player_id, '')
self.publish(game.id, message)
elif message_type == Message.LOST:
logger.info(f'({player_id}) - pubsub - '
f'Received LOST from player {message.player}.')
turn = Response.State.LOSE
if message.player != player_id:
turn = Response.State.WIN
self.send(Response(turn=turn))
self.stop()
def ensure_subscribers(self, game, n):
"""Ensure that {n} listeners are subscribed to the id of the
game passed in as a parameter.
:param game: Game of which the ID is checked
:param n: The number of subscribers we're expecting
"""
for x in range(5):
values = self.__r.pubsub_numsub(game.id)
if len(values) < 1:
return False
_, nsub = values[0]
if n == nsub:
return True
time.sleep(0.1)
logger.error(f'Timeout trying to ensure {n} subscribers')
return False
def find_game_or_create(self):
"""Try to find an open game in Redis or create a new game if
none found.
:return: A tuple containing a Game object and a flag is_new
which indicates that a new game was created.
"""
b_game_id = self.__r.rpop(self.OpenGames)
# b_game_id is None if no open game found
is_new = b_game_id is None
if is_new:
logger.info('Could not find open game, creating new one')
game_id = str(uuid.uuid4())
else:
game_id = b_game_id.decode('utf-8')
return Game(game_id), is_new
def add_open_game(self, game):
"""Add an open game to the Redis instance so it can be discovered.
:param game: Game to be advertised
:return: True if successful, False otherwise
"""
logger.info(f'Adding open game {game.id}')
return self.__r.lpush(self.OpenGames, game.id)
def close_open_game(self, game):
"""Remove an open game from the Redis instance so it can no longer
be discovered.
:param game: Game to be closed
"""
logger.info(f'Closing open game {game.id}')
return self.__r.lrem(self.OpenGames, 1, game.id)
|
wikisourcetext.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This bot applies to Wikisource sites to upload text.
Text is uploaded to pages in Page ns, for a specified Index.
If the page does not yet exist, the text to be stored is preloaded from the file
used to create the Index page, making the upload feature independent of the
format of the file, as long as it is supported by the MW ProofreadPage extension.
As an alternative, if the '-ocr' option is selected, the
https://phetools.toolforge.org/ OCR tool will be used to get the text.
In this case, already existing pages with quality value 'Not Proofread'
can also be treated; '-force' will overwrite such existing pages.
TODO: update params + handle quality level
The following parameters are supported:
-index:... name of the index page.
-pages:<start>-<end>,...<start>-<end>,<start>-<end>
Page range to upload;
optional, start=1, end=djvu file number of images.
Page ranges can be specified as:
| A-B -> pages A until B
| A- -> pages A until number of images
| A -> just page A
| -B -> pages 1 until B
-showdiff: show difference between current text and new text when
saving the page.
-ocr: use OCR tools hosted on https://toolforge.org.
By default no OCR is done, i.e. only not-(yet)-existing
pages in Page ns will be treated and text will be fetched
via preload.
If -ocr is provided, default OCR method is:
- https://phetools.toolforge.org/
If ocr:googleOCR is given, OCR method is:
- https://ws-google-ocr.toolforge.org/
-threads:n number of threads used to fetch OCR from OCR tools.
default is 5; valid only if '-ocr' is selected.
-force: overwrite existing pages;
default is False; valid only if '-ocr' is selected.
-summary: custom edit summary.
Use quotes if edit summary contains spaces.
-always don't bother asking to confirm any of the changes.
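Example (a hypothetical invocation; the index title and page range are
placeholders for real values):
    python pwb.py wikisourcetext -index:"Index title" -pages:1-5 -showdiff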
"""
#
# (C) Pywikibot team, 2016-2020
#
# Distributed under the terms of the MIT license.
#
import collections
import itertools
import queue
import threading
import time
import pywikibot
from pywikibot import i18n
from pywikibot.bot import SingleSiteBot
from pywikibot.proofreadpage import IndexPage, ProofreadPage
class UploadTextBot(SingleSiteBot):
"""
A bot that uploads text-layer to Page:namespace.
Text is fetched via preload as on Wikisource wikis: text can be preloaded
only if a page does not exist and an Index page is present.
Works only on sites with Proofread Page extension installed.
"""
def __init__(self, generator, **kwargs):
"""
Initializer.
If OCR is requested, spawns worker threads, and, if no "force" option
is set, filter for existing pages.
Queues are used for communication to/from threads.
A PriorityQueue is used to process pages in the same order as
they are generated.
@param generator: page generator
@type generator: generator
"""
self.available_options.update({
'showdiff': False,
'force': False,
'ocr': False,
'summary': 'Bot: uploading text',
'threads': 5
})
super().__init__(**kwargs)
self.generator = generator
# Get edit summary message if it's empty.
if not self.opt.summary:
self.opt.summary = i18n.twtranslate(self.site, 'djvutext-creating')
if self.opt.ocr:
self._num_threads = self.opt.threads
self._queue_in = queue.Queue()
self._queue_out = queue.PriorityQueue()
# If not "-force", no reason to get OCR for existing pages
# and to process them in Bot.run().
if not self.opt.force:
self.generator = (p for p in self.generator if not p.exists())
self._spawn_ocr_threads()
def _spawn_ocr_threads(self):
"""Spawn threads for _ocr_worker workers."""
for i in range(self._num_threads):
worker = threading.Thread(target=self._ocr_worker)
worker.daemon = True
worker.start()
self._pages = collections.OrderedDict()
for idx, p in enumerate(self.generator):
self._pages.setdefault(p, idx)
self.generator = (p for p in self._pages) # recreate gen for run()
for p, idx in self._pages.items():
self._queue_in.put((p, idx)) # idx to preserve order later
def _ocr_worker(self):
"""Fetch OCR content from ocr_tool and queue it."""
while True:
page, idx = self._queue_in.get()
try:
text_body = page.ocr(ocr_tool=self.opt.ocr)
except ValueError as e:
pywikibot.error(e)
text_body = None # Sentinel: signal exception to self.treat()
self._queue_out.put((idx, text_body))
self._queue_in.task_done()
def _get_ocr(self, page):
"""Get OCR content for page from PriorityQueue."""
# blocks until OCR for expected idx is available
expected_idx = self._pages.get(page)
while True:
if self._queue_out.empty():
time.sleep(0.2) # some pause
continue
idx, text_body = self._queue_out.queue[0] # peek first element
if idx == expected_idx:
idx, text_body = self._queue_out.get()
return text_body
def treat(self, page):
"""Process one ProofreadPage page.
@param page: page to be treated.
@type page: ProofreadPage
@raises: pywikibot.Error
"""
if not isinstance(page, ProofreadPage):
raise pywikibot.Error('Page {} must be a ProofreadPage object.'
.format(page))
old_text = page.text if page.exists() else ''
if self.opt.ocr:
_body = self._get_ocr(page)
if _body is None:
pywikibot.output('No OCR found. Skipping {}'
.format(page.title(as_link=True)))
return
page.body = _body
if page.exists() and not (self.opt.ocr and self.opt.force):
pywikibot.output('Page {} already exists, not adding!'
.format(page))
else:
self.userPut(page, old_text, page.text, summary=self.opt.summary,
show_diff=self.opt.showdiff)
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: str
"""
index = None
pages = '1-'
options = {}
# Parse command line arguments.
local_args = pywikibot.handle_args(args)
for arg in local_args:
arg, sep, value = arg.partition(':')
if arg == '-index':
index = value
elif arg == '-pages':
pages = value
elif arg == '-showdiff':
options['showdiff'] = True
elif arg == '-summary':
options['summary'] = value
elif arg == '-ocr':
options['ocr'] = value or 'phetools'
elif arg == '-threads':
options['threads'] = int(value)
elif arg == '-force':
options['force'] = True
elif arg == '-always':
options['always'] = True
else:
pywikibot.output('Unknown argument ' + arg)
# index is mandatory.
if not index:
pywikibot.bot.suggest_help(missing_parameters=['-index'])
return
# '-force' can be used with '-ocr' only.
if 'force' in options and 'ocr' not in options:
pywikibot.error("'-force' can be used with '-ocr' option only.")
return
site = pywikibot.Site()
if not site.has_extension('ProofreadPage'):
pywikibot.error('Site {} must have ProofreadPage extension.'
.format(site))
return
index = IndexPage(site, index)
if not index.exists():
pywikibot.error("Page {} doesn't exist.".format(index))
return
# Parse pages param.
# Create a list of (start, end) tuples.
pages = pages.split(',')
for interval in range(len(pages)):
start, sep, end = pages[interval].partition('-')
start = 1 if not start else int(start)
if not sep:
end = start
else:
end = int(end) if end else index.num_pages
pages[interval] = (start, end)
# gen yields ProofreadPage objects.
gen_list = []
for start, end in sorted(pages):
gen = index.page_gen(start=start, end=end,
filter_ql=[1], content=True)
gen_list.append(gen)
gen = itertools.chain(*gen_list)
pywikibot.output('\nUploading text to {}\n'
.format(index.title(as_link=True)))
bot = UploadTextBot(gen, site=index.site, **options)
bot.run()
if __name__ == '__main__':
try:
main()
except Exception:
pywikibot.error('Fatal error:', exc_info=True)
|
subproc_vec_env.py
|
import numpy as np
from multiprocessing import Process, Pipe
from . import VecEnv, CloudpickleWrapper
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
try:
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'render':
remote.send(env.render(mode='rgb_array'))
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
except KeyboardInterrupt:
print('SubprocVecEnv worker: got KeyboardInterrupt')
finally:
env.close()
class SubprocVecEnv(VecEnv):
"""
VecEnv that runs multiple environments in parallel in subprocesses and communicates with them via pipes.
Recommended to use when num_envs > 1 and step() can be a bottleneck.
"""
def __init__(self, env_fns, spaces=None):
"""
Arguments:
env_fns: iterable of callables - functions that create environments to run in subprocesses. Need to be cloud-pickleable
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
self.viewer = None
self.specs = [f().spec for f in env_fns]
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
self._assert_not_closed()
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
self._assert_not_closed()
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return _flatten_obs(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
self._assert_not_closed()
for remote in self.remotes:
remote.send(('reset', None))
return _flatten_obs([remote.recv() for remote in self.remotes])
def close_extras(self):
self.closed = True
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
def get_images(self):
self._assert_not_closed()
for pipe in self.remotes:
pipe.send(('render', None))
imgs = [pipe.recv() for pipe in self.remotes]
return imgs
def _assert_not_closed(self):
assert not self.closed, "Trying to operate on a SubprocVecEnv after calling close()"
def _flatten_obs(obs):
assert isinstance(obs, list) or isinstance(obs, tuple)
assert len(obs) > 0
if isinstance(obs[0], dict):
import collections
keys = obs[0].keys()
return {k: np.stack([o[k] for o in obs]) for k in keys}
else:
return np.stack(obs)
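# A hypothetical usage sketch (not part of the original module); it assumes gym
# is installed and the environment id is only a placeholder:
#
#   import gym
#   env_fns = [lambda: gym.make('CartPole-v1') for _ in range(4)]
#   venv = SubprocVecEnv(env_fns)
#   obs = venv.reset()
#   obs, rews, dones, infos = venv.step([venv.action_space.sample() for _ in range(4)])
#   venv.close()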
|
example_test.py
|
import re
import os
import sys
import socket
import BaseHTTPServer
import SimpleHTTPServer
from threading import Thread
import ssl
try:
import IDF
except ImportError:
# this is a test case write with tiny-test-fw.
# to run test cases outside tiny-test-fw,
# we need to set environment variable `TEST_FW_PATH`,
# then get and insert `TEST_FW_PATH` to sys path before import FW module
test_fw_path = os.getenv("TEST_FW_PATH")
if test_fw_path and test_fw_path not in sys.path:
sys.path.insert(0, test_fw_path)
import IDF
import DUT
import random
import subprocess
server_cert = "-----BEGIN CERTIFICATE-----\n" \
"MIIDXTCCAkWgAwIBAgIJAP4LF7E72HakMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV\n"\
"BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX\n"\
"aWRnaXRzIFB0eSBMdGQwHhcNMTkwNjA3MDk1OTE2WhcNMjAwNjA2MDk1OTE2WjBF\n"\
"MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50\n"\
"ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB\n"\
"CgKCAQEAlzfCyv3mIv7TlLkObxunKfCdrJ/zgdANrsx0RBtpEPhV560hWJ0fEin0\n"\
"nIOMpJSiF9E6QsPdr6Q+eogH4XnOMU9JE+iG743N1dPfGEzJvRlyct/Ck8SswKPC\n"\
"9+VXsnOdZmUw9y/xtANbURA/TspvPzz3Avv382ffffrJGh7ooOmaZSCZFlSYHLZA\n"\
"w/XlRr0sSRbLpFGY0gXjaAV8iHHiPDYLy4kZOepjV9U51xi+IGsL4w75zuMgsHyF\n"\
"3nJeGYHgtGVBrkL0ZKG5udY0wcBjysjubDJC4iSlNiq2HD3fhs7j6CZddV2v845M\n"\
"lVKNxP0kO4Uj4D8r+5USWC8JKfAwxQIDAQABo1AwTjAdBgNVHQ4EFgQU6OE7ssfY\n"\
"IIPTDThiUoofUpsD5NwwHwYDVR0jBBgwFoAU6OE7ssfYIIPTDThiUoofUpsD5Nww\n"\
"DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAXIlHS/FJWfmcinUAxyBd\n"\
"/xd5Lu8ykeru6oaUCci+Vk9lyoMMES7lQ+b/00d5x7AcTawkTil9EWpBTPTOTraA\n"\
"lzJMQhNKmSLk0iIoTtAJtSZgUSpIIozqK6lenxQQDsHbXKU6h+u9H6KZE8YcjsFl\n"\
"6vL7sw9BVotw/VxfgjQ5OSGLgoLrdVT0z5C2qOuwOgz1c7jNiJhtMdwN+cOtnJp2\n"\
"fuBgEYyE3eeuWogvkWoDcIA8r17Ixzkpq2oJsdvZcHZPIZShPKW2SHUsl98KDemu\n"\
"y0pQyExmQUbwKE4vbFb9XuWCcL9XaOHQytyszt2DeD67AipvoBwVU7/LBOvqnsmy\n"\
"hA==\n"\
"-----END CERTIFICATE-----\n"
server_key = "-----BEGIN PRIVATE KEY-----\n"\
"MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCXN8LK/eYi/tOU\n"\
"uQ5vG6cp8J2sn/OB0A2uzHREG2kQ+FXnrSFYnR8SKfScg4yklKIX0TpCw92vpD56\n"\
"iAfhec4xT0kT6Ibvjc3V098YTMm9GXJy38KTxKzAo8L35Veyc51mZTD3L/G0A1tR\n"\
"ED9Oym8/PPcC+/fzZ999+skaHuig6ZplIJkWVJgctkDD9eVGvSxJFsukUZjSBeNo\n"\
"BXyIceI8NgvLiRk56mNX1TnXGL4gawvjDvnO4yCwfIXecl4ZgeC0ZUGuQvRkobm5\n"\
"1jTBwGPKyO5sMkLiJKU2KrYcPd+GzuPoJl11Xa/zjkyVUo3E/SQ7hSPgPyv7lRJY\n"\
"Lwkp8DDFAgMBAAECggEAfBhAfQE7mUByNbxgAgI5fot9eaqR1Nf+QpJ6X2H3KPwC\n"\
"02sa0HOwieFwYfj6tB1doBoNq7i89mTc+QUlIn4pHgIowHO0OGawomeKz5BEhjCZ\n"\
"4XeLYGSoODary2+kNkf2xY8JTfFEcyvGBpJEwc4S2VyYgRRx+IgnumTSH+N5mIKZ\n"\
"SXWNdZIuHEmkwod+rPRXs6/r+PH0eVW6WfpINEbr4zVAGXJx2zXQwd2cuV1GTJWh\n"\
"cPVOXLu+XJ9im9B370cYN6GqUnR3fui13urYbnWnEf3syvoH/zuZkyrVChauoFf8\n"\
"8EGb74/HhXK7Q2s8NRakx2c7OxQifCbcy03liUMmyQKBgQDFAob5B/66N4Q2cq/N\n"\
"MWPf98kYBYoLaeEOhEJhLQlKk0pIFCTmtpmUbpoEes2kCUbH7RwczpYko8tlKyoB\n"\
"6Fn6RY4zQQ64KZJI6kQVsjkYpcP/ihnOY6rbds+3yyv+4uPX7Eh9sYZwZMggE19M\n"\
"CkFHkwAjiwqhiiSlUxe20sWmowKBgQDEfx4lxuFzA1PBPeZKGVBTxYPQf+DSLCre\n"\
"ZFg3ZmrxbCjRq1O7Lra4FXWD3dmRq7NDk79JofoW50yD8wD7I0B7opdDfXD2idO8\n"\
"0dBnWUKDr2CAXyoLEINce9kJPbx4kFBQRN9PiGF7VkDQxeQ3kfS8CvcErpTKCOdy\n"\
"5wOwBTwJdwKBgDiTFTeGeDv5nVoVbS67tDao7XKchJvqd9q3WGiXikeELJyuTDqE\n"\
"zW22pTwMF+m3UEAxcxVCrhMvhkUzNAkANHaOatuFHzj7lyqhO5QPbh4J3FMR0X9X\n"\
"V8VWRSg+jA/SECP9koOl6zlzd5Tee0tW1pA7QpryXscs6IEhb3ns5R2JAoGAIkzO\n"\
"RmnhEOKTzDex611f2D+yMsMfy5BKK2f4vjLymBH5TiBKDXKqEpgsW0huoi8Gq9Uu\n"\
"nvvXXAgkIyRYF36f0vUe0nkjLuYAQAWgC2pZYgNLJR13iVbol0xHJoXQUHtgiaJ8\n"\
"GLYFzjHQPqFMpSalQe3oELko39uOC1CoJCHFySECgYBeycUnRBikCO2n8DNhY4Eg\n"\
"9Y3oxcssRt6ea5BZwgW2eAYi7/XqKkmxoSoOykUt3MJx9+EkkrL17bxFSpkj1tvL\n"\
"qvxn7egtsKjjgGNAxwXC4MwCvhveyUQQxtQb8AqGrGqo4jEEN0L15cnP38i2x1Uo\n"\
"muhfskWf4MABV0yTUaKcGg==\n"\
"-----END PRIVATE KEY-----\n"
def get_my_ip():
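# Determine the host's outbound IP address by "connecting" a UDP socket to a
# public address; for UDP, connect() sends no packets, it only makes the OS
# select the local interface/IP that would be used for that route.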
s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s1.connect(("8.8.8.8", 80))
my_ip = s1.getsockname()[0]
s1.close()
return my_ip
def get_server_status(host_ip, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_status = sock.connect_ex((host_ip, port))
sock.close()
if server_status == 0:
return True
return False
def create_file(server_file, file_data):
with open(server_file, "w+") as file:
file.write(file_data)
def get_ca_cert(ota_image_dir):
os.chdir(ota_image_dir)
server_file = os.path.join(ota_image_dir, "server_cert.pem")
create_file(server_file, server_cert)
key_file = os.path.join(ota_image_dir, "server_key.pem")
create_file(key_file, server_key)
return server_file, key_file
def start_https_server(ota_image_dir, server_ip, server_port):
server_file, key_file = get_ca_cert(ota_image_dir)
httpd = BaseHTTPServer.HTTPServer((server_ip, server_port),
SimpleHTTPServer.SimpleHTTPRequestHandler)
httpd.socket = ssl.wrap_socket(httpd.socket,
keyfile=key_file,
certfile=server_file, server_side=True)
httpd.serve_forever()
def start_chunked_server(ota_image_dir, server_port):
server_file, key_file = get_ca_cert(ota_image_dir)
chunked_server = subprocess.Popen(["openssl", "s_server", "-WWW", "-key", key_file, "-cert", server_file, "-port", str(server_port)])
return chunked_server
@IDF.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_native_ota_example(env, extra_data):
"""
This is a positive test case, which downloads the complete binary file multiple times.
Number of iterations can be specified in variable iterations.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut("native_ota_example", "examples/system/ota/native_ota_example")
server_port = 8002
# Number of times the working of the application is validated
iterations = 3
# File to be downloaded. This file is generated after compilation
bin_name = "native_ota.bin"
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
IDF.log_performance("native_ota_bin_size", "{}KB".format(bin_size // 1024))
IDF.check_performance("native_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
for i in range(iterations):
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
# the HTTPS server thread runs as a daemon, so no explicit shutdown is needed here
dut1.expect("Connect to Wifi ! Start to Connect to Server....", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":" + str(server_port) + "/" + bin_name))
dut1.write("https://" + host_ip + ":" + str(server_port) + "/" + bin_name)
dut1.expect("Loaded app from partition at offset", timeout=60)
dut1.expect("Connect to Wifi ! Start to Connect to Server....", timeout=30)
dut1.reset()
@IDF.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_native_ota_example_truncated_bin(env, extra_data):
"""
This test case validates the working of OTA when the binary file is truncated.
The application should return an error message in this case.
steps: |
1. join AP
2. Generate truncated binary file
3. Fetch OTA image over HTTPS
4. Check working of code if bin is truncated
"""
dut1 = env.get_dut("native_ota_example", "examples/system/ota/native_ota_example")
server_port = 8002
# Original binary file generated after compilation
bin_name = "native_ota.bin"
# Truncated binary file to be generated from original binary file
truncated_bin_name = "truncated.bin"
# Size of truncated file to be generated. This value can range from 288 bytes (image header size) to the size of the original binary file
# truncated_bin_size is set to 64000 to reduce the time consumed by the test case
truncated_bin_size = 64000
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
f = open(binary_file, "r+")
fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), "w+")
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
IDF.log_performance("native_ota_bin_size", "{}KB".format(bin_size // 1024))
IDF.check_performance("native_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=60)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Connect to Wifi ! Start to Connect to Server....", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":" + str(server_port) + "/" + truncated_bin_name))
dut1.write("https://" + host_ip + ":" + str(server_port) + "/" + truncated_bin_name)
dut1.expect("native_ota_example: Image validation failed, image is corrupted", timeout=20)
os.remove(binary_file)
@IDF.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_native_ota_example_truncated_header(env, extra_data):
"""
This test case validates the working of OTA when the headers of the binary file are truncated.
The application should return an error message in this case.
steps: |
1. join AP
2. Generate binary file with truncated headers
3. Fetch OTA image over HTTPS
4. Check working of code if headers are not sent completely
"""
dut1 = env.get_dut("native_ota_example", "examples/system/ota/native_ota_example")
server_port = 8002
# Original binary file generated after compilation
bin_name = "native_ota.bin"
# Truncated binary file to be generated from original binary file
truncated_bin_name = "truncated_header.bin"
# Size of truncated file to be generated. This value should be less than 288 bytes (image header size)
truncated_bin_size = 180
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
f = open(binary_file, "r+")
fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), "w+")
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
IDF.log_performance("native_ota_bin_size", "{}KB".format(bin_size // 1024))
IDF.check_performance("native_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=60)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Connect to Wifi ! Start to Connect to Server....", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":" + str(server_port) + "/" + truncated_bin_name))
dut1.write("https://" + host_ip + ":" + str(server_port) + "/" + truncated_bin_name)
dut1.expect("native_ota_example: received package is not fit len", timeout=20)
os.remove(binary_file)
@IDF.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_native_ota_example_random(env, extra_data):
"""
This test case validates the working of OTA when the binary file consists of random data.
Magic byte verification should fail in this case.
steps: |
1. join AP
2. Generate random binary image
3. Fetch OTA image over HTTPS
4. Check working of code for random binary file
"""
dut1 = env.get_dut("native_ota_example", "examples/system/ota/native_ota_example")
server_port = 8002
# Random binary file to be generated
random_bin_name = "random.bin"
# Size of random binary file. 32000 is chosen to reduce the time required to run the test case
random_bin_size = 32000
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, random_bin_name)
fo = open(binary_file, "w+")
# First byte of the binary file is always set to zero. If the first byte were generated randomly,
# it could sometimes be 0xE9, which would result in failure of the test case.
fo.write(str(0))
for i in range(random_bin_size - 1):
fo.write(str(random.randrange(0,255,1)))
fo.close()
bin_size = os.path.getsize(binary_file)
IDF.log_performance("native_ota_bin_size", "{}KB".format(bin_size // 1024))
IDF.check_performance("native_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=60)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Connect to Wifi ! Start to Connect to Server....", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":" + str(server_port) + "/" + random_bin_name))
dut1.write("https://" + host_ip + ":" + str(server_port) + "/" + random_bin_name)
dut1.expect("esp_ota_ops: OTA image has invalid magic byte", timeout=20)
os.remove(binary_file)
@IDF.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_native_ota_example_chunked(env, extra_data):
"""
This is a positive test case, which downloads the complete binary file multiple times.
Number of iterations can be specified in variable iterations.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut("native_ota_example", "examples/system/ota/native_ota_example")
# File to be downloaded. This file is generated after compilation
bin_name = "native_ota.bin"
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
IDF.log_performance("native_ota_bin_size", "{}KB".format(bin_size // 1024))
IDF.check_performance("native_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
chunked_server = start_chunked_server(dut1.app.binary_path, 8070)
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Connect to Wifi ! Start to Connect to Server....", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8070/" + bin_name))
dut1.write("https://" + host_ip + ":8070/" + bin_name)
dut1.expect("Loaded app from partition at offset", timeout=60)
dut1.expect("Connect to Wifi ! Start to Connect to Server....", timeout=30)
chunked_server.kill()
os.remove(os.path.join(dut1.app.binary_path, "server_cert.pem"))
os.remove(os.path.join(dut1.app.binary_path, "server_key.pem"))
if __name__ == '__main__':
test_examples_protocol_native_ota_example()
test_examples_protocol_native_ota_example_chunked()
test_examples_protocol_native_ota_example_truncated_bin()
test_examples_protocol_native_ota_example_truncated_header()
test_examples_protocol_native_ota_example_random()
|
__init__.py
|
"""
S3 Binding Module with logging handler and stream object
"""
__author__ = 'Omri Eival'
import atexit
import signal
import threading
import queue
import gzip
import codecs
from logging import StreamHandler
from io import BufferedIOBase, BytesIO
from boto3 import Session
import time
from aws_logging_handlers.validation import is_non_empty_string, is_positive_int, empty_str_err, bad_integer_err, ValidationRule
from aws_logging_handlers.tasks import Task, task_worker, STOP_SIGNAL
DEFAULT_CHUNK_SIZE = 5 * 1024 ** 2 # 5 MB
DEFAULT_ROTATION_TIME_SECS = 12 * 60 * 60 # 12 hours
MAX_FILE_SIZE_BYTES = 100 * 1024 ** 2 # 100 MB
MIN_WORKERS_NUM = 1
class StreamObject:
"""
Class representation of the AWS s3 object along with all the needed metadata to stream to s3
"""
def __init__(self, s3_resource, bucket_name, filename, buffer_queue, encryption):
self.object = s3_resource.Object(bucket_name, filename)
self.uploader = self.object.initiate_multipart_upload(**encryption)
self.bucket = bucket_name
try:
total_bytes = s3_resource.meta.client.head_object(Bucket=bucket_name, Key=filename)['ContentLength']  # size of any existing object
except Exception:
total_bytes = 0
self.buffer = BytesIO()
self.chunk_count = 0
self.byte_count = total_bytes
self.parts = []
self.tasks = buffer_queue
def add_task(self, task):
"""
Add a task to the tasks queue
:param task: Task object
:return:
"""
self.tasks.put(task)
def join_tasks(self):
"""
Join all the tasks
:return:
"""
self.tasks.join()
class S3Stream(BufferedIOBase):
"""
stream interface used by the handler
"""
def __init__(self, bucket: str, key: str, *, chunk_size: int = DEFAULT_CHUNK_SIZE,
max_file_log_time: int = DEFAULT_ROTATION_TIME_SECS, max_file_size_bytes: int = MAX_FILE_SIZE_BYTES,
encoder: str = 'utf-8', workers: int = 1, compress: bool = False, encryption_options: dict = None, **boto_session_kwargs):
"""
:param bucket: name of the s3 bucket
:type bucket: str
:param key: s3 key path
:type key: str
:param chunk_size: size of multipart upload chunk size (default 5MB)
:type chunk_size: int
:param max_file_log_time: threshold period for a log period until file rotation (default 12 Hours)
:type max_file_log_time: int
:param max_file_size_bytes: threshold for file rotation by bytes (default 100MB)
:type max_file_size_bytes: int
:param encoder: the encoder to be used for log records (default 'utf-8')
:type encoder: str
:param workers: the number of background workers that rotate log records (default 1)
:type workers: int
:param compress: flag indication for archiving the content of a file
:type compress: bool
:param boto_session_kwargs: additional keyword arguments for the AWS S3 resource
:type boto_session_kwargs: boto3 resource keyword arguments
"""
self._stream_buffer_queue = queue.Queue()
self._rotation_queue = queue.Queue()
self._session = Session()
self.s3 = self._session.resource('s3', **boto_session_kwargs)
self.start_time = int(time.time())
self.key = key
self.chunk_size = chunk_size
self.max_file_log_time = max_file_log_time
self.max_file_size_bytes = max_file_size_bytes
self.current_file_name = "{}_{}".format(key, int(time.time()))
self.encryption_options = encryption_options if encryption_options else {}
if compress:
self.current_file_name = "{}.gz".format(self.current_file_name)
self.encoder = encoder
self.bucket = bucket
self._current_object = self._get_stream_object(self.current_file_name)
self.workers = [threading.Thread(target=task_worker, args=(self._rotation_queue,), daemon=True).start() for _ in
range(int(max(workers, MIN_WORKERS_NUM) / 2) + 1)]
self._stream_bg_workers = [threading.Thread(target=task_worker, args=(self._stream_buffer_queue,), daemon=True).start() for _
in range(max(int(max(workers, MIN_WORKERS_NUM) / 2), 1))]
self._is_open = True
self.compress = compress
BufferedIOBase.__init__(self)
@property
def bucket(self):
return self._bucket
@bucket.setter
def bucket(self, val):
if not val:
raise ValueError("Bucket name is invalid")
try:
self.s3.meta.client.head_bucket(Bucket=val)
except Exception:
raise ValueError('Bucket %s does not exist, or insufficient permissions' % val)
self._bucket = self.s3.Bucket(val)
@property
def key(self):
return self._key
@key.setter
def key(self, val):
if not val:
raise ValueError("Given key is invalid")
self._key = val.strip('/')
@property
def encoder(self):
return self._encoder
@encoder.setter
def encoder(self, val):
_ = codecs.getencoder(val)
self._encoder = val
def get_filename(self):
"""
returns a log file name
:return: name of the log file in s3
"""
filename = "{}_{}".format(self.key, self.start_time)
if not self.compress:
return filename
return "{}.gz".format(filename)
def _add_task(self, task):
self._rotation_queue.put(task)
def _join_tasks(self):
self._rotation_queue.join()
def _get_stream_object(self, filename):
try:
return StreamObject(self.s3, self.bucket.name, filename, self._stream_buffer_queue, self.encryption_options)
except Exception:
raise RuntimeError('Failed to open new S3 stream object')
def _rotate_chunk(self, run_async=True):
assert self._current_object, "Stream object not found"
part_num = self._current_object.chunk_count + 1
part = self._current_object.uploader.Part(part_num)
buffer = self._current_object.buffer
self._current_object.buffer = BytesIO()
buffer.seek(0)
if run_async:
self._current_object.add_task(Task(self._upload_part, self._current_object, part, part_num, buffer))
else:
self._upload_part(self._current_object, part, part_num, buffer)
self._current_object.chunk_count += 1
@staticmethod
def _upload_part(s3_object, part, part_num, buffer):
upload = part.upload(Body=buffer)
s3_object.parts.append({'ETag': upload['ETag'], 'PartNumber': part_num})
def _rotate_file(self):
if self._current_object.buffer.tell() > 0:
self._rotate_chunk()
temp_object = self._current_object
self._add_task(Task(self._close_stream, stream_object=temp_object))
self.start_time = int(time.time())
new_filename = self.get_filename()
self._current_object = self._get_stream_object(new_filename)
@staticmethod
def _close_stream(stream_object, callback=None, *args, **kwargs):
stream_object.join_tasks()
if stream_object.chunk_count > 0:
stream_object.uploader.complete(MultipartUpload={'Parts': sorted(stream_object.parts, key=lambda p: p['PartNumber'])})
else:
stream_object.uploader.abort()
if callback and callable(callback):
callback(*args, **kwargs)
def close(self, *args, **kwargs):
"""
close the stream for writing, upload remaining log records in stream
:param args:
:param kwargs:
:return:
"""
if self._current_object.buffer.tell() > 0:
self._rotate_chunk(run_async=False)
self._current_object.join_tasks()
self._join_tasks()
self._close_stream(self._current_object)
# Stop the worker threads
for _ in range(len(self.workers)):
self._rotation_queue.put(STOP_SIGNAL)
for _ in range(len(self._stream_bg_workers)):
self._stream_buffer_queue.put(STOP_SIGNAL)
self._is_open = False
@property
def closed(self):
return not self._is_open
@property
def writable(self, *args, **kwargs):
return True
def tell(self, *args, **kwargs):
"""
indication of current size of the stream before rotation
:param args:
:param kwargs:
:return: size of the current stream
"""
return self._current_object.byte_count
def write(self, *args, **kwargs):
"""
writes a log record to the stream
:param args:
:param kwargs:
:return: size of record that was written
"""
s = self.compress and gzip.compress(args[0].encode(self.encoder)) or args[0].encode(self.encoder)
self._current_object.buffer.write(s)
self._current_object.byte_count = self._current_object.byte_count + len(s)
return len(s)
def flush(self, *args, **kwargs):
"""
flushes the current stream if it exceeds the threshold size
:return:
"""
if self._current_object.buffer.tell() > self.chunk_size:
self._rotate_chunk()
if (self.max_file_size_bytes and self._current_object.byte_count > self.max_file_size_bytes) or (
self.max_file_log_time and int(
time.time()) - self.start_time > self.max_file_log_time):
self._rotate_file()
class S3Handler(StreamHandler):
"""
A Logging handler class that streams log records to S3 by chunks
"""
def __init__(self, key: str, bucket: str, *, chunk_size: int = DEFAULT_CHUNK_SIZE,
time_rotation: int = DEFAULT_ROTATION_TIME_SECS, max_file_size_bytes: int = MAX_FILE_SIZE_BYTES,
encoder: str = 'utf-8',
workers: int = 1, compress: bool = False, **boto_session_kwargs):
"""
:param key: The path of the S3 object
:type key: str
:param bucket: The id of the S3 bucket
:type bucket: str
:param chunk_size: size of a chunk in the multipart upload in bytes (default 5MB)
:type chunk_size: int
:param time_rotation: Interval in seconds to rotate the file by (default 12 hours)
:type time_rotation: int
:param max_file_size_bytes: maximum file size in bytes before rotation (default 100MB)
:type max_file_size_bytes: int
:param encoder: default utf-8
:type encoder: str
:param workers: the number of workers that a stream handler would run for
file and chunk rotation tasks; only useful if emitting lots of records
:type workers: int
:param compress: indicating whether to save a compressed gz-suffixed file
:type compress: bool
"""
args_validation = (
ValidationRule(time_rotation, is_positive_int, bad_integer_err('time_rotation')),
ValidationRule(max_file_size_bytes, is_positive_int, bad_integer_err('max_file_size_bytes')),
ValidationRule(encoder, is_non_empty_string, empty_str_err('encoder')),
ValidationRule(workers, is_positive_int, bad_integer_err('workers')),
)
for rule in args_validation:
assert rule.func(rule.arg), rule.message
self.bucket = bucket
self.stream = S3Stream(self.bucket, key, chunk_size=chunk_size, max_file_log_time=time_rotation,
max_file_size_bytes=max_file_size_bytes, encoder=encoder, workers=workers,
compress=compress, **boto_session_kwargs)
# Make sure we gracefully clear the buffers and upload the missing parts before exiting
self._sigterm_handler = signal.signal(signal.SIGTERM, self._teardown)
self._sigint_handler = signal.signal(signal.SIGINT, self._teardown)
self._sigquit_handler = signal.signal(signal.SIGQUIT, self._teardown)
atexit.register(self.close)
StreamHandler.__init__(self, self.stream)
def _teardown(self, signum: int, frame):
self.close()
if signum == signal.SIGTERM:
self._sigterm_handler(signum, frame)
elif signum == signal.SIGINT:
self._sigint_handler(signum, frame)
elif signum == signal.SIGQUIT:
self._sigquit_handler(signum, frame)
def close(self, *args, **kwargs):
"""
Closes the stream
"""
self.acquire()
try:
if self.stream:
try:
self.flush()
finally:
stream = self.stream
self.stream = None
if hasattr(stream, "close"):
stream.close(*args, **kwargs)
finally:
self.release()
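# A hypothetical usage sketch (not part of the original module); the bucket name
# and key are placeholders, and AWS credentials are assumed to be configured in
# the environment:
#
#   import logging
#   handler = S3Handler("logs/my_app", "my-example-bucket", workers=2, compress=False)
#   log = logging.getLogger("example")
#   log.addHandler(handler)
#   log.info("hello from S3Handler")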
|
bad_thread_instantiation.py
|
# pylint: disable=missing-docstring
import threading
threading.Thread(lambda: None).run() # [bad-thread-instantiation]
threading.Thread(None, lambda: None)
threading.Thread(group=None, target=lambda: None).run()
threading.Thread() # [bad-thread-instantiation]
|
piman.py
|
import logging
import logging.config
import os
from zipfile import ZipFile
import io
import time
# create the logger before doing imports since everyone is going
# to use them
local_logfile = './logging.conf'
if os.path.isfile(local_logfile):
logging.config.fileConfig(local_logfile)
else:
zipfile = os.path.dirname(__file__)
with ZipFile(zipfile) as z:
fd = z.open("logging.conf", mode='r')
# convert to a string
confstr = fd.read().decode()
logging.config.fileConfig(io.StringIO(confstr))
# create logger using configuration
logger = logging.getLogger('pimanlogger')
from threading import Thread
from sys import argv
from config_ui import web_ui
from dhcp import dhcp
from tcp import tcp
from tftp import tftp
from utility import power_cycle
from utility import mac_mapper
from piman import logger
from parse_config import config
import ntpserver
'''
piman.py
Attributes:
-----
data_dir : str
the directory of files needed for the Pis to boot
tftp_port : int
UDP port used by the TFTP service (TFTP uses port 69)
tcp_port : int
TCP port used to establish the network connection with the Pis
ip : str
network IP address of the Pis and the IP address of the router
subnet_mask : str
subnet mask for the Pis' IP addresses
switch_address : str
IP address of the switch that connects the Pis
mac_ip_file : str
path of the file that stores each Pi's MAC address and its IP address
Methods
-----
server()
starts the TFTP, DHCP, TCP, NTP, and config UI services between the server and the Pis
restart(switch_address, port)
restarts the Pi(s) on the given switch port(s)
reinstall(switch_address, port)
reinstalls the Pi on the given switch port
exit_piman()
exits piman with an error when arguments are missing
'''
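# Typical invocations, inferred from the argument handling in the __main__
# block below (switch address, interface and port values are placeholders):
#
#   python3 piman.py server
#   python3 piman.py restart <switch_address> <interface> <ports>
#   python3 piman.py mapper <switch_address> <interface> <ports>
#   python3 piman.py reinstall <switch_address> <interface> <port>
#   python3 piman.py config <name> <config_path> <hosts_csv_path>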
data_dir = "./install/boot"
tftp_port = 69
tcp_port = 3333
ip = config['server_address']
subnet_mask = config['subnet_mask']
mac_ip_file = "hosts.csv"
lease_time = 600
interface = config['interface']
def server():
config_ui_thread = Thread(target=config_ui, args=[
"", "./piman.yaml", "./hosts.csv"], name="config_ui")
config_ui_thread.start()
tftp_thread = Thread(target=tftp.do_tftpd, args=[
data_dir, ip, tftp_port], name="tftpd")
tftp_thread.start()
dhcp_thread = Thread(target=dhcp.do_dhcp, args=[
mac_ip_file, subnet_mask, ip, lease_time, interface], name="dhcpd")
dhcp_thread.start()
tcp_thread = Thread(target=tcp.do_tcp, args=[
data_dir, tcp_port, ip], name="tcp")
tcp_thread.start()
ntp_thread = Thread(target=ntpserver.do_ntp)  # pass the function itself, not its result
ntp_thread.start()
config_ui_thread.join()
tftp_thread.join()
dhcp_thread.join()
tcp_thread.join()
ntp_thread.join()
def restart(switch_address, interface, ports):
for port in ports:
power_cycle.power_cycle(switch_address, interface, port)
def reinstall(switch_address, interface, port):
with open("reinstall.txt", "w") as f:
network_addr = ip[:7] + str(interface) + "." + str(port)
f.write(network_addr)
power_cycle.power_cycle(switch_address, interface, port)
def mapper(switch_address,interface, port, file):
for portNum in port:
power_cycle.power_cycle(switch_address,interface, portNum)
time.sleep(30)
mac_mapper.mac_mapper(file)
def config_ui(name, config_path, hosts_csv_path):
web_ui.start(name, config_path, hosts_csv_path)
def exit_piman():
logger.error("Insufficient amount of arguments")
exit(1)
if __name__ == "__main__":
args = "Arguments: "
for a in argv:
args += a + " "
logger.info(args)
if len(argv) < 2:
exit_piman()
if argv[1] == "server":
server()
elif argv[1] == "restart":
if len(argv) < 5:
exit_piman()
restart(argv[2], argv[3],argv[4])
elif argv[1] == "mapper":
if len(argv) < 5:
exit_piman()
mapper(argv[2], argv[3], argv[4], mac_ip_file)  # mac_mapper also needs the hosts file
elif argv[1] == "reinstall":
if len(argv) < 5:
exit_piman()
reinstall(argv[2], argv[3], argv[4])
elif argv[1] == "config":
config_ui(argv[2], argv[3], argv[4])
|
network.py
|
# Electrum - Lightweight Bitcoin Client
# Copyright (c) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
import time
import queue
import os
import errno
import sys
import random
import select
import traceback
from collections import defaultdict, deque
import threading
import socket
import json
import socks
from . import util
from . import bitcoin
from .bitcoin import *
from .interface import Connection, Interface
from . import blockchain
from .version import ELECTRUM_VERSION, PROTOCOL_VERSION
NODES_RETRY_INTERVAL = 60
SERVER_RETRY_INTERVAL = 10
def parse_servers(result):
""" parse servers list into dict format"""
from .version import PROTOCOL_VERSION
servers = {}
for item in result:
host = item[1]
out = {}
version = None
pruning_level = '-'
if len(item) > 2:
for v in item[2]:
if re.match("[st]\d*", v):
protocol, port = v[0], v[1:]
if port == '': port = bitcoin.DEFAULT_PORTS[protocol]
out[protocol] = port
elif re.match("v(.?)+", v):
version = v[1:]
elif re.match("p\d*", v):
pruning_level = v[1:]
if pruning_level == '': pruning_level = '0'
if out:
out['pruning'] = pruning_level
out['version'] = version
servers[host] = out
return servers
def filter_version(servers):
def is_recent(version):
try:
return util.normalize_version(version) >= util.normalize_version(PROTOCOL_VERSION)
except Exception as e:
return False
return {k: v for k, v in servers.items() if is_recent(v.get('version'))}
def filter_protocol(hostmap, protocol = 's'):
'''Filters the hostmap for those implementing protocol.
The result is a list in serialized form.'''
eligible = []
for host, portmap in hostmap.items():
port = portmap.get(protocol)
if port:
eligible.append(serialize_server(host, port, protocol))
return eligible
def pick_random_server(hostmap = None, protocol = 's', exclude_set = set()):
if hostmap is None:
hostmap = bitcoin.DEFAULT_SERVERS
eligible = list(set(filter_protocol(hostmap, protocol)) - exclude_set)
return random.choice(eligible) if eligible else None
from .simple_config import SimpleConfig
proxy_modes = ['socks4', 'socks5', 'http']
def serialize_proxy(p):
if not isinstance(p, dict):
return None
return ':'.join([p.get('mode'),p.get('host'), p.get('port'), p.get('user'), p.get('password')])
def deserialize_proxy(s):
if not isinstance(s, str):
return None
if s.lower() == 'none':
return None
proxy = { "mode":"socks5", "host":"localhost" }
args = s.split(':')
n = 0
if proxy_modes.count(args[n]) == 1:
proxy["mode"] = args[n]
n += 1
if len(args) > n:
proxy["host"] = args[n]
n += 1
if len(args) > n:
proxy["port"] = args[n]
n += 1
else:
proxy["port"] = "8080" if proxy["mode"] == "http" else "1080"
if len(args) > n:
proxy["user"] = args[n]
n += 1
if len(args) > n:
proxy["password"] = args[n]
return proxy
def deserialize_server(server_str):
host, port, protocol = str(server_str).split(':')
assert protocol in 'st'
int(port) # Throw if cannot be converted to int
return host, port, protocol
def serialize_server(host, port, protocol):
return str(':'.join([host, port, protocol]))
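# Worked examples (illustrative only) for the serialization helpers above:
#
#   deserialize_proxy("socks5:localhost:9050")
#   -> {"mode": "socks5", "host": "localhost", "port": "9050"}
#   serialize_server("electrum.example.org", "50002", "s")
#   -> "electrum.example.org:50002:s"
#   deserialize_server("electrum.example.org:50002:s")
#   -> ("electrum.example.org", "50002", "s")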
class Network(util.DaemonThread):
"""The Network class manages a set of connections to remote electrum
servers, each connected socket is handled by an Interface() object.
Connections are initiated by a Connection() thread which stops once
the connection succeeds or fails.
Our external API:
- Member functions get_header(), get_interfaces(), get_local_height(),
get_parameters(), get_server_height(), get_status_value(),
is_connected(), set_parameters(), stop()
"""
def __init__(self, config=None):
if config is None:
config = {} # Do not use mutables as default values!
util.DaemonThread.__init__(self)
self.config = SimpleConfig(config) if isinstance(config, dict) else config
self.num_server = 10 if not self.config.get('oneserver') else 0
self.blockchains = blockchain.read_blockchains(self.config)
self.print_error("blockchains", self.blockchains.keys())
self.blockchain_index = config.get('blockchain_index', 0)
if self.blockchain_index not in self.blockchains.keys():
self.blockchain_index = 0
# Server for addresses and transactions
self.default_server = self.config.get('server')
# Sanitize default server
try:
deserialize_server(self.default_server)
except:
self.default_server = None
if not self.default_server:
self.default_server = pick_random_server()
self.lock = threading.Lock()
self.pending_sends = []
self.message_id = 0
self.debug = False
self.irc_servers = {} # returned by interface (list from irc)
self.recent_servers = self.read_recent_servers()
self.banner = ''
self.donation_address = ''
self.relay_fee = None
# callbacks passed with subscriptions
self.subscriptions = defaultdict(list)
self.sub_cache = {}
# callbacks set by the GUI
self.callbacks = defaultdict(list)
dir_path = os.path.join( self.config.path, 'certs')
if not os.path.exists(dir_path):
os.mkdir(dir_path)
# subscriptions and requests
self.subscribed_addresses = set()
self.h2addr = {}
# Requests from client we've not seen a response to
self.unanswered_requests = {}
# retry times
self.server_retry_time = time.time()
self.nodes_retry_time = time.time()
# kick off the network. interface is the main server we are currently
# communicating with. interfaces is the set of servers we are connecting
# to or have an ongoing connection with
self.interface = None
self.interfaces = {}
self.auto_connect = self.config.get('auto_connect', True)
self.connecting = set()
self.socket_queue = queue.Queue()
self.start_network(deserialize_server(self.default_server)[2],
deserialize_proxy(self.config.get('proxy')))
def register_callback(self, callback, events):
with self.lock:
for event in events:
self.callbacks[event].append(callback)
def unregister_callback(self, callback):
with self.lock:
for callbacks in self.callbacks.values():
if callback in callbacks:
callbacks.remove(callback)
def trigger_callback(self, event, *args):
with self.lock:
callbacks = self.callbacks[event][:]
for callback in callbacks:
callback(event, *args)
def read_recent_servers(self):
if not self.config.path:
return []
path = os.path.join(self.config.path, "recent_servers")
try:
with open(path, "r") as f:
data = f.read()
return json.loads(data)
except:
return []
def save_recent_servers(self):
if not self.config.path:
return
path = os.path.join(self.config.path, "recent_servers")
s = json.dumps(self.recent_servers, indent=4, sort_keys=True)
try:
with open(path, "w") as f:
f.write(s)
except:
pass
def get_server_height(self):
return self.interface.tip if self.interface else 0
def server_is_lagging(self):
sh = self.get_server_height()
if not sh:
self.print_error('no height for main interface')
return True
lh = self.get_local_height()
result = (lh - sh) > 1
if result:
self.print_error('%s is lagging (%d vs %d)' % (self.default_server, sh, lh))
return result
def set_status(self, status):
self.connection_status = status
self.notify('status')
def is_connected(self):
return self.interface is not None
def is_connecting(self):
return self.connection_status == 'connecting'
def is_up_to_date(self):
return self.unanswered_requests == {}
def queue_request(self, method, params, interface=None):
# If you want to queue a request on any interface it must go
# through this function so message ids are properly tracked
if interface is None:
interface = self.interface
message_id = self.message_id
self.message_id += 1
if self.debug:
self.print_error(interface.host, "-->", method, params, message_id)
interface.queue_request(method, params, message_id)
return message_id
def send_subscriptions(self):
self.print_error('sending subscriptions to', self.interface.server, len(self.unanswered_requests), len(self.subscribed_addresses))
self.sub_cache.clear()
# Resend unanswered requests
requests = self.unanswered_requests.values()
self.unanswered_requests = {}
for request in requests:
message_id = self.queue_request(request[0], request[1])
self.unanswered_requests[message_id] = request
self.queue_request('server.banner', [])
self.queue_request('server.donation_address', [])
self.queue_request('server.peers.subscribe', [])
for i in bitcoin.FEE_TARGETS:
self.queue_request('blockchain.estimatefee', [i])
self.queue_request('blockchain.relayfee', [])
for h in self.subscribed_addresses:
self.queue_request('blockchain.scripthash.subscribe', [h])
def get_status_value(self, key):
if key == 'status':
value = self.connection_status
elif key == 'banner':
value = self.banner
elif key == 'fee':
value = self.config.fee_estimates
elif key == 'updated':
value = (self.get_local_height(), self.get_server_height())
elif key == 'servers':
value = self.get_servers()
elif key == 'interfaces':
value = self.get_interfaces()
return value
def notify(self, key):
if key in ['status', 'updated']:
self.trigger_callback(key)
else:
self.trigger_callback(key, self.get_status_value(key))
def get_parameters(self):
host, port, protocol = deserialize_server(self.default_server)
return host, port, protocol, self.proxy, self.auto_connect
def get_donation_address(self):
if self.is_connected():
return self.donation_address
def get_interfaces(self):
'''The interfaces that are in connected state'''
return list(self.interfaces.keys())
def get_servers(self):
out = bitcoin.DEFAULT_SERVERS
if self.irc_servers:
out.update(filter_version(self.irc_servers.copy()))
else:
for s in self.recent_servers:
try:
host, port, protocol = deserialize_server(s)
except:
continue
if host not in out:
out[host] = { protocol:port }
return out
def start_interface(self, server):
if (not server in self.interfaces and not server in self.connecting):
if server == self.default_server:
self.print_error("connecting to %s as new interface" % server)
self.set_status('connecting')
self.connecting.add(server)
c = Connection(server, self.socket_queue, self.config.path)
def start_random_interface(self):
exclude_set = self.disconnected_servers.union(set(self.interfaces))
server = pick_random_server(self.get_servers(), self.protocol, exclude_set)
if server:
self.start_interface(server)
def start_interfaces(self):
self.start_interface(self.default_server)
for i in range(self.num_server - 1):
self.start_random_interface()
def set_proxy(self, proxy):
self.proxy = proxy
# Store these somewhere so we can un-monkey-patch
if not hasattr(socket, "_socketobject"):
socket._socketobject = socket.socket
socket._getaddrinfo = socket.getaddrinfo
if proxy:
self.print_error('setting proxy', proxy)
proxy_mode = proxy_modes.index(proxy["mode"]) + 1
socks.setdefaultproxy(proxy_mode,
proxy["host"],
int(proxy["port"]),
# socks.py seems to want either None or a non-empty string
username=(proxy.get("user", "") or None),
password=(proxy.get("password", "") or None))
socket.socket = socks.socksocket
# prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
else:
socket.socket = socket._socketobject
socket.getaddrinfo = socket._getaddrinfo
def start_network(self, protocol, proxy):
assert not self.interface and not self.interfaces
assert not self.connecting and self.socket_queue.empty()
self.print_error('starting network')
self.disconnected_servers = set([])
self.protocol = protocol
self.set_proxy(proxy)
self.start_interfaces()
def stop_network(self):
self.print_error("stopping network")
for interface in list(self.interfaces.values()):
self.close_interface(interface)
if self.interface:
self.close_interface(self.interface)
assert self.interface is None
assert not self.interfaces
self.connecting = set()
# Get a new queue - no old pending connections thanks!
self.socket_queue = queue.Queue()
def set_parameters(self, host, port, protocol, proxy, auto_connect):
proxy_str = serialize_proxy(proxy)
server = serialize_server(host, port, protocol)
# sanitize parameters
try:
deserialize_server(serialize_server(host, port, protocol))
if proxy:
proxy_modes.index(proxy["mode"]) + 1
int(proxy['port'])
except:
return
self.config.set_key('auto_connect', auto_connect, False)
self.config.set_key("proxy", proxy_str, False)
self.config.set_key("server", server, True)
# abort if changes were not allowed by config
if self.config.get('server') != server or self.config.get('proxy') != proxy_str:
return
self.auto_connect = auto_connect
if self.proxy != proxy or self.protocol != protocol:
# Restart the network defaulting to the given server
self.stop_network()
self.default_server = server
self.start_network(protocol, proxy)
elif self.default_server != server:
self.switch_to_interface(server)
else:
self.switch_lagging_interface()
self.notify('updated')
def switch_to_random_interface(self):
'''Switch to a random connected server other than the current one'''
servers = self.get_interfaces() # Those in connected state
if self.default_server in servers:
servers.remove(self.default_server)
if servers:
self.switch_to_interface(random.choice(servers))
def switch_lagging_interface(self):
'''If auto_connect and lagging, switch interface'''
if self.server_is_lagging() and self.auto_connect:
# switch to one that has the correct header (not height)
header = self.blockchain().read_header(self.get_local_height())
filtered = list(map(lambda x:x[0], filter(lambda x: x[1].tip_header==header, self.interfaces.items())))
if filtered:
choice = random.choice(filtered)
self.switch_to_interface(choice)
def switch_to_interface(self, server):
'''Switch to server as our interface. If no connection exists nor
being opened, start a thread to connect. The actual switch will
happen on receipt of the connection notification. Do nothing
if server already is our interface.'''
self.default_server = server
if server not in self.interfaces:
self.interface = None
self.start_interface(server)
return
i = self.interfaces[server]
if self.interface != i:
self.print_error("switching to", server)
# stop any current interface in order to terminate subscriptions
# fixme: we don't want to close headers sub
#self.close_interface(self.interface)
self.interface = i
self.send_subscriptions()
self.set_status('connected')
self.notify('updated')
def close_interface(self, interface):
if interface:
if interface.server in self.interfaces:
self.interfaces.pop(interface.server)
if interface.server == self.default_server:
self.interface = None
interface.close()
def add_recent_server(self, server):
# list is ordered
if server in self.recent_servers:
self.recent_servers.remove(server)
self.recent_servers.insert(0, server)
self.recent_servers = self.recent_servers[0:20]
self.save_recent_servers()
def process_response(self, interface, response, callbacks):
if self.debug:
self.print_error("<--", response)
error = response.get('error')
result = response.get('result')
method = response.get('method')
params = response.get('params')
# We handle some responses; return the rest to the client.
if method == 'server.version':
interface.server_version = result
elif method == 'blockchain.headers.subscribe':
if error is None:
self.on_notify_header(interface, result)
elif method == 'server.peers.subscribe':
if error is None:
self.irc_servers = parse_servers(result)
self.notify('servers')
elif method == 'server.banner':
if error is None:
self.banner = result
self.notify('banner')
elif method == 'server.donation_address':
if error is None:
self.donation_address = result
elif method == 'blockchain.estimatefee':
if error is None and result > 0:
i = params[0]
fee = int(result*COIN)
self.config.fee_estimates[i] = fee
self.print_error("fee_estimates[%d]" % i, fee)
self.notify('fee')
elif method == 'blockchain.relayfee':
if error is None:
self.relay_fee = int(result * COIN)
self.print_error("relayfee", self.relay_fee)
elif method == 'blockchain.block.get_chunk':
self.on_get_chunk(interface, response)
elif method == 'blockchain.block.get_header':
self.on_get_header(interface, response)
for callback in callbacks:
callback(response)
def get_index(self, method, params):
""" hashable index for subscriptions and cache"""
return str(method) + (':' + str(params[0]) if params else '')
def process_responses(self, interface):
responses = interface.get_responses()
for request, response in responses:
if request:
method, params, message_id = request
k = self.get_index(method, params)
# client requests go through self.send() with a
# callback, are only sent to the current interface,
# and are placed in the unanswered_requests dictionary
client_req = self.unanswered_requests.pop(message_id, None)
if client_req:
assert interface == self.interface
callbacks = [client_req[2]]
else:
# fixme: will only work for subscriptions
k = self.get_index(method, params)
callbacks = self.subscriptions.get(k, [])
# Copy the request method and params to the response
response['method'] = method
response['params'] = params
# Only once we've received a response to an addr subscription
# add it to the list; avoids double-sends on reconnection
if method == 'blockchain.scripthash.subscribe':
self.subscribed_addresses.add(params[0])
else:
if not response: # Closed remotely / misbehaving
self.connection_down(interface.server)
break
# Rewrite response shape to match subscription request response
method = response.get('method')
params = response.get('params')
k = self.get_index(method, params)
if method == 'blockchain.headers.subscribe':
response['result'] = params[0]
response['params'] = []
elif method == 'blockchain.scripthash.subscribe':
response['params'] = [params[0]] # addr
response['result'] = params[1]
callbacks = self.subscriptions.get(k, [])
# update cache if it's a subscription
if method.endswith('.subscribe'):
self.sub_cache[k] = response
# Response is now in canonical form
self.process_response(interface, response, callbacks)
def addr_to_scripthash(self, addr):
h = bitcoin.address_to_scripthash(addr)
if h not in self.h2addr:
self.h2addr[h] = addr
return h
def overload_cb(self, callback):
def cb2(x):
p = x.pop('params')
addr = self.h2addr[p[0]]
x['params'] = [addr]
callback(x)
return cb2
def subscribe_to_addresses(self, addresses, callback):
hashes = [self.addr_to_scripthash(addr) for addr in addresses]
msgs = [('blockchain.scripthash.subscribe', [x]) for x in hashes]
self.send(msgs, self.overload_cb(callback))
def request_address_history(self, address, callback):
h = self.addr_to_scripthash(address)
self.send([('blockchain.scripthash.get_history', [h])], self.overload_cb(callback))
def send(self, messages, callback):
'''Messages is a list of (method, params) tuples'''
messages = list(messages)
with self.lock:
self.pending_sends.append((messages, callback))
def process_pending_sends(self):
# Requests need connectivity. If we don't have an interface,
# we cannot process them.
if not self.interface:
return
with self.lock:
sends = self.pending_sends
self.pending_sends = []
for messages, callback in sends:
for method, params in messages:
r = None
if method.endswith('.subscribe'):
k = self.get_index(method, params)
# add callback to list
l = self.subscriptions.get(k, [])
if callback not in l:
l.append(callback)
self.subscriptions[k] = l
# check cached response for subscriptions
r = self.sub_cache.get(k)
if r is not None:
util.print_error("cache hit", k)
callback(r)
else:
message_id = self.queue_request(method, params)
self.unanswered_requests[message_id] = method, params, callback
def unsubscribe(self, callback):
'''Unsubscribe a callback to free object references to enable GC.'''
# Note: we can't unsubscribe from the server, so if we receive
# subsequent notifications process_response() will emit a harmless
# "received unexpected notification" warning
with self.lock:
for v in self.subscriptions.values():
if callback in v:
v.remove(callback)
def connection_down(self, server):
'''A connection to server either went down, or was never made.
We distinguish by whether it is in self.interfaces.'''
self.disconnected_servers.add(server)
if server == self.default_server:
self.set_status('disconnected')
if server in self.interfaces:
self.close_interface(self.interfaces[server])
self.notify('interfaces')
for b in self.blockchains.values():
if b.catch_up == server:
b.catch_up = None
def new_interface(self, server, socket):
# todo: get tip first, then decide which checkpoint to use.
self.add_recent_server(server)
interface = Interface(server, socket)
interface.blockchain = None
interface.tip_header = None
interface.tip = 0
interface.mode = 'default'
interface.request = None
self.interfaces[server] = interface
self.queue_request('blockchain.headers.subscribe', [], interface)
if server == self.default_server:
self.switch_to_interface(server)
#self.notify('interfaces')
def maintain_sockets(self):
'''Socket maintenance.'''
# Responses to connection attempts?
while not self.socket_queue.empty():
server, socket = self.socket_queue.get()
if server in self.connecting:
self.connecting.remove(server)
if socket:
self.new_interface(server, socket)
else:
self.connection_down(server)
# Send pings and shut down stale interfaces
# must use copy of values
for interface in list(self.interfaces.values()):
if interface.has_timed_out():
self.connection_down(interface.server)
elif interface.ping_required():
params = [ELECTRUM_VERSION, PROTOCOL_VERSION]
self.queue_request('server.version', params, interface)
now = time.time()
# nodes
if len(self.interfaces) + len(self.connecting) < self.num_server:
self.start_random_interface()
if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
self.print_error('network: retrying connections')
self.disconnected_servers = set([])
self.nodes_retry_time = now
# main interface
if not self.is_connected():
if self.auto_connect:
if not self.is_connecting():
self.switch_to_random_interface()
else:
if self.default_server in self.disconnected_servers:
if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
self.disconnected_servers.remove(self.default_server)
self.server_retry_time = now
else:
self.switch_to_interface(self.default_server)
def request_chunk(self, interface, idx):
interface.print_error("requesting chunk %d" % idx)
self.queue_request('blockchain.block.get_chunk', [idx], interface)
interface.request = idx
interface.req_time = time.time()
def on_get_chunk(self, interface, response):
'''Handle receiving a chunk of block headers'''
error = response.get('error')
result = response.get('result')
params = response.get('params')
if result is None or params is None or error is not None:
interface.print_error(error or 'bad response')
return
# Ignore unsolicited chunks
index = params[0]
if interface.request != index:
return
connect = interface.blockchain.connect_chunk(index, result)
# If not finished, get the next chunk
if not connect:
self.connection_down(interface.server)
return
if interface.blockchain.height() < interface.tip:
self.request_chunk(interface, index+1)
else:
interface.request = None
interface.mode = 'default'
interface.print_error('catch up done', interface.blockchain.height())
interface.blockchain.catch_up = None
self.notify('updated')
def request_header(self, interface, height):
#interface.print_error("requesting header %d" % height)
self.queue_request('blockchain.block.get_header', [height], interface)
interface.request = height
interface.req_time = time.time()
def on_get_header(self, interface, response):
'''Handle receiving a single block header'''
header = response.get('result')
if not header:
interface.print_error(response)
self.connection_down(interface.server)
return
height = header.get('block_height')
if interface.request != height:
interface.print_error("unsolicited header",interface.request, height)
self.connection_down(interface.server)
return
chain = blockchain.check_header(header)
if interface.mode == 'backward':
if chain:
interface.print_error("binary search")
interface.mode = 'binary'
interface.blockchain = chain
interface.good = height
next_height = (interface.bad + interface.good) // 2
else:
if height == 0:
self.connection_down(interface.server)
next_height = None
else:
interface.bad = height
interface.bad_header = header
delta = interface.tip - height
next_height = max(0, interface.tip - 2 * delta)
elif interface.mode == 'binary':
if chain:
interface.good = height
interface.blockchain = chain
else:
interface.bad = height
interface.bad_header = header
if interface.bad != interface.good + 1:
next_height = (interface.bad + interface.good) // 2
elif not interface.blockchain.can_connect(interface.bad_header, check_height=False):
self.connection_down(interface.server)
next_height = None
else:
branch = self.blockchains.get(interface.bad)
if branch is not None:
if branch.check_header(interface.bad_header):
interface.print_error('joining chain', interface.bad)
next_height = None
elif branch.parent().check_header(header):
interface.print_error('reorg', interface.bad, interface.tip)
interface.blockchain = branch.parent()
next_height = None
else:
interface.print_error('checkpoint conflicts with existing fork', branch.path())
branch.write('', 0)
branch.save_header(interface.bad_header)
interface.mode = 'catch_up'
interface.blockchain = branch
next_height = interface.bad + 1
interface.blockchain.catch_up = interface.server
else:
bh = interface.blockchain.height()
next_height = None
if bh > interface.good:
if not interface.blockchain.check_header(interface.bad_header):
b = interface.blockchain.fork(interface.bad_header)
self.blockchains[interface.bad] = b
interface.blockchain = b
interface.print_error("new chain", b.checkpoint)
interface.mode = 'catch_up'
next_height = interface.bad + 1
interface.blockchain.catch_up = interface.server
else:
assert bh == interface.good
if interface.blockchain.catch_up is None and bh < interface.tip:
interface.print_error("catching up from %d"% (bh + 1))
interface.mode = 'catch_up'
next_height = bh + 1
interface.blockchain.catch_up = interface.server
self.notify('updated')
elif interface.mode == 'catch_up':
can_connect = interface.blockchain.can_connect(header)
if can_connect:
interface.blockchain.save_header(header)
next_height = height + 1 if height < interface.tip else None
else:
# go back
interface.print_error("cannot connect", height)
interface.mode = 'backward'
interface.bad = height
interface.bad_header = header
next_height = height - 1
if next_height is None:
# exit catch_up state
interface.print_error('catch up done', interface.blockchain.height())
interface.blockchain.catch_up = None
self.switch_lagging_interface()
self.notify('updated')
else:
raise BaseException(interface.mode)
# If not finished, get the next header
if next_height:
if interface.mode == 'catch_up' and interface.tip > next_height + 50:
self.request_chunk(interface, next_height // 2016)
else:
self.request_header(interface, next_height)
else:
interface.mode = 'default'
interface.request = None
self.notify('updated')
# refresh network dialog
self.notify('interfaces')
def maintain_requests(self):
for interface in list(self.interfaces.values()):
if interface.request and time.time() - interface.request_time > 20:
interface.print_error("blockchain request timed out")
self.connection_down(interface.server)
continue
def wait_on_sockets(self):
# Python docs say Windows doesn't like empty selects.
# Sleep to prevent busy looping
if not self.interfaces:
time.sleep(0.1)
return
rin = [i for i in self.interfaces.values()]
win = [i for i in self.interfaces.values() if i.num_requests()]
try:
rout, wout, xout = select.select(rin, win, [], 0.1)
except socket.error as e:
code = e.errno
if code == errno.EINTR:
return
raise
assert not xout
for interface in wout:
interface.send_requests()
for interface in rout:
self.process_responses(interface)
def init_headers_file(self):
b = self.blockchains[0]
if b.get_hash(0) == bitcoin.GENESIS:
self.downloading_headers = False
return
filename = b.path()
def download_thread():
try:
import urllib.request, socket
socket.setdefaulttimeout(30)
self.print_error("downloading ", bitcoin.HEADERS_URL)
urllib.request.urlretrieve(bitcoin.HEADERS_URL, filename + '.tmp')
os.rename(filename + '.tmp', filename)
self.print_error("done.")
except Exception:
self.print_error("download failed. creating file", filename)
open(filename, 'wb+').close()
b = self.blockchains[0]
with b.lock: b.update_size()
self.downloading_headers = False
self.downloading_headers = True
t = threading.Thread(target = download_thread)
t.daemon = True
t.start()
def run(self):
self.init_headers_file()
while self.is_running() and self.downloading_headers:
time.sleep(1)
while self.is_running():
self.maintain_sockets()
self.wait_on_sockets()
self.maintain_requests()
self.run_jobs() # Synchronizer and Verifier
self.process_pending_sends()
self.stop_network()
self.on_stop()
def on_notify_header(self, interface, header):
height = header.get('block_height')
if not height:
return
interface.tip_header = header
interface.tip = height
if interface.mode != 'default':
return
b = blockchain.check_header(header)
if b:
interface.blockchain = b
self.switch_lagging_interface()
self.notify('interfaces')
return
b = blockchain.can_connect(header)
if b:
interface.blockchain = b
b.save_header(header)
self.switch_lagging_interface()
self.notify('updated')
self.notify('interfaces')
return
tip = max([x.height() for x in self.blockchains.values()])
if tip >=0:
interface.mode = 'backward'
interface.bad = height
interface.bad_header = header
self.request_header(interface, min(tip, height - 1))
else:
chain = self.blockchains[0]
if chain.catch_up is None:
chain.catch_up = interface
interface.mode = 'catch_up'
interface.blockchain = chain
self.request_header(interface, 0)
def blockchain(self):
if self.interface and self.interface.blockchain is not None:
self.blockchain_index = self.interface.blockchain.checkpoint
return self.blockchains[self.blockchain_index]
def get_blockchains(self):
out = {}
for k, b in self.blockchains.items():
r = list(filter(lambda i: i.blockchain==b, self.interfaces.values()))
if r:
out[k] = r
return out
def follow_chain(self, index):
blockchain = self.blockchains.get(index)
if blockchain:
self.blockchain_index = index
self.config.set_key('blockchain_index', index)
for i in self.interfaces.values():
if i.blockchain == blockchain:
self.switch_to_interface(i.server)
break
else:
raise BaseException('blockchain not found', index)
if self.interface:
server = self.interface.server
host, port, protocol, proxy, auto_connect = self.get_parameters()
host, port, protocol = server.split(':')
self.set_parameters(host, port, protocol, proxy, auto_connect)
def get_local_height(self):
return self.blockchain().height()
def synchronous_get(self, request, timeout=30):
q = queue.Queue()
self.send([request], q.put)
try:
r = q.get(True, timeout)
except queue.Empty:
raise BaseException('Server did not answer')
if r.get('error'):
raise BaseException(r.get('error'))
return r.get('result')
def broadcast(self, tx, timeout=30):
tx_hash = tx.txid()
try:
out = self.synchronous_get(('blockchain.transaction.broadcast', [str(tx)]), timeout)
except BaseException as e:
return False, "error: " + str(e)
if out != tx_hash:
return False, "error: " + out
return True, out
|
audio_reader.py
|
import fnmatch
import os
import re
import threading
import librosa
import numpy as np
import tensorflow as tf
def find_files(directory, pattern='*.wav'):
'''Recursively finds all files matching the pattern.'''
files = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, pattern):
files.append(os.path.join(root, filename))
return files
def load_generic_audio(directory, sample_rate):
'''Generator that yields audio waveforms from the directory.'''
files = find_files(directory)
for filename in files:
audio, _ = librosa.load(filename, sr=sample_rate, mono=True)
audio = audio.reshape(-1, 1)
yield audio, filename
def load_vctk_audio(directory, sample_rate):
'''Generator that yields audio waveforms from the VCTK dataset, and
additionally the ID of the corresponding speaker.'''
files = find_files(directory)
speaker_re = re.compile(r'p([0-9]+)_([0-9]+)\.wav')
for filename in files:
audio, _ = librosa.load(filename, sr=sample_rate, mono=True)
audio = audio.reshape(-1, 1)
matches = speaker_re.findall(filename)[0]
speaker_id, recording_id = [int(id_) for id_ in matches]
yield audio, speaker_id
def trim_silence(audio, threshold):
'''Removes silence at the beginning and end of a sample.'''
energy = librosa.feature.rmse(audio)
frames = np.nonzero(energy > threshold)
indices = librosa.core.frames_to_samples(frames)[1]
# Note: indices can be an empty array, if the whole audio was silence.
return audio[indices[0]:indices[-1]] if indices.size else audio[0:0]
class AudioReader(object):
'''Generic background audio reader that preprocesses audio files
and enqueues them into a TensorFlow queue.'''
def __init__(self,
audio_dir,
coord,
sample_rate,
sample_size=None,
silence_threshold=None,
queue_size=256):
self.audio_dir = audio_dir
self.sample_rate = sample_rate
self.coord = coord
self.sample_size = sample_size
self.silence_threshold = silence_threshold
self.threads = []
self.sample_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.queue = tf.PaddingFIFOQueue(queue_size,
['float32'],
shapes=[(None, 1)])
self.enqueue = self.queue.enqueue([self.sample_placeholder])
def dequeue(self, num_elements):
output = self.queue.dequeue_many(num_elements)
return output
def thread_main(self, sess):
buffer_ = np.array([])
stop = False
# Go through the dataset multiple times
while not stop:
iterator = load_generic_audio(self.audio_dir, self.sample_rate)
for audio, filename in iterator:
if self.coord.should_stop():
stop = True
break
if self.silence_threshold is not None:
# Remove silence
audio = trim_silence(audio[:, 0], self.silence_threshold)
if audio.size == 0:
print("Warning: {} was ignored as it contains only "
"silence. Consider decreasing trim_silence "
"threshold, or adjust volume of the audio."
.format(filename))
if self.sample_size:
# Cut samples into fixed size pieces
buffer_ = np.append(buffer_, audio)
while len(buffer_) > self.sample_size:
piece = np.reshape(buffer_[:self.sample_size], [-1, 1])
sess.run(self.enqueue,
feed_dict={self.sample_placeholder: piece})
buffer_ = buffer_[self.sample_size:]
else:
sess.run(self.enqueue,
feed_dict={self.sample_placeholder: audio})
def start_threads(self, sess, n_threads=1):
for _ in range(n_threads):
thread = threading.Thread(target=self.thread_main, args=(sess,))
thread.daemon = True # Thread will close when parent quits.
thread.start()
self.threads.append(thread)
return self.threads
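# Illustrative usage sketch (TF 1.x queue-runner style, matching the API above;
# the directory path and sample rate are placeholders):
#
#   coord = tf.train.Coordinator()
#   reader = AudioReader('/data/wav', coord, sample_rate=16000, sample_size=100000)
#   batch = reader.dequeue(num_elements=1)
#   with tf.Session() as sess:
#       reader.start_threads(sess, n_threads=1)
#       audio = sess.run(batch)
#       coord.request_stop()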
|
fsspec_utils.py
|
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
from threading import Thread
import numpy as np
from pyarrow import parquet as pq
try:
import cudf
from cudf.core.column import as_column, build_categorical_column
except ImportError:
cudf = None
#
# Parquet-Specific Utilities
#
def _optimized_read_partition_remote(
fs, pieces, columns, index, categories=(), partitions=(), **kwargs
):
# This is a specialized version of `CudfEngine.read_partition`
# for remote filesystems. This implementation is intended to
# replace the upstream `read_partition` classmethod until
# remote-filesystem handling is optimized in cudf/dask-cudf
if columns is not None:
columns = list(columns)
if isinstance(index, list):
columns += index
# Check that this is a single-piece read on a non-local filesystem
if not isinstance(pieces, list):
pieces = [pieces]
if len(pieces) > 1:
raise ValueError(
"The `_custom_read_partition` code path is not designed to "
"handle a multi-element `pieces` argument."
)
if cudf.utils.ioutils._is_local_filesystem(fs):
raise ValueError(
"The `_custom_read_partition` code path is not intended "
"for use on local filesystems."
)
# Unpack contents of the single piece
if isinstance(pieces[0], str):
path = pieces[0]
row_group = None
partition_keys = []
else:
(path, row_group, partition_keys) = pieces[0]
# Call optimized read utility
df = _optimized_read_remote(path, row_group, columns, fs, **kwargs)
#
# Code below is directly copied from cudf-21.08
#
if index and (index[0] in df.columns):
df = df.set_index(index[0])
elif index is False and set(df.index.names).issubset(columns):
# If index=False, we need to make sure all of the
# names in `columns` are actually in `df.columns`
df.reset_index(inplace=True)
if partition_keys:
if partitions is None:
raise ValueError("Must pass partition sets")
for i, (name, index2) in enumerate(partition_keys):
categories = [val.as_py() for val in partitions.levels[i].dictionary]
col = as_column(index2).as_frame().repeat(len(df))._data[None]
df[name] = build_categorical_column(
categories=categories,
codes=as_column(col.base_data, dtype=col.dtype),
size=col.size,
offset=col.offset,
ordered=False,
)
return df
def _optimized_read_remote(path, row_groups, columns, fs, **kwargs):
if row_groups is not None and not isinstance(row_groups, list):
row_groups = [row_groups]
# Get byte-ranges that are known to contain the
# required data for this read
byte_ranges, footer, file_size = _get_parquet_byte_ranges(
path, row_groups, columns, fs, **kwargs
)
# Transfer the required byte-ranges with fsspec.
# Store these blocks in a local dummy buffer
dummy_buffer = _fsspec_data_transfer(
path,
fs,
byte_ranges=byte_ranges,
footer=footer,
file_size=file_size,
add_par1_magic=True,
**kwargs,
)
# Call cudf.read_parquet on the dummy buffer
strings_to_cats = kwargs.get("strings_to_categorical", False)
df = cudf.read_parquet(
dummy_buffer,
engine="cudf",
columns=columns,
row_groups=row_groups,
strings_to_categorical=strings_to_cats,
**kwargs.get("read", {}),
)
del dummy_buffer
return df
def _get_parquet_byte_ranges(
path,
rgs,
columns,
fs,
bytes_per_thread=256_000_000,
**kwargs,
):
# The purpose of this utility is to return a list
# of byte ranges (in path) that are known to contain
# the data needed to read `columns` and `rgs`
# Step 0 - Get size of file
file_size = fs.size(path)
# Return early if the file is too small to merit
# optimized data transfer
if file_size <= bytes_per_thread:
return None, None, file_size
# Step 1 - Get 32 KB from tail of file.
#
# This "sample size" can be tunable, but should
# always be >= 8 bytes (so we can read the footer size)
tail_size = 32_000
footer_sample = fs.tail(path, tail_size)
# Step 2 - Read the footer size and re-read a larger
# tail if necessary
footer_size = int.from_bytes(footer_sample[-8:-4], "little")
if tail_size < (footer_size + 8):
footer_sample = fs.tail(path, footer_size + 8)
# Step 3 - Collect required byte ranges
byte_ranges = []
md = pq.ParquetFile(io.BytesIO(footer_sample)).metadata
for r in range(md.num_row_groups):
# Skip this row-group if we are targeting
# specific row-groups
if rgs is None or r in rgs:
row_group = md.row_group(r)
for c in range(row_group.num_columns):
column = row_group.column(c)
name = column.path_in_schema
# Skip this column if we are targeting
# specific columns
if columns is None or name in columns:
file_offset0 = column.dictionary_page_offset
if file_offset0 is None:
file_offset0 = column.data_page_offset
num_bytes = column.total_uncompressed_size
byte_ranges.append((file_offset0, num_bytes))
return byte_ranges, footer_sample, file_size
#
# General Fsspec Data-transfer Optimization Code
#
def _fsspec_data_transfer(
path_or_fob,
fs,
byte_ranges=None,
footer=None,
file_size=None,
add_par1_magic=None,
bytes_per_thread=256_000_000,
max_gap=64_000,
mode="rb",
**kwargs,
):
# Calculate total file size
file_size = file_size or fs.size(path_or_fob)
# Check if a direct read makes the most sense
if not byte_ranges and bytes_per_thread >= file_size:
return fs.open(path_or_fob, mode=mode, cache_type="none").read()
# Threaded read into "dummy" buffer
buf = np.zeros(file_size, dtype="b")
if byte_ranges:
# Optimize/merge the ranges
byte_ranges = _merge_ranges(
byte_ranges,
max_block=bytes_per_thread,
max_gap=max_gap,
)
# Call multi-threaded data transfer of
# remote byte-ranges to local buffer
_read_byte_ranges(
path_or_fob,
byte_ranges,
buf,
fs,
**kwargs,
)
# Add Header & Footer bytes
if footer is not None:
footer_size = len(footer)
buf[-footer_size:] = np.frombuffer(footer[-footer_size:], dtype="b")
# Add parquet magic bytes (optional)
if add_par1_magic:
buf[:4] = np.frombuffer(b"PAR1", dtype="b")
if footer is None:
buf[-4:] = np.frombuffer(b"PAR1", dtype="b")
else:
byte_ranges = [
(b, min(bytes_per_thread, file_size - b)) for b in range(0, file_size, bytes_per_thread)
]
_read_byte_ranges(
path_or_fob,
byte_ranges,
buf,
fs,
**kwargs,
)
return buf.tobytes()
def _merge_ranges(byte_ranges, max_block=256_000_000, max_gap=64_000):
# Simple utility to merge small/adjacent byte ranges
new_ranges = []
if not byte_ranges:
# Early return
return new_ranges
offset, size = byte_ranges[0]
for (new_offset, new_size) in byte_ranges[1:]:
gap = new_offset - (offset + size)
if gap > max_gap or (size + new_size + gap) > max_block:
# Gap is too large or total read is too large
new_ranges.append((offset, size))
offset = new_offset
size = new_size
continue
size += new_size + gap
new_ranges.append((offset, size))
return new_ranges
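# Worked example: with the defaults above, two reads separated by a 50-byte
# gap are coalesced, while a range further than max_gap away stays separate:
#
#   _merge_ranges([(0, 100), (150, 100), (1_000_000, 10)])
#   -> [(0, 250), (1000000, 10)]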
def _assign_block(fs, path_or_fob, local_buffer, offset, nbytes):
with fs.open(path_or_fob, mode="rb", cache_type="none") as fob:
fob.seek(offset)
local_buffer[offset : offset + nbytes] = np.frombuffer(
fob.read(nbytes),
dtype="b",
)
def _read_byte_ranges(
path_or_fob,
ranges,
local_buffer,
fs,
**kwargs,
):
workers = []
for (offset, nbytes) in ranges:
if len(ranges) > 1:
workers.append(
Thread(target=_assign_block, args=(fs, path_or_fob, local_buffer, offset, nbytes))
)
workers[-1].start()
else:
_assign_block(fs, path_or_fob, local_buffer, offset, nbytes)
for worker in workers:
worker.join()
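# Illustrative sketch (not part of the original module): pulling a remote file
# into a local buffer with the helpers above. The s3 path is a placeholder and
# fsspec with the s3fs backend is assumed to be installed:
#
#   import fsspec
#   fs = fsspec.filesystem("s3")
#   raw = _fsspec_data_transfer("s3://bucket/data.parquet", fs, bytes_per_thread=64_000_000)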
|
snapraid-runner.py
|
#!/usr/bin/env python3
from __future__ import division
import argparse
import configparser
import logging
import logging.handlers
import os.path
import subprocess
import sys
import threading
import time
import traceback
from collections import Counter, defaultdict
from io import StringIO
# Global variables
config = None
email_log = None
def tee_log(infile, out_lines, log_level):
"""
Create a thread that saves all the output on infile to out_lines and
logs every line with log_level
"""
def tee_thread():
for line in iter(infile.readline, ""):
line = line.strip()
# Do not log the progress display
if "\r" in line:
line = line.split("\r")[-1]
logging.log(log_level, line.strip())
out_lines.append(line)
infile.close()
t = threading.Thread(target=tee_thread)
t.daemon = True
t.start()
return t
def snapraid_command(command, args={}, *, allow_statuscodes=[]):
"""
Run snapraid command
Raises subprocess.CalledProcessError if errorlevel != 0
"""
arguments = ["--conf", config["snapraid"]["config"]]
for (k, v) in args.items():
arguments.extend(["--" + k, str(v)])
p = subprocess.Popen(
[config["snapraid"]["executable"], command] + arguments,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
# Snapraid always outputs utf-8 on windows. On linux, utf-8
# also seems a sensible assumption.
encoding="utf-8",
errors="replace"
)
out = []
threads = [
tee_log(p.stdout, out, logging.OUTPUT),
tee_log(p.stderr, [], logging.OUTERR)]
for t in threads:
t.join()
ret = p.wait()
# sleep for a while to prevent output mixup
time.sleep(0.3)
if ret == 0 or ret in allow_statuscodes:
return out
else:
raise subprocess.CalledProcessError(ret, "snapraid " + command)
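# Example of how the helper above composes the command line (the executable
# and --conf paths come from the loaded configuration):
#
#   snapraid_command("scrub", {"percentage": 12, "older-than": 10})
#   # runs: snapraid scrub --conf <config> --percentage 12 --older-than 10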
def send_discord(success):
import json
import urllib.request
url = config['discord']['webhook']
if success:
body = "SnapRAID job completed successfully:\n"
else:
body = "Error during SnapRAID job:\n"
log = email_log.getvalue()
if len(log) > 2000:
log = log[:1800] + '--------- LOG WAS TOO BIG ----------'
body += f"```\n{log}\n```"
payload = {
'username': 'SnapRAID Runner',
'content': body
}
params = json.dumps(payload).encode('utf8')
headers = {
'content-type': 'application/json',
'user-agent': 'snapraid-runner/0.1'
}
try:
req = urllib.request.Request(url, method="POST",
headers=headers)
res = urllib.request.urlopen(req, data=params)
res.read().decode('utf8')
except Exception as e:
print(e)
def send_email(success):
import smtplib
from email.mime.text import MIMEText
from email import charset
if len(config["smtp"]["host"]) == 0:
logging.error("Failed to send email because smtp host is not set")
return
# use quoted-printable instead of the default base64
charset.add_charset("utf-8", charset.SHORTEST, charset.QP)
if success:
body = "SnapRAID job completed successfully:\n\n\n"
else:
body = "Error during SnapRAID job:\n\n\n"
log = email_log.getvalue()
maxsize = config['email'].get('maxsize', 500) * 1024
if maxsize and len(log) > maxsize:
cut_lines = log.count("\n", maxsize // 2, -maxsize // 2)
log = (
"NOTE: Log was too big for email and was shortened\n\n" +
log[:maxsize // 2] +
"[...]\n\n\n --- LOG WAS TOO BIG - {} LINES REMOVED --\n\n\n[...]".format(
cut_lines) +
log[-maxsize // 2:])
body += log
msg = MIMEText(body, "plain", "utf-8")
msg["Subject"] = config["email"]["subject"] + \
(" SUCCESS" if success else " ERROR")
msg["From"] = config["email"]["from"]
msg["To"] = config["email"]["to"]
smtp = {"host": config["smtp"]["host"]}
if config["smtp"]["port"]:
smtp["port"] = config["smtp"]["port"]
if config["smtp"]["ssl"]:
server = smtplib.SMTP_SSL(**smtp)
else:
server = smtplib.SMTP(**smtp)
if config["smtp"]["tls"]:
server.starttls()
if config["smtp"]["user"]:
server.login(config["smtp"]["user"], config["smtp"]["password"])
server.sendmail(
config["email"]["from"],
[config["email"]["to"]],
msg.as_string())
server.quit()
def finish(is_success):
if ("error", "success")[is_success] in config["email"]["sendon"]:
try:
if config['smtp']['enabled']:
send_email(is_success)
if config['discord']['enabled']:
send_discord(is_success)
except Exception:
logging.exception("Failed to send email")
if is_success:
logging.info("Run finished successfully")
else:
logging.error("Run failed")
sys.exit(0 if is_success else 1)
def load_config(args):
global config
parser = configparser.RawConfigParser()
parser.read(args.conf)
sections = ["snapraid", "logging", "email", "smtp", "scrub", "discord"]
config = dict((x, defaultdict(lambda: "")) for x in sections)
for section in parser.sections():
for (k, v) in parser.items(section):
config[section][k] = v.strip()
int_options = [
("snapraid", "deletethreshold"), ("logging", "maxsize"),
("scrub", "percentage"), ("scrub", "older-than"), ("email", "maxsize"),
]
for section, option in int_options:
try:
config[section][option] = int(config[section][option])
except ValueError:
config[section][option] = 0
config["smtp"]["enabled"] = (config["smtp"]["enabled"].lower() == "true")
config["smtp"]["ssl"] = (config["smtp"]["ssl"].lower() == "true")
config["smtp"]["tls"] = (config["smtp"]["tls"].lower() == "true")
config["scrub"]["enabled"] = (config["scrub"]["enabled"].lower() == "true")
config["discord"]["enabled"] = (
config["discord"]["enabled"].lower() == "true")
config["email"]["short"] = (config["email"]["short"].lower() == "true")
config["snapraid"]["touch"] = (
config["snapraid"]["touch"].lower() == "true")
if args.scrub is not None:
config["scrub"]["enabled"] = args.scrub
def setup_logger():
log_format = logging.Formatter(
"%(asctime)s [%(levelname)-6.6s] %(message)s")
root_logger = logging.getLogger()
logging.OUTPUT = 15
logging.addLevelName(logging.OUTPUT, "OUTPUT")
logging.OUTERR = 25
logging.addLevelName(logging.OUTERR, "OUTERR")
root_logger.setLevel(logging.OUTPUT)
console_logger = logging.StreamHandler(sys.stdout)
console_logger.setFormatter(log_format)
root_logger.addHandler(console_logger)
if config["logging"]["file"]:
max_log_size = max(config["logging"]["maxsize"], 0) * 1024
file_logger = logging.handlers.RotatingFileHandler(
config["logging"]["file"],
maxBytes=max_log_size,
backupCount=9)
file_logger.setFormatter(log_format)
root_logger.addHandler(file_logger)
if config["email"]["sendon"]:
global email_log
email_log = StringIO()
email_logger = logging.StreamHandler(email_log)
email_logger.setFormatter(log_format)
if config["email"]["short"]:
# Don't send program stdout in email
email_logger.setLevel(logging.INFO)
root_logger.addHandler(email_logger)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--conf",
default="snapraid-runner.conf",
metavar="CONFIG",
help="Configuration file (default: %(default)s)")
parser.add_argument("--no-scrub", action='store_false',
dest='scrub', default=None,
help="Do not scrub (overrides config)")
args = parser.parse_args()
if not os.path.exists(args.conf):
print("snapraid-runner configuration file not found")
parser.print_help()
sys.exit(2)
try:
load_config(args)
except Exception:
print("unexpected exception while loading config")
print(traceback.format_exc())
sys.exit(2)
try:
setup_logger()
except Exception:
print("unexpected exception while setting up logging")
print(traceback.format_exc())
sys.exit(2)
try:
run()
except Exception:
logging.exception("Run failed due to unexpected exception:")
finish(False)
def run():
logging.info("=" * 60)
logging.info("Run started")
logging.info("=" * 60)
if not os.path.isfile(config["snapraid"]["executable"]):
logging.error("The configured snapraid executable \"{}\" does not "
"exist or is not a file".format(
config["snapraid"]["executable"]))
finish(False)
if not os.path.isfile(config["snapraid"]["config"]):
logging.error("Snapraid config does not exist at " +
config["snapraid"]["config"])
finish(False)
if config["snapraid"]["touch"]:
logging.info("Running touch...")
snapraid_command("touch")
logging.info("*" * 60)
logging.info("Running diff...")
diff_out = snapraid_command("diff", allow_statuscodes=[2])
logging.info("*" * 60)
diff_results = Counter(line.split(" ")[0] for line in diff_out)
diff_results = dict((x, diff_results[x]) for x in
["add", "remove", "move", "update"])
logging.info(("Diff results: {add} added, {remove} removed, " +
"{move} moved, {update} modified").format(**diff_results))
if (config["snapraid"]["deletethreshold"] >= 0 and
diff_results["remove"] > config["snapraid"]["deletethreshold"]):
logging.error(
"Deleted files exceed delete threshold of {}, aborting".format(
config["snapraid"]["deletethreshold"]))
finish(False)
if (diff_results["remove"] + diff_results["add"] + diff_results["move"] +
diff_results["update"] == 0):
logging.info("No changes detected, no sync required")
else:
logging.info("Running sync...")
try:
snapraid_command("sync")
except subprocess.CalledProcessError as e:
logging.error(e)
finish(False)
logging.info("*" * 60)
if config["scrub"]["enabled"]:
logging.info("Running scrub...")
try:
snapraid_command("scrub", {
"percentage": config["scrub"]["percentage"],
"older-than": config["scrub"]["older-than"],
})
except subprocess.CalledProcessError as e:
logging.error(e)
finish(False)
logging.info("*" * 60)
logging.info("All done")
finish(True)
main()
|
portscanPython3.py
|
# py3
import socket
import threading
import queue
memes = int(input("threads: "))
target = (input("IP address: "))
minRange = int(input("min: "))
maxRange = int(input("max: "))
maxRange += 1
print_lock = threading.Lock()
q = queue.Queue()
def portscan(port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((target, port))
with print_lock:
print(port, 'open')
except OSError:
with print_lock:
print(port, 'closed')
finally:
s.close()
return
def threader():
while True:
worker = q.get()
portscan(worker)
q.task_done()
for x in range(memes):
t = threading.Thread(target=threader)
t.daemon = True
t.start()
for worker in range(minRange,maxRange):
q.put(worker)
q.join()
#queue.put in python3 queue.Queue()
#target = IP
# doesn't work on ubuntu? only responds as closed
|
sdca_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SdcaModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from threading import Thread
import tensorflow as tf
from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import _sdca_ops
from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import _ShardedMutableHashTable
from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel
from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SparseFeatureColumn
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.platform import googletest
_MAX_ITERATIONS = 100
_SHARD_NUMBERS = [None, 1, 3, 10]
_NUM_PARTITIONS = [2, 4]
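# Builds a tf.train.Example whose 'target' feature carries the label and whose
# '<key>_indices'/'<key>_values' features carry the sparse feature ids and their
# (identical) values, `value` (1.0 by default), for each key in feature_dict.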
def make_example_proto(feature_dict, target, value=1.0):
e = tf.train.Example()
features = e.features
features.feature['target'].float_list.value.append(target)
for key, values in feature_dict.items():
features.feature[key + '_indices'].int64_list.value.extend(values)
features.feature[key + '_values'].float_list.value.extend([value] *
len(values))
return e
def make_example_dict(example_protos, example_weights):
def parse_examples(example_protos):
features = {
'target': tf.FixedLenFeature(shape=[1],
dtype=tf.float32,
default_value=0),
'age_indices': tf.VarLenFeature(dtype=tf.int64),
'age_values': tf.VarLenFeature(dtype=tf.float32),
'gender_indices': tf.VarLenFeature(dtype=tf.int64),
'gender_values': tf.VarLenFeature(dtype=tf.float32)
}
return tf.parse_example(
[e.SerializeToString() for e in example_protos], features)
parsed = parse_examples(example_protos)
sparse_features = [
SparseFeatureColumn(
tf.reshape(
tf.split(1, 2, parsed['age_indices'].indices)[0], [-1]),
tf.reshape(parsed['age_indices'].values, [-1]),
tf.reshape(parsed['age_values'].values, [-1])), SparseFeatureColumn(
tf.reshape(
tf.split(1, 2, parsed['gender_indices'].indices)[0], [-1]),
tf.reshape(parsed['gender_indices'].values, [-1]),
tf.reshape(parsed['gender_values'].values, [-1]))
]
return dict(sparse_features=sparse_features,
dense_features=[],
example_weights=example_weights,
example_labels=tf.reshape(parsed['target'], [-1]),
example_ids=['%d' % i for i in range(0, len(example_protos))])
def make_variable_dict(max_age, max_gender):
# TODO(sibyl-toe9oF2e): Figure out how to derive max_age & max_gender from
# examples_dict.
age_weights = tf.Variable(tf.zeros([max_age + 1], dtype=tf.float32))
gender_weights = tf.Variable(tf.zeros([max_gender + 1], dtype=tf.float32))
return dict(sparse_features_weights=[age_weights, gender_weights],
dense_features_weights=[])
def make_dense_examples_and_variables_dicts(dense_features_values, weights,
labels):
"""Creates examples and variables dictionaries for dense features.
Variables shapes are inferred from the list of dense feature values passed as
argument.
Args:
dense_features_values: The values of the dense features
weights: The example weights.
labels: The example labels.
Returns:
One dictionary for the examples and one for the variables.
"""
dense_tensors = []
dense_weights = []
for dense_feature in dense_features_values:
dense_tensor = tf.convert_to_tensor(dense_feature, dtype=tf.float32)
check_shape_op = tf.Assert(
tf.less_equal(tf.rank(dense_tensor), 2),
['dense_tensor shape must be [batch_size, dimension] or [batch_size]'])
# Reshape to [batch_size, dense_column_dimension].
with tf.control_dependencies([check_shape_op]):
dense_tensor = tf.reshape(dense_tensor,
[dense_tensor.get_shape().as_list()[0], -1])
dense_tensors.append(dense_tensor)
# Add variables of shape [feature_column_dimension].
dense_weights.append(
tf.Variable(
tf.zeros(
[dense_tensor.get_shape().as_list()[1]], dtype=tf.float32)))
examples_dict = dict(
sparse_features=[],
dense_features=dense_tensors,
example_weights=weights,
example_labels=labels,
example_ids=['%d' % i for i in range(0, len(labels))])
variables_dict = dict(
sparse_features_weights=[], dense_features_weights=dense_weights)
return examples_dict, variables_dict
def get_binary_predictions_for_logistic(predictions, cutoff=0.5):
return tf.cast(
tf.greater_equal(predictions, tf.ones_like(predictions) * cutoff),
dtype=tf.int32)
def get_binary_predictions_for_hinge(predictions):
return tf.cast(
tf.greater_equal(predictions, tf.zeros_like(predictions)),
dtype=tf.int32)
# TODO(sibyl-Mooth6ku): Add tests that exercise L1 and Shrinking.
# TODO(sibyl-vie3Poto): Refactor tests to avoid repetition of boilerplate code.
class SdcaModelTest(TensorFlowTestCase):
"""Base SDCA optimizer test class for any loss type."""
def _single_threaded_test_session(self):
config = tf.ConfigProto(inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1)
return self.test_session(use_gpu=False, config=config)
class SdcaWithLogisticLossTest(SdcaModelTest):
"""SDCA optimizer test class for logistic loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(
examples, variables, options, num_table_shards=num_shards)
tf.initialize_all_variables().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
# The high tolerance in unregularized_loss comparisons is due to the
# fact that it's possible to trade off unregularized_loss vs.
# regularization and still have a sum that is quite close to the
# optimal regularized_loss value. SDCA's duality gap only ensures that
# the regularized_loss is within 0.01 of optimal.
# 0.525457 is the optimal regularized_loss.
# 0.411608 is the unregularized_loss at that optimum.
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(0.01,
lr.approximate_duality_gap().eval(),
rtol=1e-2,
atol=1e-2)
def testDistributedSimple(self):
# Setup test data
example_protos = [
make_example_proto({'age': [0],
'gender': [0]}, 0),
make_example_proto({'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
for num_partitions in _NUM_PARTITIONS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss',
num_partitions=num_partitions)
lr = SdcaModel(
examples, variables, options, num_table_shards=num_shards)
tf.initialize_all_variables().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
def Minimize():
with self._single_threaded_test_session():
for _ in range(_MAX_ITERATIONS):
train_op.run()
threads = []
for _ in range(num_partitions):
threads.append(Thread(target=Minimize))
threads[-1].start()
for t in threads:
t.join()
# The high tolerance in unregularized_loss comparisons is due to the
# fact that it's possible to trade off unregularized_loss vs.
# regularization and still have a sum that is quite close to the
# optimal regularized_loss value. SDCA's duality gap only ensures
# that the regularized_loss is within 0.01 of optimal.
# 0.525457 is the optimal regularized_loss.
# 0.411608 is the unregularized_loss at that optimum.
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertTrue(lr.approximate_duality_gap().eval() < 0.02)
def testSimpleNoL2(self):
# Same as test above (so comments from above apply) but without an L2.
# The algorithm should behave as if we have an L2 of 1 in optimization but
# 0 in regularized_loss.
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=0,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(
examples, variables, options, num_table_shards=num_shards)
tf.initialize_all_variables().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
# There is neither L1 nor L2 loss, so regularized and unregularized
# losses should be exactly the same.
self.assertAllClose(0.40244, unregularized_loss.eval(), atol=0.01)
self.assertAllClose(0.40244, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(0.01,
lr.approximate_duality_gap().eval(),
rtol=1e-2,
atol=1e-2)
def testSomeUnweightedExamples(self):
# Setup test data with 4 examples, but should produce the same
# results as testSimple.
example_protos = [
# Will be used.
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
# Will be ignored.
make_example_proto(
{'age': [1],
'gender': [0]}, 0),
# Will be used.
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
# Will be ignored.
make_example_proto(
{'age': [1],
'gender': [0]}, 1),
]
example_weights = [1.0, 0.0, 1.0, 0.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
# Only use examples 0 and 2
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(
examples, variables, options, num_table_shards=num_shards)
tf.initialize_all_variables().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllClose([0, 1, 1, 1], predicted_labels.eval())
self.assertAllClose(0.01,
lr.approximate_duality_gap().eval(),
rtol=1e-2,
atol=1e-2)
def testFractionalExampleLabel(self):
# Setup test data with 1 positive, and 1 mostly-negative example.
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0.1),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(
examples, variables, options, num_table_shards=num_shards)
tf.initialize_all_variables().run()
with self.assertRaisesOpError(
'Only labels of 0.0 or 1.0 are supported right now.'):
lr.minimize().run()
def testImbalanced(self):
# Setup test data with 1 positive, and 3 negative examples.
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [2],
'gender': [0]}, 0),
make_example_proto(
{'age': [3],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0, 1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(3, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(
examples, variables, options, num_table_shards=num_shards)
tf.initialize_all_variables().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
self.assertAllClose(0.226487 + 0.102902,
unregularized_loss.eval(),
atol=0.08)
self.assertAllClose(0.328394 + 0.131364, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 0, 0, 1], predicted_labels.eval())
self.assertAllClose(0.0,
lr.approximate_duality_gap().eval(),
rtol=2e-2,
atol=1e-2)
def testImbalancedWithExampleWeights(self):
# Setup test data with 1 positive, and 1 negative example.
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
example_weights = [3.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(
examples, variables, options, num_table_shards=num_shards)
tf.initialize_all_variables().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
self.assertAllClose(0.284860, unregularized_loss.eval(), atol=0.08)
self.assertAllClose(0.408044, loss.eval(), atol=0.012)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(0.0,
lr.approximate_duality_gap().eval(),
rtol=2e-2,
atol=1e-2)
def testInstancesOfOneClassOnly(self):
# Setup test data with 1 positive (ignored), and 1 negative example.
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [0]}, 1), # Shares gender with the instance above.
]
example_weights = [1.0, 0.0] # Second example "omitted" from training.
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(
examples, variables, options, num_table_shards=num_shards)
tf.initialize_all_variables().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 0], predicted_labels.eval())
self.assertAllClose(0.01,
lr.approximate_duality_gap().eval(),
rtol=1e-2,
atol=1e-2)
# TODO(katsiaspis): add a test for the case when examples at the end of an
# epoch are repeated, since example id may be duplicated.
class SdcaWithLinearLossTest(SdcaModelTest):
"""SDCA optimizer test class for linear (squared) loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, -10.0),
make_example_proto(
{'age': [1],
'gender': [1]}, 14.0),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
# Predictions should be 2/3 of label due to minimizing regularized loss:
# (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2
self.assertAllClose([-20.0 / 3.0, 28.0 / 3.0],
predictions.eval(),
rtol=0.005)
# Approximate gap should be very close to 0.0. (In fact, because the gap
# is only approximate, it is likely that upon convergence the duality gap
# can have a tiny negative value).
self.assertAllClose(0.0,
lr.approximate_duality_gap().eval(),
atol=1e-2)
def testL2Regularization(self):
# Setup test data
example_protos = [
# 2 identical examples
make_example_proto(
{'age': [0],
'gender': [0]}, -10.0),
make_example_proto(
{'age': [0],
'gender': [0]}, -10.0),
# 2 more identical examples
make_example_proto(
{'age': [1],
'gender': [1]}, 14.0),
make_example_proto(
{'age': [1],
'gender': [1]}, 14.0),
]
example_weights = [1.0, 1.0, 1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=16,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
# Predictions should be 1/5 of label due to minimizing regularized loss:
# (label - 2 * weight)^2 + L2 * 16 * weight^2
optimal1 = -10.0 / 5.0
optimal2 = 14.0 / 5.0
self.assertAllClose(
[optimal1, optimal1, optimal2, optimal2],
predictions.eval(),
rtol=0.01)
def testL1Regularization(self):
# Setup test data
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, -10.0),
make_example_proto(
{'age': [1],
'gender': [1]}, 14.0),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1.0,
symmetric_l1_regularization=4.0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
prediction = lr.predictions(examples)
loss = lr.regularized_loss(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
# Predictions should be -4.0, 48/5 due to minimizing regularized loss:
# (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2 + L1 * 4 * weight
self.assertAllClose([-4.0, 20.0 / 3.0], prediction.eval(), rtol=0.08)
# Loss should be the sum of the regularized loss value from above per
# example after plugging in the optimal weights.
self.assertAllClose(308.0 / 6.0, loss.eval(), atol=0.01)
def testFeatureValues(self):
# Setup test data
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, -10.0, -2.0),
make_example_proto(
{'age': [1],
'gender': [1]}, 14.0, 2.0),
]
example_weights = [5.0, 3.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
# There are 4 (sparse) variable weights to be learned. 2 for age and 2 for
# gender. Let w_1, w_2 be age weights, w_3, w_4 be gender weights, y_1,
# y_2 be the labels for examples 1 and 2 respectively and s_1, s_2 the
# corresponding *example* weights. With the given feature values, the loss
# function is given by:
# s_1/2(y_1 + 2w_1 + 2w_3)^2 + s_2/2(y_2 - 2w_2 - 2w_4)^2
# + \lambda/2 (w_1^2 + w_2^2 + w_3^2 + w_4^2). Solving for the optimal, it
# can be verified that:
# w_1* = w_3* = -2.0 s_1 y_1/(\lambda + 8 s_1) and
# w_2* = w_4* = 2 \cdot s_2 y_2/(\lambda + 8 s_2). Equivalently, due to
# regularization and example weights, the predictions are within:
# 8 \cdot s_i /(\lambda + 8 \cdot s_i) of the labels.
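      # Plugging in s_1=5, s_2=3 and \lambda=1: 8*5/(1+40) = 40/41 and
      # 8*3/(1+24) = 24/25, which are exactly the factors in the assertion below.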
self.assertAllClose([-10 * 40.0 / 41.0, 14.0 * 24 / 25.0],
predictions.eval(),
atol=0.01)
def testDenseFeaturesWithDefaultWeights(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0], [0.0]], [0.0, 1.0]],
weights=[1.0, 1.0],
labels=[10.0, -5.0])
options = dict(symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
# The loss function for these particular features is given by:
# 1/2(label_1-w_1)^2 + 1/2(label_2-w_2)^2 + \lambda/2 (w_1^2 + w_2^2). So,
# differentiating wrt to w_1, w_2 yields the following optimal values:
# w_1* = label_1/(\lambda + 1)= 10/2, w_2* =label_2/(\lambda + 1)= -5/2.
# In this case the (unnormalized regularized) loss will be:
# 1/2(10-5)^2 + 1/2(5-5/2)^2 + 1/2(5^2 + (5/2)^2) = 125.0/4. The actual
# loss should be further normalized by the sum of example weights.
self.assertAllClose([5.0, -2.5],
predictions.eval(),
rtol=0.01)
loss = lr.regularized_loss(examples)
self.assertAllClose(125.0 / 8.0, loss.eval(), atol=0.01)
def testDenseFeaturesWithArbitraryWeights(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]],
weights=[20.0, 10.0],
labels=[10.0, -5.0])
options = dict(symmetric_l2_regularization=5.0,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
# The loss function for these particular features is given by:
# 1/2 s_1 (label_1-w_1)^2 + 1/2 s_2(label_2-w_2)^2 +
# \lambda/2 (w_1^2 + w_2^2) where s_1, s_2 are the *example weights. It
# turns out that the optimal (variable) weights are given by:
# w_1* = label_1 \cdot s_1/(\lambda + s_1)= 8.0 and
# w_2* =label_2 \cdot s_2/(\lambda + s_2)= -10/3.
# In this case the (unnormalized regularized) loss will be:
# s_1/2(8-10)^2 + s_2/2(5-10/3)^2 + 5.0/2(8^2 + (10/3)^2) = 2175.0/9. The
# actual loss should be further normalized by the sum of example weights.
self.assertAllClose([8.0, -10.0/3],
predictions.eval(),
rtol=0.01)
loss = lr.regularized_loss(examples)
self.assertAllClose(2175.0 / 270.0, loss.eval(), atol=0.01)
class SdcaWithHingeLossTest(SdcaModelTest):
"""SDCA optimizer test class for hinge loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
# Before minimization, the weights default to zero. There is no loss due
# to regularization, only unregularized loss which is 0.5 * (1+1) = 1.0.
predictions = model.predictions(examples)
self.assertAllClose([0.0, 0.0], predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(1.0, unregularized_loss.eval())
self.assertAllClose(1.0, regularized_loss.eval())
# After minimization, the model separates perfectly the data points. There
# are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender (say w3
# and w4). Solving the system w1 + w3 = 1.0, w2 + w4 = -1.0 and minimizing
# wrt to \|\vec{w}\|_2, gives w1=w3=1/2 and w2=w4=-1/2. This gives 0.0
# unregularized loss and 0.25 L2 loss.
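      # Concretely, ||w||_2^2 = 4 * (1/2)^2 = 1, so the raw L2 penalty is
      # 1/2 * 1 = 0.5; normalized by the total example weight of 2 (as in the
      # squared-loss tests above) this gives the 0.25 asserted below.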
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
binary_predictions = get_binary_predictions_for_hinge(predictions)
self.assertAllEqual([-1.0, 1.0], predictions.eval())
self.assertAllEqual([0, 1], binary_predictions.eval())
self.assertAllClose(0.0, unregularized_loss.eval())
self.assertAllClose(0.25, regularized_loss.eval(), atol=0.05)
def testDenseFeaturesPerfectlySeparable(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[1.0, 1.0], [1.0, -1.0]],
weights=[1.0, 1.0],
labels=[1.0, 0.0])
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
predictions = model.predictions(examples)
binary_predictions = get_binary_predictions_for_hinge(predictions)
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
self.assertAllClose([1.0, -1.0], predictions.eval(), atol=0.05)
self.assertAllEqual([1, 0], binary_predictions.eval())
# (1.0, 1.0) and (1.0, -1.0) are perfectly separable by x-axis (that is,
# the SVM's functional margin >=1), so the unregularized loss is ~0.0.
# There is only loss due to l2-regularization. For these datapoints, it
# turns out that w_1~=0.0 and w_2~=1.0 which means that l2 loss is ~0.25.
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(0.0, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.25, regularized_loss.eval(), atol=0.02)
def testDenseFeaturesSeparableWithinMargins(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0, 0.5], [1.0, -0.5]]],
weights=[1.0, 1.0],
labels=[1.0, 0.0])
options = dict(symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
predictions = model.predictions(examples)
binary_predictions = get_binary_predictions_for_hinge(predictions)
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
# (1.0, 0.5) and (1.0, -0.5) are separable by x-axis but the datapoints
# are within the margins so there is unregularized loss (1/2 per example).
# For these datapoints, optimal weights are w_1~=0.0 and w_2~=1.0 which
# gives an L2 loss of ~0.25.
self.assertAllClose([0.5, -0.5], predictions.eval(), rtol=0.05)
self.assertAllEqual([1, 0], binary_predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(0.5, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.75, regularized_loss.eval(), atol=0.02)
def testDenseFeaturesWeightedExamples(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0], [1.0]], [[0.5], [-0.5]]],
weights=[3.0, 1.0],
labels=[1.0, 0.0])
options = dict(symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
tf.initialize_all_variables().run()
predictions = model.predictions(examples)
binary_predictions = get_binary_predictions_for_hinge(predictions)
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
# Point (1.0, 0.5) has higher weight than (1.0, -0.5) so the model will
# try to increase the margin from (1.0, 0.5). Due to regularization,
# (1.0, -0.5) will be within the margin. For these points and example
# weights, the optimal weights are w_1~=0.4 and w_2~=1.2 which give an L2
# loss of 0.5 * 0.25 * 0.25 * 1.6 = 0.2. The binary predictions will be
# correct, but the boundary will be much closer to the 2nd point than the
# first one.
self.assertAllClose([1.0, -0.2], predictions.eval(), atol=0.05)
self.assertAllEqual([1, 0], binary_predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(0.2, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.4, regularized_loss.eval(), atol=0.02)
class SparseFeatureColumnTest(SdcaModelTest):
"""Tests for SparseFeatureColumn.
"""
def testBasic(self):
expected_example_indices = [1, 1, 1, 2]
expected_feature_indices = [0, 1, 2, 0]
sfc = SparseFeatureColumn(expected_example_indices,
expected_feature_indices, None)
self.assertTrue(isinstance(sfc.example_indices, tf.Tensor))
self.assertTrue(isinstance(sfc.feature_indices, tf.Tensor))
self.assertEqual(sfc.feature_values, None)
with self._single_threaded_test_session():
self.assertAllEqual(expected_example_indices, sfc.example_indices.eval())
self.assertAllEqual(expected_feature_indices, sfc.feature_indices.eval())
expected_feature_values = [1.0, 2.0, 3.0, 4.0]
sfc = SparseFeatureColumn([1, 1, 1, 2], [0, 1, 2, 0],
expected_feature_values)
with self._single_threaded_test_session():
self.assertAllEqual(expected_feature_values, sfc.feature_values.eval())
class SdcaFprintTest(SdcaModelTest):
"""Tests for the SdcaFprint op.
This is one way of enforcing the platform-agnostic nature of SdcaFprint.
Basically we are checking against exact values and this test could be running
across different platforms. Note that it is fine for expected values to change
in the future, if the implementation of SdcaFprint changes (ie this is *not* a
frozen test).
"""
def testFprint(self):
with self._single_threaded_test_session():
in_data = tf.constant(['abc', 'very looooooong string', 'def'])
out_data = _sdca_ops.sdca_fprint(in_data)
self.assertAllEqual([b'a085f09013029e45-3980b2afd2126c04',
b'bc5a254df959f26c-512e479a50910f9f',
b'79999cd817a03f12-085f182230e03022'],
out_data.eval())
class ShardedMutableHashTableTest(SdcaModelTest):
"""Tests for the _ShardedMutableHashTable class."""
def testShardedMutableHashTable(self):
for num_shards in [1, 3, 10]:
with self._single_threaded_test_session():
default_val = -1
keys = tf.constant(['brain', 'salad', 'surgery'])
values = tf.constant([0, 1, 2], tf.int64)
table = _ShardedMutableHashTable(tf.string,
tf.int64,
default_val,
num_shards=num_shards)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
self.assertAllEqual(3, table.values_reduce_sum().eval())
if __name__ == '__main__':
googletest.main()
|
test_raft.py
|
#!/usr/bin/env python
# Copyright (C) 2014:
# Gabes Jean, naparuba@gmail.com
import threading
from multiprocessing import cpu_count, Process
NB_CPUS = cpu_count()
from opsbro_test import *
from opsbro.raft import RaftLayer, RaftManager
from opsbro.log import cprint
NB_NODES = 3
ALL_NODES = {}
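# TestRaftLayer short-circuits the network: send_raft_message() hands the
# message straight to the destination manager's stack_message(), so a whole
# cluster of RaftManager instances can be exercised inside one process.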
class TestRaftLayer(RaftLayer):
def __init__(self, node_uuid):
global NB_NODES
super(TestRaftLayer, self).__init__()
self.uuid = node_uuid
def get_nodes_uuids(self):
global ALL_NODES
return list(ALL_NODES.keys()) # list for python3
def send_raft_message(self, node_uuid, msg):
if node_uuid == self.uuid:
            return  # not to ourselves
other_manager = ALL_NODES[node_uuid]
logger.info('I %s give a message (%s) to %s' % (self.uuid, msg['type'], node_uuid))
other_manager.stack_message(msg, None)
def get_my_uuid(self):
return self.uuid
def get_other_node(self, node_uuid):
        raise NotImplementedError()
class RaftQueue():
def __init__(self):
self.queue = []
self.lock = threading.RLock()
def put(self, m):
with self.lock:
self.queue.append(m)
def get(self):
with self.lock:
if len(self.queue) == 0:
return {}
m = self.queue.pop()
return m
class TestRaft(OpsBroTest):
def tearDown(self):
self.stop()
def create(self, N=3):
global ALL_NODES
ALL_NODES.clear() # reset other tests
for node_uuid in range(N):
layer = TestRaftLayer(node_uuid)
manager = RaftManager(layer)
ALL_NODES[node_uuid] = manager
def _reset_stats(self):
self.stats = {'votes': {}, 'election_turn': {}, 'frozen_number': {}, 'is_frozen': {True: 0, False: 0}, 'with_leader': {True: 0, False: 0}}
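        # stats holds per-state node counts plus 'votes' (candidate uuid -> votes
        # received), 'election_turn' (turn -> number of nodes on that turn) and
        # 'with_leader' (whether each node currently knows a leader).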
def _compute_stats(self):
self._reset_stats()
for (node_uuid, manager) in ALL_NODES.items():
raft_node = manager.raft_node
state = raft_node._state
cprint("Node: %s is %s" % (node_uuid, state))
if state not in self.stats:
self.stats[state] = 0
self.stats[state] += 1
# Save candidate votes
if state == 'candidate':
self.stats['votes'][node_uuid] = raft_node._nb_vote_received
# Display election turns
election_turn = raft_node._election_turn
if election_turn not in self.stats['election_turn']:
self.stats['election_turn'][election_turn] = 0
self.stats['election_turn'][election_turn] += 1
# and frozen number
# if n.frozen_number not in self.stats['frozen_number']:
# self.stats['frozen_number'][n.frozen_number] = 0
# self.stats['frozen_number'][n.frozen_number] += 1
# self.stats['is_frozen'][n.is_frozen] += 1
self.stats['with_leader'][(raft_node._leader is not None)] += 1
def count(self, state):
        # recompute the stats so we are up to date
self._compute_stats()
logger.info('\n' * 10 + "Computed stats:" + '\n' * 10)
logger.info('%s' % self.stats)
return self.stats.get(state, 0)
def get_number_of_election_turns(self):
return len(self.stats['election_turn'])
def launch(self):
for (node_uuid, manager) in ALL_NODES.items():
t = threading.Thread(None, target=manager.do_raft_thread, name='node-%d' % node_uuid)
t.daemon = True
t.start()
self.start = time.time()
def stop(self):
for (node_uuid, manager) in ALL_NODES.items():
cprint("STOPPING: %s" % node_uuid)
manager.stop()
# Create N nodes with their own thread, and wait some seconds
def create_and_wait(self, N=3, wait=3):
self.create(N)
self.launch()
start = time.time()
while True:
now = time.time()
self._compute_stats()
nb_leader = self.count('leader')
nb_followers = self.count('follower')
if now > start + wait:
                err = 'Election timeout after %s seconds: nbleader=%s nbfollower=%s electionturn=%s' % (wait, nb_leader, nb_followers, self.get_number_of_election_turns())
cprint('ERROR: %s' % err)
os._exit(2) # fast kill
cprint("test_raft_large_leader_election:: Looking if we really got a leader, and only one")
if nb_leader == 1 and nb_followers == N - 1:
if self.get_number_of_election_turns() != 1:
cprint('FAIL: Election did SUCCESS but the election turn is not stable: nbleader=%s nbfollower=%s electionturn=%s after %.3fsec' % (nb_leader, nb_followers, self.get_number_of_election_turns(), time.time() - start))
cprint(str(self.stats))
os._exit(2) # fast kill
# Ok valid election turns
                cprint('Election did SUCCESS : nbleader=%s nbfollower=%s electionturn=%s after %.3fsec' % (nb_leader, nb_followers, self.get_number_of_election_turns(), time.time() - start))
cprint(str(self.stats))
os._exit(0)
            cprint("Current: %.3f %s %s %s" % (time.time() - start, nb_leader, nb_followers, self.get_number_of_election_turns()))
time.sleep(0.5)
def get_leader(self):
for d in self.nodes:
n = d['node']
if n.state == 'leader':
return n
def get_all_state(self, state):
res = []
for d in self.nodes:
n = d['node']
if n.state == state:
res.append(n)
return res
############################### TESTS
def test_raft_simple_leader_election(self):
self.create_and_wait(N=3, wait=3)
nb_leader = self.count('leader')
self.assert_(nb_leader == 1)
# always clean before exiting a test
self.stop()
# Try with far more nodes
def test_raft_large_leader_election(self):
cprint("TEST: test_raft_large_leader_election")
NB_NODES_BY_CPU = int(os.environ.get('NB_NODES_BY_CPU', '75'))
TEST_TIMEOUT = int(os.environ.get('TEST_TIMEOUT', '30'))
N = NB_NODES_BY_CPU # * NB_CPUS
        wait = TEST_TIMEOUT  # generous timeout for very slow machines (e.g. Travis CI)
        # Launch the test in a sub-process so it can be killed as soon as it finishes
        # (no need to wait for log shutdown and the like).
process = Process(None, target=self.create_and_wait, args=(N, wait))
process.start()
process.join(wait + 3)
if process.is_alive():
os.kill(process.pid, 9) # KILL
raise Exception('The process did timeout after %s seconds' % (wait + 3))
if process.exitcode != 0:
raise Exception('The process did fail with return code: %s' % process.exitcode)
cprint('OK: the process did exit well')
if __name__ == '__main__':
unittest.main()
|
fuchsia.py
|
# Copyright (C) 2018 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import select
import socket
import subprocess
import sys
import threading
from blinkpy.common import exit_codes
from blinkpy.common.path_finder import WEB_TESTS_LAST_COMPONENT
from blinkpy.common.path_finder import get_chromium_src_dir
from blinkpy.web_tests.port import base
from blinkpy.web_tests.port import driver
from blinkpy.web_tests.port import factory
from blinkpy.web_tests.port import linux
from blinkpy.web_tests.port import server_process
# Modules loaded dynamically in _import_fuchsia_runner().
# pylint: disable=invalid-name
aemu_target = None
device_target = None
fuchsia_target = None
qemu_target = None
symbolizer = None
# pylint: enable=invalid-name
# Imports Fuchsia runner modules. This is done dynamically only when FuchsiaPort
# is instantiated to avoid dependency on Fuchsia runner on other platforms.
def _import_fuchsia_runner():
sys.path.insert(0, os.path.join(get_chromium_src_dir(), 'build/fuchsia'))
# pylint: disable=import-error
# pylint: disable=invalid-name
# pylint: disable=redefined-outer-name
global aemu_target
import aemu_target
global device_target
import device_target
global fuchsia_target
import target as fuchsia_target
global qemu_target
import qemu_target
global symbolizer
import symbolizer
# pylint: enable=import-error
# pylint: enable=invalid-name
    # pylint: enable=redefined-outer-name
# Path to the content shell package relative to the build directory.
CONTENT_SHELL_PACKAGE_PATH = 'gen/content/shell/content_shell/content_shell.far'
# HTTP path prefixes for the HTTP server.
# WEB_TEST_PATH_PREFIX should be matched to the local directory name of
# web_tests because some tests and test_runner find test root directory
# with it.
WEB_TESTS_PATH_PREFIX = '/third_party/blink/' + WEB_TESTS_LAST_COMPONENT
# Paths to the directory where the fonts are copied to. Must match the path in
# content/shell/app/blink_test_platform_support_fuchsia.cc .
FONTS_DEVICE_PATH = '/system/fonts'
# Number of CPU cores in qemu.
CPU_CORES = 4
# Number of content_shell instances to run in parallel. 1 per CPU core.
MAX_WORKERS = CPU_CORES
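# How long (in seconds) to wait for content_shell to connect back to the local
# stdin-redirect socket (see FuchsiaServerProcess._start).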
PROCESS_START_TIMEOUT = 20
_log = logging.getLogger(__name__)
def _subprocess_log_thread(pipe, prefix):
try:
while True:
line = pipe.readline()
if not line:
return
_log.error('%s: %s', prefix, line)
finally:
pipe.close()
class SubprocessOutputLogger(object):
def __init__(self, process, prefix):
self._process = process
self._thread = threading.Thread(
target=_subprocess_log_thread, args=(process.stdout, prefix))
self._thread.daemon = True
self._thread.start()
def __del__(self):
self.close()
def close(self):
self._process.kill()
class _TargetHost(object):
def __init__(self, build_path, build_ids_path, ports_to_forward, target,
results_directory):
try:
self._amber_repo = None
self._target = target
self._target.Start()
self._setup_target(build_path, build_ids_path, ports_to_forward,
results_directory)
except:
self.cleanup()
raise
def _setup_target(self, build_path, build_ids_path, ports_to_forward,
results_directory):
# Tell SSH to forward all server ports from the Fuchsia device to
# the host.
forwarding_flags = [
'-O',
'forward', # Send SSH mux control signal.
'-N', # Don't execute command
'-T' # Don't allocate terminal.
]
for port in ports_to_forward:
forwarding_flags += ['-R', '%d:localhost:%d' % (port, port)]
self._proxy = self._target.RunCommandPiped([],
ssh_args=forwarding_flags,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self._listener = self._target.RunCommandPiped(['log_listener'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
listener_log_path = os.path.join(results_directory, 'system_log')
listener_log = open(listener_log_path, 'w')
self.symbolizer = symbolizer.RunSymbolizer(
self._listener.stdout, listener_log, [build_ids_path])
self._amber_repo = self._target.GetAmberRepo()
self._amber_repo.__enter__()
package_path = os.path.join(build_path, CONTENT_SHELL_PACKAGE_PATH)
self._target.InstallPackage([package_path])
# Process will be forked for each worker, which may make QemuTarget
# unusable (e.g. waitpid() for qemu process returns ECHILD after
# fork() ). Save command runner before fork()ing, to use it later to
# connect to the target.
self.target_command_runner = self._target.GetCommandRunner()
def run_command(self, command):
return self.target_command_runner.RunCommandPiped(
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def cleanup(self):
if self._amber_repo:
self._amber_repo.__exit__(None, None, None)
if self._target:
# Emulator targets will be shutdown during cleanup.
# TODO(sergeyu): Currently __init__() always starts Qemu, so we can
# just shutdown it. Update this logic when reusing target devices
# for multiple test runs.
if not isinstance(self._target, device_target.DeviceTarget):
self._target.Shutdown()
self._target = None
class FuchsiaPort(base.Port):
port_name = 'fuchsia'
SUPPORTED_VERSIONS = ('fuchsia', )
FALLBACK_PATHS = {
'fuchsia':
['fuchsia'] + linux.LinuxPort.latest_platform_fallback_path()
}
def __init__(self, host, port_name, **kwargs):
super(FuchsiaPort, self).__init__(host, port_name, **kwargs)
self._operating_system = 'fuchsia'
self._version = 'fuchsia'
self._target_device = self.get_option('device')
# TODO(sergeyu): Add support for arm64.
self._architecture = 'x86_64'
self.server_process_constructor = FuchsiaServerProcess
# Used to implement methods that depend on the host platform.
self._host_port = factory.PortFactory(host).get(**kwargs)
self._target_host = self.get_option('fuchsia_target')
self._zircon_logger = None
self._host_ip = self.get_option('fuchsia_host_ip')
_import_fuchsia_runner()
def _driver_class(self):
return ChromiumFuchsiaDriver
def _path_to_driver(self, target=None):
return self._build_path_with_target(target, CONTENT_SHELL_PACKAGE_PATH)
def __del__(self):
if self._zircon_logger:
self._zircon_logger.close()
def setup_test_run(self):
super(FuchsiaPort, self).setup_test_run()
try:
target_args = {
'out_dir': self._build_path(),
'system_log_file': None,
'fuchsia_out_dir': self.get_option('fuchsia_out_dir')
}
if self._target_device == 'device':
additional_args = {
'target_cpu': self.get_option('fuchsia_target_cpu'),
'ssh_config': self.get_option('fuchsia_ssh_config'),
'os_check': 'ignore',
'host': self.get_option('fuchsia_host'),
'port': self.get_option('fuchsia_port'),
'node_name': self.get_option('fuchsia_node_name')
}
target_args.update(additional_args)
target = device_target.DeviceTarget(**target_args)
else:
additional_args = {
'target_cpu': 'x64',
'cpu_cores': CPU_CORES,
'require_kvm': True,
'ram_size_mb': 8192
}
if self._target_device == 'qemu':
target_args.update(additional_args)
target = qemu_target.QemuTarget(**target_args)
else:
additional_args.update({
'enable_graphics': False,
'hardware_gpu': False
})
target_args.update(additional_args)
target = aemu_target.AemuTarget(**target_args)
self._target_host = _TargetHost(self._build_path(),
self.get_build_ids_path(),
self.SERVER_PORTS, target,
self.results_directory())
if self.get_option('zircon_logging'):
self._zircon_logger = SubprocessOutputLogger(
self._target_host.run_command(['dlog', '-f']), 'Zircon')
# Save fuchsia_target in _options, so it can be shared with other
# workers.
self._options.fuchsia_target = self._target_host
except fuchsia_target.FuchsiaTargetException as e:
_log.error('Failed to start qemu: %s.', str(e))
return exit_codes.NO_DEVICES_EXIT_STATUS
def clean_up_test_run(self):
if self._target_host:
self._target_host.cleanup()
self._target_host = None
def num_workers(self, requested_num_workers):
# Run a single qemu instance.
return min(MAX_WORKERS, requested_num_workers)
def _default_timeout_ms(self):
# Use 20s timeout instead of the default 6s. This is necessary because
# the tests are executed in qemu, so they run slower compared to other
# platforms.
return 20000
def requires_http_server(self):
"""HTTP server is always required to avoid copying the tests to the VM.
"""
return True
def start_http_server(self, additional_dirs, number_of_drivers):
additional_dirs['/third_party/blink/PerformanceTests'] = \
self._perf_tests_dir()
additional_dirs[WEB_TESTS_PATH_PREFIX] = self.web_tests_dir()
additional_dirs['/gen'] = self.generated_sources_directory()
additional_dirs['/third_party/blink'] = \
self._path_from_chromium_base('third_party', 'blink')
super(FuchsiaPort, self).start_http_server(additional_dirs,
number_of_drivers)
def path_to_apache(self):
return self._host_port.path_to_apache()
def path_to_apache_config_file(self):
return self._host_port.path_to_apache_config_file()
def default_smoke_test_only(self):
return True
def get_target_host(self):
return self._target_host
def get_build_ids_path(self):
package_path = self._path_to_driver()
return os.path.join(os.path.dirname(package_path), 'ids.txt')
class ChromiumFuchsiaDriver(driver.Driver):
def __init__(self, port, worker_number, no_timeout=False):
super(ChromiumFuchsiaDriver, self).__init__(port, worker_number,
no_timeout)
def _initialize_server_process(self, server_name, cmd_line, environment):
self._server_process = self._port.server_process_constructor(
self._port,
server_name,
cmd_line,
environment,
more_logging=self._port.get_option('driver_logging'),
host_ip=self._port._host_ip)
def _base_cmd_line(self):
cmd = [
'run',
'fuchsia-pkg://fuchsia.com/content_shell#meta/content_shell.cmx'
]
if self._port._target_device == 'qemu':
cmd.append('--ozone-platform=headless')
# Use Scenic on AEMU
else:
cmd.extend([
'--ozone-platform=scenic', '--enable-oop-rasterization',
'--use-vulkan', '--enable-gpu-rasterization',
'--force-device-scale-factor=1', '--use-gl=stub',
'--enable-features=UseSkiaRenderer,Vulkan'
])
return cmd
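    # Tests are served over HTTP instead of being copied to the device, so
    # absolute web-test paths are rewritten into http://127.0.0.1:8000 URLs
    # under WEB_TESTS_PATH_PREFIX.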
def _command_from_driver_input(self, driver_input):
command = super(ChromiumFuchsiaDriver,
self)._command_from_driver_input(driver_input)
if command.startswith('/'):
relative_test_filename = \
os.path.relpath(command, self._port.web_tests_dir())
command = 'http://127.0.0.1:8000' + WEB_TESTS_PATH_PREFIX + \
'/' + relative_test_filename
return command
# Custom version of ServerProcess that runs processes on a remote device.
class FuchsiaServerProcess(server_process.ServerProcess):
def __init__(self,
port_obj,
name,
cmd,
env=None,
treat_no_data_as_crash=False,
more_logging=False,
host_ip=None):
super(FuchsiaServerProcess, self).__init__(
port_obj, name, cmd, env, treat_no_data_as_crash, more_logging)
self._symbolizer_proc = None
self._host_ip = host_ip or qemu_target.HOST_IP_ADDRESS
def _start(self):
if self._proc:
raise ValueError('%s already running' % self._name)
self._reset()
# Fuchsia doesn't support stdin stream for packaged applications, so the
# stdin stream for content_shell is routed through a separate TCP
# socket. Open a local socket and then pass the address with the port as
# --stdin-redirect parameter. content_shell will connect to this address
# and will use that connection as its stdin stream.
listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_socket.bind(('127.0.0.1', 0))
listen_socket.listen(1)
stdin_port = listen_socket.getsockname()[1]
command = ['%s=%s' % (k, v) for k, v in self._env.items()] + \
self._cmd + \
['--no-sandbox', '--stdin-redirect=%s:%s' %
(self._host_ip, stdin_port)]
proc = self._port.get_target_host().run_command(command)
# Wait for incoming connection from content_shell.
fd = listen_socket.fileno()
read_fds, _, _ = select.select([fd], [], [], PROCESS_START_TIMEOUT)
if fd not in read_fds:
listen_socket.close()
proc.kill()
raise driver.DeviceFailure(
'Timed out waiting connection from content_shell.')
# Python's interfaces for sockets and pipes are different. To masquerade
# the socket as a pipe dup() the file descriptor and pass it to
# os.fdopen().
stdin_socket, _ = listen_socket.accept()
fd = stdin_socket.fileno() # pylint: disable=no-member
stdin_pipe = os.fdopen(os.dup(fd), "w", 0)
stdin_socket.close()
proc.stdin.close()
proc.stdin = stdin_pipe
# Run symbolizer to filter the stderr stream.
self._symbolizer_proc = symbolizer.RunSymbolizer(
proc.stderr, subprocess.PIPE, [self._port.get_build_ids_path()])
proc.stderr = self._symbolizer_proc.stdout
self._set_proc(proc)
def stop(self, timeout_secs=0.0, kill_tree=False):
result = super(FuchsiaServerProcess, self).stop(
timeout_secs, kill_tree)
if self._symbolizer_proc:
self._symbolizer_proc.kill()
return result
|
mash.py
|
#!/usr/bin/env python
from accessoryFunctions.accessoryFunctions import printtime, make_path, GenObject
from threading import Thread
from subprocess import call
from queue import Queue
import os
import re
__author__ = 'adamkoziol'
class Mash(object):
def sketching(self):
printtime('Indexing files for {} analysis'.format(self.analysistype), self.starttime)
# Create the threads for the analysis
for i in range(self.cpus):
threads = Thread(target=self.sketch, args=())
threads.setDaemon(True)
threads.start()
# Populate threads for each gene, genome combination
for sample in self.metadata:
# Create the analysis type-specific GenObject
setattr(sample, self.analysistype, GenObject())
# Set attributes
sample[self.analysistype].reportdir = os.path.join(sample.general.outputdirectory, self.analysistype)
make_path(sample[self.analysistype].reportdir)
sample[self.analysistype].targetpath = self.referencefilepath if not self.pipeline else os.path.join(
self.referencefilepath, self.analysistype)
sample[self.analysistype].refseqsketch = os.path.join(sample[self.analysistype].targetpath,
'RefSeqSketchesDefaults.msh')
sample[self.analysistype].sketchfilenoext = os.path.join(sample[self.analysistype].reportdir, sample.name)
sample[self.analysistype].sketchfile = sample[self.analysistype].sketchfilenoext + '.msh'
# Make the mash output directory if necessary
make_path(sample[self.analysistype].reportdir)
# Create a file containing the path/name of the filtered, corrected fastq files
sample[self.analysistype].filelist = os.path.join(sample[self.analysistype].reportdir,
'{}_fastqfiles.txt'.format(sample.name))
with open(sample[self.analysistype].filelist, 'w') as filelist:
filelist.write('\n'.join(sample.general.trimmedcorrectedfastqfiles))
# Create the system call
sample.commands.sketch = 'mash sketch -m 2 -p {} -l {} -o {}' \
.format(self.cpus, sample[self.analysistype].filelist, sample[self.analysistype].sketchfilenoext)
# Add each sample to the threads
try:
self.sketchqueue.put(sample)
except (KeyboardInterrupt, SystemExit):
printtime('Received keyboard interrupt, quitting threads', self.starttime)
quit()
# Join the threads
self.sketchqueue.join()
self.mashing()
def sketch(self):
while True:
sample = self.sketchqueue.get()
if not os.path.isfile(sample[self.analysistype].sketchfile):
call(sample.commands.sketch, shell=True, stdout=self.fnull, stderr=self.fnull)
self.sketchqueue.task_done()
def mashing(self):
printtime('Performing {} analyses'.format(self.analysistype), self.starttime)
# Create the threads for the analysis
for i in range(self.cpus):
threads = Thread(target=self.mash, args=())
threads.setDaemon(True)
threads.start()
# Populate threads for each gene, genome combination
for sample in self.metadata:
sample[self.analysistype].mashresults = os.path.join(sample[self.analysistype].reportdir, '{}.tab'.format(
sample.name))
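            # 'sort -gk3' orders mash output numerically by column 3 (the
            # distance), so the closest reference genome is on the first line,
            # which is all that parse() reads back.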
sample.commands.mash = \
'mash dist -p {} {} {} | sort -gk3 > {}'.format(self.threads,
sample[self.analysistype].refseqsketch,
sample[self.analysistype].sketchfile,
sample[self.analysistype].mashresults)
try:
self.mashqueue.put(sample)
except (KeyboardInterrupt, SystemExit):
printtime('Received keyboard interrupt, quitting threads', self.starttime)
quit()
# Join the threads
self.mashqueue.join()
self.parse()
def mash(self):
while True:
sample = self.mashqueue.get()
# , stdout=self.fnull, stderr=self.fnull
if not os.path.isfile(sample[self.analysistype].mashresults):
call(sample.commands.mash, shell=True)
self.mashqueue.task_done()
def parse(self):
printtime('Determining closest refseq genome', self.starttime)
# Create a dictionary to store the accession: taxonomy id of refseq genomes
refdict = dict()
# Set the name of the file storing the assembly summaries
referencefile = os.path.join(self.referencefilepath, self.analysistype, 'assembly_summary_refseq.txt')
# Extract the accession: genus species key: value pairs from the refseq summary file
with open(referencefile) as reffile:
for line in reffile:
# Ignore the first couple of lines
if line.startswith('# assembly_accession'):
# Iterate through all the lines with data
for accessionline in reffile:
# Split the lines on tabs
data = accessionline.split('\t')
# Populate the dictionary with the accession: tax id e.g. GCF_001298055: Helicobacter pullorum
refdict[data[0].split('.')[0]] = data[7]
for sample in self.metadata:
try:
# Open the results and extract the first line of data
mashdata = open(sample[self.analysistype].mashresults).readline().rstrip()
# Split on tabs
data = mashdata.split('\t')
referenceid, queryid, sample[self.analysistype].mashdistance, sample[self.analysistype]. \
pvalue, sample[self.analysistype].nummatches = data
# Extract the name of the refseq assembly from the mash outputs, and split as necessary e.g.
# GCF_000008865.1_ASM886v1_genomic.fna.gz becomes GCF_000008865
refid = referenceid.split('.')[0]
# Find the genus and species of the sample using the dictionary of refseq summaries
sample[self.analysistype].closestrefseq = refdict[refid]
sample[self.analysistype].closestrefseqgenus = sample[self.analysistype].closestrefseq.split()[0]
sample[self.analysistype].closestrefseqspecies = sample[self.analysistype].closestrefseq.split()[1]
except (KeyError, ValueError):
sample[self.analysistype].closestrefseq = 'NA'
sample[self.analysistype].closestrefseqgenus = 'NA'
sample[self.analysistype].closestrefseqspecies = 'NA'
sample[self.analysistype].mashdistance = 'NA'
sample[self.analysistype].pvalue = 'NA'
sample[self.analysistype].nummatches = 'NA'
# Set the closest refseq genus - will be used for all typing that requires the genus to be known
sample.general.referencegenus = sample[self.analysistype].closestrefseqgenus
self.reporter()
def reporter(self):
make_path(self.reportpath)
header = 'Strain,ReferenceGenus,ReferenceFile,ReferenceGenomeMashDistance,Pvalue,NumMatchingHashes\n'
data = ''
for sample in self.metadata:
try:
data += '{},{},{},{},{},{}\n'.format(sample.name,
sample[self.analysistype].closestrefseqgenus,
sample[self.analysistype].closestrefseq,
sample[self.analysistype].mashdistance,
sample[self.analysistype].pvalue,
sample[self.analysistype].nummatches)
except AttributeError:
data += '{}\n'.format(sample.name)
# Create the report file
reportfile = os.path.join(self.reportpath, 'mash.csv')
with open(reportfile, 'w') as report:
report.write(header)
report.write(data)
def __init__(self, inputobject, analysistype):
self.metadata = inputobject.runmetadata.samples
self.referencefilepath = inputobject.reffilepath
self.starttime = inputobject.starttime
self.reportpath = inputobject.reportpath
self.cpus = inputobject.cpus
self.threads = int(self.cpus / len(self.metadata)) if self.cpus / len(self.metadata) > 1 else 1
self.sketchqueue = Queue(maxsize=self.cpus)
self.mashqueue = Queue(maxsize=4)
self.analysistype = analysistype
self.pipeline = inputobject.pipeline
self.fnull = open(os.devnull, 'w') # define /dev/null
self.sketching()
|
preprocess.py
|
import numpy as np
from random import shuffle
import scipy.io as io
import argparse
from helper import *
import threading
import time
import itertools
import sys
parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, default='Indian_pines', help='Default: Indian_pines, options: Salinas, KSC, Botswana')
parser.add_argument('--train_ratio', type=float, default=0.2)
parser.add_argument('--validation_ratio', type=float, default=0.05)
# Note: argparse's type=bool treats any non-empty string as True, so the boolean flags use store_true
parser.add_argument('--channel_first', action='store_true', help='Store patches channel-first (default: channels last)')
parser.add_argument('--dtype', type=str, default='float32', help='Data type (e.g. float64, float32, float16, int64...)')
parser.add_argument('--plot', action='store_true', help='Plot satellite images and ground truth at the end')
opt = parser.parse_args()
# Try loading data from the folder... Otherwise download from online
input_mat, target_mat = maybeDownloadOrExtract(opt.data)
# Output data type
datatype = getdtype(opt.dtype)
HEIGHT = input_mat.shape[0]
WIDTH = input_mat.shape[1]
BAND = input_mat.shape[2]
OUTPUT_CLASSES = np.max(target_mat)
PATCH_SIZE = 5
CHANNEL_FIRST = opt.channel_first
# Normalize image data and select datatype
input_mat = input_mat.astype(datatype)
input_mat = input_mat - np.min(input_mat)
input_mat = input_mat / np.max(input_mat)
# Extract a list that contains the class number with sufficient training samples
list_labels = getListLabel(opt.data)
# For showing a loading animation only
end_loading = False
def animate():
global end_loading
for c in itertools.cycle(['|', '/', '-', '\\']):
if end_loading:
break
sys.stdout.write('\rExtracting '+ opt.data + ' dataset features...' + c)
sys.stdout.flush()
time.sleep(0.1)
sys.stdout.write('\rFinished!\t')
print("+-------------------------------------+")
print('Input_mat shape: ' + str(input_mat.shape))
MEAN_ARRAY = np.ndarray(shape=(BAND, 1))
new_input_mat = []
input_mat = np.transpose(input_mat, (2, 0, 1))
calib_val_pad = int((PATCH_SIZE - 1)/2)
for i in range(BAND):
MEAN_ARRAY[i] = np.mean(input_mat[i, :, :])
new_input_mat.append(np.pad(input_mat[i, :, :], calib_val_pad, 'constant', constant_values=0))
input_mat = np.array(new_input_mat)
def Patch(height_index, width_index):
    # Input:
    #   the (height, width) index in the spatial dimensions of the hyperspectral image
    # Output:
    #   a data cube of size PATCH_SIZE x PATCH_SIZE (the central pixel plus its 24 neighbours),
    #   labelled according to the central pixel
height_slice = slice(height_index, height_index+PATCH_SIZE)
width_slice = slice(width_index, width_index+PATCH_SIZE)
patch = input_mat[:, height_slice, width_slice]
mean_normalized_patch = []
for i in range(patch.shape[0]):
mean_normalized_patch.append(patch[i] - MEAN_ARRAY[i])
return np.array(mean_normalized_patch).astype(datatype)
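# A brief illustration of Patch (hypothetical indices), kept as comments so it does not run on import:
#   patch = Patch(10, 20)
#   patch.shape  ->  (BAND, PATCH_SIZE, PATCH_SIZE), mean-centred per spectral band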
# Assign empty array to store patched images
CLASSES = []
for i in range(OUTPUT_CLASSES):
CLASSES.append([])
# Assign empty array to count samples in each class
class_label_counter = [0] * OUTPUT_CLASSES
# Start timing for loading
loading_thread = threading.Thread(target=animate)
loading_thread.start()
start = time.time()
count = 0
for i in range(HEIGHT-1):
for j in range(WIDTH-1):
curr_inp = Patch(i, j)
curr_tar = target_mat[i, j]
if curr_tar:
CLASSES[curr_tar-1].append(curr_inp)
class_label_counter[curr_tar-1] += 1
count += 1
end_loading = True
end = time.time()
print("Total excution time..." + str(end-start)+'seconds')
print('Total number of samples: ' + str(count))
showClassTable(class_label_counter)
TRAIN_PATCH, TRAIN_LABELS = [], []
TEST_PATCH, TEST_LABELS =[], []
VAL_PATCH, VAL_LABELS = [], []
train_ratio = opt.train_ratio
val_ratio = opt.validation_ratio
# test_ratio = remainder of the data
counter = 0 # Represent train_index position
for i, data in enumerate(CLASSES):
datasize = []
if i + 1 in list_labels:
shuffle(data)
print('Class ' + str(i + 1) + ' is accepted')
size = round(class_label_counter[i]*train_ratio)
TRAIN_PATCH += data[:size]
TRAIN_LABELS += [counter] * size
datasize.append(size)
size1 = round(class_label_counter[i]*val_ratio)
VAL_PATCH += data[size:size+size1]
VAL_LABELS += [counter] * (size1)
datasize.append(size1)
TEST_PATCH += data[size+size1:]
TEST_LABELS += [counter] * len(data[size+size1:])
        datasize.append(len(data[size+size1:]))
counter += 1
else:
print('-Class ' + str(i + 1) + ' is rejected due to insufficient samples')
TRAIN_LABELS = np.array(TRAIN_LABELS)
TRAIN_PATCH = np.array(TRAIN_PATCH)
TEST_PATCH = np.array(TEST_PATCH)
TEST_LABELS = np.array(TEST_LABELS)
VAL_PATCH = np.array(VAL_PATCH)
VAL_LABELS = np.array(VAL_LABELS)
print("+-------------------------------------+")
print("Size of Training data: " + str(len(TRAIN_PATCH)) )
print("Size of Validation data: " + str(len(VAL_PATCH)) )
print("Size of Testing data: " + str(len(TEST_PATCH)) )
print("+-------------------------------------+")
train_idx = list(range(len(TRAIN_PATCH)))
shuffle(train_idx)
TRAIN_PATCH = TRAIN_PATCH[train_idx]
if not CHANNEL_FIRST:
TRAIN_PATCH = np.transpose(TRAIN_PATCH, (0, 2, 3, 1))
TRAIN_LABELS = OnehotTransform(TRAIN_LABELS[train_idx])
train = {}
train["train_patch"] = TRAIN_PATCH
train["train_labels"] = TRAIN_LABELS
io.savemat("./data/" + opt.data + "_Train_patch_" + str(PATCH_SIZE) + ".mat", train)
test_idx = list(range(len(TEST_PATCH)))
shuffle(test_idx)
TEST_PATCH = TEST_PATCH[test_idx]
if not CHANNEL_FIRST:
TEST_PATCH = np.transpose(TEST_PATCH, (0, 2, 3, 1))
TEST_LABELS = OnehotTransform(TEST_LABELS[test_idx])
test = {}
test["test_patch"] = TEST_PATCH
test["test_labels"] = TEST_LABELS
io.savemat("./data/" + opt.data + "_Test_patch_" + str(PATCH_SIZE) + ".mat", test)
val_idx = list(range(len(VAL_PATCH)))
shuffle(val_idx)
VAL_PATCH = VAL_PATCH[val_idx]
if not CHANNEL_FIRST:
VAL_PATCH = np.transpose(VAL_PATCH, (0, 2, 3, 1))
print(VAL_PATCH.shape)
VAL_LABELS = OnehotTransform(VAL_LABELS[val_idx])
val = {}
val["val_patch"] = VAL_PATCH
val["val_labels"] = VAL_LABELS
io.savemat("./data/" + opt.data + "_Val_patch_" + str(PATCH_SIZE) + ".mat", val)
print("+-------------------------------------+")
print("Summary")
print('Train_patch.shape: '+ str(TRAIN_PATCH.shape) )
print('Train_label.shape: '+ str(TRAIN_LABELS.shape) )
print('Test_patch.shape: ' + str(TEST_PATCH.shape))
print('Test_label.shape: ' + str(TEST_LABELS.shape))
print("Validation batch Shape: " + str(VAL_PATCH.shape) )
print("Validation label Shape: " + str(VAL_LABELS.shape) )
print("+-------------------------------------+")
print("\nFinished processing.......")
if opt.plot:
print('\n Looking at some sample images')
plot_random_spec_img(TRAIN_PATCH, TRAIN_LABELS)
plot_random_spec_img(TEST_PATCH, TEST_LABELS)
plot_random_spec_img(VAL_PATCH, VAL_LABELS)
GroundTruthVisualise(target_mat)
|
fullnode.py
|
#!env/bin/python
import json
from operator import truediv
import socketserver
import sys
import threading
import time
import logging
from PyChain import Blockchain, request
from PyChain.protocol import recv_msg, send_msg
"""
A PyChain full node
This network communicated through sockets.
Messages are encoded using JSON.
The fields are:
- request: the type of request
- body (optional, depends on request): the body of the request
- time
A full node's job is to keep track of the blockchain,
by receiving blocks, verifying them and finally adding them to the blockchain.
They also answer to requests from other participants of the blockchain.
"""
logging.basicConfig(level=logging.DEBUG)
blockchain = Blockchain()
blockchain.import_chain([
Blockchain.encode_block(0, b"", 0, "Genesis block")])
def get_peers(old_peers: list):
new_peers = set(old_peers)
for peer in static_peers:
try:
response = request(peer, "get_peers")
n = len(response["response"])
logging.info(f"Got {n} peers from {peer}")
            # set.union() returns a new set; update() modifies new_peers in place
            new_peers.update(response["response"])
        except Exception:
            pass
return list(new_peers)
def check_peers(peers: list):
"""
    Checks which peers respond to ping and asks them to register this node as a peer
"""
alive_peers = []
for peer in peers:
try:
response = request(peer, "ping")
if response["response"] == "pong":
alive_peers.append(peer)
request(peer, "add_me", f"{HOST}:{PORT}")
except:
pass
return alive_peers
def longest_chain(peers: list):
"""
Returns the blockchain with the longest chain from the peers.
This function also verifies that the chain is valid.
"""
peer_length = {}
for peer in peers:
try:
response = request(peer, "get_length")
peer_length[peer] = response["response"]
except Exception as e:
print(e)
sorted_peer_length = {k: v for k, v in sorted(
peer_length.items(), key=lambda item: -item[1])}
for peer, length in sorted_peer_length.items():
# If the peer with the longest chain does not have a longer chain than the local one: break
if length <= len(blockchain.blocks):
break
response = request(peer, "get_blocks")
assert len(response["response"]) == length
chain = Blockchain()
chain.import_chain([
Blockchain.encode_block(0, b"", 0, "Genesis block")])
for block in response["response"][1:]:
chain.blocks.append(Blockchain.dict_to_block(block))
valid, reason = chain.verify_chain()
if valid:
return chain
class RequestHandler(socketserver.BaseRequestHandler):
@staticmethod
def create_response(response: str | dict, http_code: int):
return {"response": response,
"time": time.time(),
"http_code": http_code}
"""
Here come request handing functions
"""
def get_blocks(self):
return self.create_response([Blockchain.block_to_dict(block)
for block in blockchain.blocks], 200)
def get_block(self, index: int):
return self.create_response(Blockchain.block_to_dict(blockchain.blocks[index]), 200)
    def get_blockchain_length(self):
        return self.create_response(len(blockchain.blocks), 200)
def get_peers(self):
return self.create_response(peers, 200)
    def receive_block(self, block: dict):
blockchain.blocks.append(Blockchain.dict_to_block(block))
if not blockchain.verify_chain()[0]:
blockchain.blocks.pop()
return self.create_response("Invalid chain", 400)
return self.create_response("OK, block added", 200)
def add_peer(self, host: str):
if host in peers:
return self.create_response("Already in peers", 400)
peers.append(host)
return self.create_response("OK", 200)
def handle(self):
"""
This method is called when a request is received
It checks the request type and returns a response
"""
host, port = self.client_address
data = recv_msg(self.request).decode()
request = json.loads(data)
logging.info(f"{host}:{port} requested {request['request']}")
match request['request']:
case 'get_blocks':
response = self.get_blocks()
case 'get_block':
response = self.get_block(request['body'])
case "ping":
response = self.create_response("pong", 200)
case "recieve_block":
response = self.recieve_block(request["body"])
case "get_peers":
response = self.get_peers()
case "get_length":
                response = self.get_blockchain_length()
case "add_peer":
response = self.add_peer(request["body"])
case _:
response = self.create_response("Unknown request", 400)
send_msg(self.request, json.dumps(response).encode())
def poll_peers_thread():
    global blockchain
logging.info("Polling peers has started")
while True:
longest_chain_found = longest_chain(peers)
if longest_chain_found:
logging.info(
f"New longest chain of length {len(longest_chain_found.blocks)} found.")
blockchain = longest_chain_found
time.sleep(5)
if __name__ == '__main__':
is_on_server = True
HOST, PORT = 'localhost', int(sys.argv[1])
    with open('peers.txt', 'r') as peers_file:
        static_peers = [line for line in peers_file.read().split('\n') if line != '']
peers = get_peers(check_peers(static_peers))
socketserver.TCPServer.allow_reuse_address = True
polling_thread = threading.Thread(target=poll_peers_thread)
polling_thread.start()
with socketserver.ThreadingTCPServer((HOST, PORT), RequestHandler) as server:
logging.info("Starting server on {}:{}".format(HOST, PORT))
server.serve_forever()
logging.info("Stopping server")
|
model_train_eval_manager.py
|
import os
import threading
from application.paths.services.path_service import PathService
from domain.models.hyper_parameter_information import HyperParameterInformation
from domain.models.paths import Paths
from application.training_module.services.model_evaluation_service import ModelEvaluationService
from application.training_module.services.model_trainer_service import ModelTrainerService
from domain.services.contract.abstract_model_train_evaluation_manager import \
AbstarctModelTrainEvaluationManager
class ModelTrainEvaluationManager(AbstarctModelTrainEvaluationManager):
"""
    A class used to train a model while continuously evaluating it
...
Attributes
----------
path : Paths
DTO containing all necessary paths
    model_train : ModelTrainerService
ModelTrainerService instance
model_eval : ModelEvaluationService
ModelEvaluationService instance
Methods
-------
    train_eval_continuously(hyper_params: HyperParameterInformation) -> None
        evaluates the model at each checkpoint using TF's eval_continuously() while
        the model trains concurrently in a separate thread
"""
def __init__(self, path: PathService, model_trainer: ModelTrainerService, model_eval: ModelEvaluationService):
self.path: Paths = path.get_paths()
self.model_train: ModelTrainerService = model_trainer
self.model_eval: ModelEvaluationService = model_eval
def train_eval_continuously(self, hyper_params: HyperParameterInformation) -> None:
evaluation_thread = threading.Thread(target=self.model_eval.evaluate_model, args=(hyper_params,))
training_thread = threading.Thread(target=self.model_train.train, args=(hyper_params,))
evaluation_thread.start()
training_thread.start()
training_thread.join()
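# A minimal usage sketch (the service instances below are assumed to be wired up elsewhere),
# kept as comments so it does not run on import:
#   manager = ModelTrainEvaluationManager(path_service, trainer_service, evaluation_service)
#   manager.train_eval_continuously(hyper_params)
#   # training runs in one thread while evaluation re-checks each new checkpoint in another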
|
main.py
|
import argparse
from threading import Thread
from dl import Downloader
from utils.query import download_state, print_download_state
parser = argparse.ArgumentParser(description='Downloader.')
parser.add_argument('strings', metavar='L', type=str, nargs='+',
                    help='a URL string to download')
parser.add_argument('--o', dest='output', help='output file path for download (default: file name from url)')
if __name__ == '__main__':
args = parser.parse_args()
for url in args.strings:
download = Downloader(url, args.output if args.output else url.split('/')[-1])
Thread(target=download.download).start()
thread = Thread(target=print_download_state, args=(download,))
thread.start()
thread.join()
|
cluster.py
|
import threading
from .container_manager import ContainerManager
from .statistics_collector import StatisticsCollector
class Cluster(object):
def __init__(self, containers, is_wait_sync=None, debug=False):
self.is_wait_sync = is_wait_sync
self.containers = containers
self.container_managers = []
self.is_started = False
self.debug = debug
self.stats = []
for c in containers:
if is_wait_sync is not None:
c.wait_sync = is_wait_sync
self.container_managers.append(ContainerManager(container=c))
self.stats_collector = StatisticsCollector(self.container_managers, debug=self.debug)
def start(self):
if self.is_started:
return
threads = []
for c in self.containers:
t = threading.Thread(target=c.start)
threads.append(t)
t.start()
for t in threads:
t.join()
self.is_started = True
return self
def stop(self):
if not self.is_started:
return
threads = []
for c in self.containers:
t = threading.Thread(target=c.stop)
threads.append(t)
t.start()
for t in threads:
t.join()
self.is_started = False
return self
def collect_stats(self, n, test_scenario=None):
if not self.is_started:
return None
self.stats = self.stats_collector.collect_stats(n, test_scenario)
return self.stats
def print_stats(self):
if len(self.stats) == 0:
return
print("Statistics:")
for i, c in enumerate(self.containers):
print(c.description)
if len(self.stats[i]) == 0:
print('Zero traffic on {num} container\n'.format(num=i))
continue
print("Stats (bytes/s):\n{}".format(self.stats[i].describe(include='all').astype(int)))
print("\nTraffic sum(bytes):\n{}".format(self.stats[i].sum(axis=0)))
print()
        # Find the first container with non-empty stats and use its traffic sum as the baseline
        bias_sum = []
        bias_num = 0
        for i, c in enumerate(self.containers):
            if len(self.stats[i]) != 0:
                bias_sum = self.stats[i].sum(axis=0)
                bias_num = i
                break
for i, c in enumerate(self.containers):
if i == bias_num:
print("Network difference wrt {num} container:".format(num=bias_num))
continue
if len(self.stats[i]) == 0:
continue
print("\nTraffic sum(bytes) - '{}':\n{}".format(c.description,
(self.stats[i].sum(axis=0) - bias_sum) / bias_sum * 100))
|
server.py
|
from __future__ import absolute_import
import subprocess
from multiprocessing import Process
import signal
from .common import *
from .utils import colorify
from . import search
CHILD_PROC = None
MASTER = None
WORKER = None
def server_up(host, port):
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((host, port))
sock.close()
if result == 0:
return True
else:
return False
def server_functional(host, port, dbtype):
if server_up(host, port):
try:
search.get_hits("test", "TESTSEQ", host, port, dbtype)
        except Exception as e:
            # print('Server not ready', e)
return False
else:
return True
return False
def safe_exit(a, b):
if CHILD_PROC:
CHILD_PROC.kill()
sys.exit(0)
def load_server(dbpath, client_port, worker_port, cpu, output=None):
    global CHILD_PROC, MASTER, WORKER
if not output:
OUT = open(os.devnull, 'w')
else:
OUT = output
signal.signal(signal.SIGINT, safe_exit)
signal.signal(signal.SIGTERM, safe_exit)
def start_master():
cmd = HMMPGMD +' --master --cport %d --wport %s --hmmdb %s' %(client_port, worker_port, dbpath)
CHILD_PROC = subprocess.Popen(cmd.split(), shell=False, stderr=OUT, stdout=OUT)
while 1:
time.sleep(60)
def start_worker():
cmd = HMMPGMD +' --worker localhost --wport %s --cpu %d' %(worker_port, cpu)
CHILD_PROC = subprocess.Popen(cmd.split(), shell=False, stderr=OUT, stdout=OUT)
while 1:
time.sleep(60)
MASTER = Process(target=start_master)
MASTER.start()
WORKER = Process(target=start_worker)
WORKER.start()
return dbpath, MASTER, WORKER
def shutdown_server():
global MASTER, WORKER
try:
os.killpg(os.getpgid(MASTER.pid), signal.SIGTERM)
except (OSError, AttributeError):
pass
try:
os.killpg(os.getpgid(WORKER.pid), signal.SIGTERM)
except (OSError, AttributeError):
pass
def alive(p):
""" Check For the existence of a unix pid. """
return p.is_alive()
def generate_idmap(dbpath):
if dbpath.endswith(".h3f"):
dbpath = dbpath.replace(".h3f", "")
cmd = """%s %s |grep -v '#'|awk '{print $1" "$2}' > %s""" %(HMMSTAT, dbpath, dbpath+'.idmap')
    print(colorify(cmd, "cyan"))
print('Generating idmap in '+dbpath+'.idmap')
return os.system(cmd) == 0
|
__init__.py
|
# -*- coding: utf-8 -*-
# @Author: Cody Kochmann
# @Date: 2017-04-27 12:49:17
# @Last Modified 2018-03-12
# @Last Modified time: 2020-04-05 11:01:47
"""
battle_tested - automated function fuzzing library to quickly test production
code to prove it is "battle tested" and safe to use.
Examples of Primary Uses:
from battle_tested import fuzz
def test_function(a,b,c):
return c,b,a
fuzz(test_function)
# or to collect tests
fuzz(test_function, keep_testing=True)
Or:
from battle_tested import battle_tested
@battle_tested()
def test_function(a,b,c):
return c,b,a
"""
from __future__ import print_function, unicode_literals
import builtins
from collections import deque
from functools import wraps, partial
from gc import collect as gc
from generators.inline_tools import attempt
from hypothesis import given, strategies as st, settings, Verbosity
from hypothesis.errors import HypothesisException
from itertools import product, cycle, chain, islice
from multiprocessing import Process, Queue, cpu_count as multi_cpu_count
from prettytable import PrettyTable
from random import choice, randint
from re import findall
from stricttuple import stricttuple
from string import ascii_letters, digits
from time import sleep
from time import time
import generators as gen
import logging
import os
import signal
import sys
import traceback
__all__ = 'battle_tested', 'fuzz', 'disable_traceback', 'enable_traceback', 'garbage', 'crash_map', 'success_map', 'results', 'stats', 'print_stats', 'function_versions', 'time_all_versions_of', 'easy_street', 'run_tests', 'multiprocess_garbage'
# try to set the encoding
attempt(lambda: (reload(sys), sys.setdefaultencoding('utf8')))
class hardware:
''' single reference of what hardware the system is working with '''
# get the count of cpu cores, if it fails, assume 1 for safety
cpu_count = attempt(multi_cpu_count, default_output=1)
single_core = cpu_count == 1
class float(float): # this patches float.__repr__ to work correctly
def __repr__(self):
if all(i in '1234567890.' for i in builtins.float.__repr__(self)):
return 'float({})'.format(builtins.float.__repr__(self))
else:
return 'float("{}")'.format(builtins.float.__repr__(self))
class complex(complex): # this patches complex.__repr__ to work correctly
def __repr__(self):
return 'complex("{}")'.format(builtins.complex.__repr__(self))
def compilable(src):
return attempt(
lambda:(compile(src, 'waffles', 'exec'), True)[1] ,
False
)
def runnable(src):
return attempt(
lambda:(eval(compile(src, 'waffles', 'exec')), True)[1] ,
False
)
def runs_fine(src):
return attempt(
lambda:(eval(src), True)[1] ,
False
)
def valid_repr(o):
''' returns true if the object has a valid repr '''
return attempt(
lambda: (eval(repr(o)) == o) or (eval(repr(o)) is o),
False
)
class unittest_builder(object):
@staticmethod
def test_body(fn, test_code):
''' call this to add the code needed for a full unittest script '''
d = {
'function_path':fn.__code__.co_filename,
'function_name':fn.__name__,
'module_name':'.'.join(os.path.basename(fn.__code__.co_filename).split('.')[:-1]),
'test_code': test_code
}
return '''#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from uuid import UUID
from fractions import Fraction
import sys
import os.path
sys.path.append(os.path.dirname("{function_path}"))
from {module_name} import {function_name}
class Test_{function_name}(unittest.TestCase):
""" automated unittest generated by battle_tested """{test_code}
if __name__ == '__main__':
unittest.main()
'''.format(**d)
@staticmethod
def equal_test(test_name, invocation_code, output):
''' generate tests that assert that the input equals the output '''
return '''
def test_{}(self):
self.assertEqual({}, {})'''.format(test_name, invocation_code, repr(output))
@staticmethod
def raises_test(test_name, invocation_code, ex_type):
''' generate a unittest that asserts that a certain input raises the given exception '''
return '''
def test_{}(self):
with self.assertRaises({}):
{}'''.format(test_name, ex_type.__name__, invocation_code.replace('nan', 'float("""nan""")'))
def getsource(fn):
''' basically just inspect.getsource, only this one doesn't crash as much '''
from inspect import getsource
try:
return getsource(fn)
except:
return attempt(lambda: '{}'.format(fn), default_output='')
def pin_to_cpu(core_number):
''' pin the current process to a specific cpu to avoid dumping L1 cache'''
assert type(core_number) == int, 'pin_to_cpu needs an int as the argument'
    # just attempt this, it won't work on EVERY system in existence
attempt(lambda: os.sched_setaffinity(os.getpid(), (core_number,)))
def renice(new_niceness):
''' renice the current process calling this function to the new input '''
assert type(new_niceness) == int, 'renice needs an int as its argument'
    # just attempt this, it won't work on EVERY system in existence
attempt(lambda: os.nice(new_niceness))
pin_to_cpu(0) # pin this main process to the first core
renice(15) # renice this main process, idk why 15, but it gives room for priorities above and below
def shorten(string, max_length=80, trailing_chars=3):
    ''' trims the 'string' argument down to 'max_length' to make previews of long string values '''
assert type(string).__name__ in {'str', 'unicode'}, 'shorten needs string to be a string, not {}'.format(type(string))
assert type(max_length) == int, 'shorten needs max_length to be an int, not {}'.format(type(max_length))
assert type(trailing_chars) == int, 'shorten needs trailing_chars to be an int, not {}'.format(type(trailing_chars))
assert max_length > 0, 'shorten needs max_length to be positive, not {}'.format(max_length)
assert trailing_chars >= 0, 'shorten needs trailing_chars to be greater than or equal to 0, not {}'.format(trailing_chars)
return (
string
) if len(string) <= max_length else (
'{before:}...{after:}'.format(
before=string[:max_length-(trailing_chars+3)],
after=string[-trailing_chars:] if trailing_chars>0 else ''
)
)
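# A brief illustration of shorten (hypothetical values):
#   shorten('abcdefghij', max_length=8, trailing_chars=2)  ->  'abc...ij'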
class easy_street:
''' This is a namespace for high speed test generation of various types '''
@staticmethod
def chars():
test_chars = ascii_letters + digits
for _ in gen.loop():
for combination in product(test_chars, repeat=4):
for i in combination:
yield i
@staticmethod
def strings():
test_strings = [
'',
'exit("######## WARNING this code is executing strings blindly ########")'
]
# this snippet rips out every word from doc strings
test_strings += list(set(findall(
r'[a-zA-Z\_]{1,}',
[v.__doc__ for v in globals().values() if hasattr(v, '__doc__')].__repr__()
)))
for _ in gen.loop():
for combination in product(test_strings, repeat=4):
for i in combination:
yield i
@staticmethod
def bools():
booleans = (True, False)
for _ in gen.loop():
for combination in product(booleans, repeat=4):
for i in combination:
yield i
@staticmethod
def ints():
numbers = tuple(range(-33,65))
for _ in gen.loop():
for combination in product(numbers, repeat=3):
for i in combination:
yield i
@staticmethod
def floats():
non_zero_ints = (i for i in easy_street.ints() if i != 0)
stream1 = gen.chain(i[:8] for i in gen.chunks(non_zero_ints, 10))
stream2 = gen.chain(i[:8] for i in gen.chunks(non_zero_ints, 12))
for i in stream1:
yield next(stream2)/(1.0*i)
@staticmethod
def lists():
strategies = easy_street.strings(), easy_street.ints(), easy_street.floats(), easy_street.bools()
strategies = list(gen.chain(product(strategies, repeat=len(strategies))))
lengths = cycle(list(range(0, 21)))
for _ in gen.loop():
for length in lengths:
for strat in strategies:
yield [st for st in islice(strat, length)]
@staticmethod
def tuples():
for i in easy_street.lists():
yield tuple(i)
@staticmethod
def dicts():
strategies = easy_street.strings(), easy_street.ints(), easy_street.floats(), easy_street.bools()
strategies = list(gen.chain(product(strategies, repeat=len(strategies))))
lengths = cycle(list(range(0, 21)))
for _ in gen.loop():
for length in lengths:
for strat in strategies:
yield { k:v for k,v in gen.chunks(islice(strat,length*2), 2) }
@staticmethod
def sets():
strategies = easy_street.strings(), easy_street.ints(), easy_street.floats(), easy_street.bools()
strategies = list(gen.chain(product(strategies, repeat=len(strategies))))
lengths = cycle(list(range(0, 21)))
for _ in gen.loop():
for length in lengths:
for strat in strategies:
yield {i for i in islice(strat, length)}
@staticmethod
def garbage():
while 1:
strategies = (
easy_street.strings(),
easy_street.ints(),
easy_street.floats(),
easy_street.bools(),
easy_street.dicts(),
easy_street.sets(),
easy_street.lists(),
easy_street.tuples()
)
for strat in gen.chain(product(strategies, repeat=len(strategies))):
yield next(strat)
def background_strategy(strats, q):
target_core = q.get()
renice(20) # maximize niceness
if not hardware.single_core:
pin_to_cpu(target_core)
q_put = q.put
for strat in cycle(strats):
try:
q_put(strat.example())
except:
pass
def background_manager(child_queues, q):
if not hardware.single_core:
pin_to_cpu(1)
renice(20)
q_put = q.put
for cq in cycle(child_queues):
try:
item = cq.get_nowait()
q_put(item)
except:
sleep(0.0001)
def multiprocess_garbage():
basics = (
st.binary(),
st.booleans(),
st.characters(),
st.complex_numbers(),
st.floats(),
st.uuids(),
st.fractions(),
st.integers(),
st.decimals(),
st.dates(),
st.datetimes(),
st.dates().map(str),
st.datetimes().map(str),
st.none(),
st.text(),
st.dictionaries(keys=st.text(), values=st.text())
)
hashables = tuple(s for s in basics if hashable_strategy(s))
lists = tuple(st.lists(elements=i) for i in basics)
tuples = tuple(st.lists(elements=i).map(tuple) for i in basics)
sets = tuple(st.sets(elements=i) for i in hashables)
dictionaries = tuple(st.dictionaries(keys=st.one_of(*hashables), values=i) for i in basics)
strats = basics + lists + tuples + sets + dictionaries
# add logic here that plays on `if hardware.single_core:` to set up single core stuff cleanly
# if more than two cores, use special core logic
# master has 0, collector has 1
if hardware.cpu_count > 2: # logic for 3 or more cores
cores_used_for_generation = hardware.cpu_count - 2
specified_cores = cycle(range(2, hardware.cpu_count))
else:
cores_used_for_generation = 1
if hardware.cpu_count == 2:
# dual core has second core do generation
specified_cores = cycle([1])
else:
# single core systems do everything on the same core
specified_cores = cycle([0])
jobs = cycle([[] for _ in range(cores_used_for_generation)])
for s in strats:
next(jobs).append(s)
jobs = [(next(jobs), Queue(4)) for _ in range(cores_used_for_generation)]
# add specific core to each job's queue
for job, q in jobs:
q.put(next(specified_cores))
processes = [
Process(target=background_strategy, args=j)
for j in jobs
]
for p in processes:
p.start()
gather_queue = Queue(16)
gather_process = Process(target=background_manager, args=([q for _, q in jobs], gather_queue))
gather_process.start()
try:
fast_alternative = easy_street.garbage()
gather_queue_full = gather_queue.full
gather_queue_get = gather_queue.get_nowait
fast_alternative_next = getattr(fast_alternative, ('next' if hasattr(fast_alternative, 'next') else '__next__'))
for _ in gen.loop(): # loop forever
try:
yield gather_queue_get()
except:
yield fast_alternative_next()
'''if gather_queue_full(): # if the queue is full, yield the value
yield gather_queue_get()
else:
for _ in range(4): # dont waste time looking for a full queue, be productive while you wait
yield next(fast_alternative)'''
except (KeyboardInterrupt, SystemExit, GeneratorExit, StopIteration):
gather_process.terminate() ###MP isn't this redundant with same sequence in finally?
gather_process.join()
for p in processes:
p.terminate()
p.join()
finally:
gather_process.terminate()
gather_process.join()
for p in processes:
p.terminate()
p.join()
class MaxExecutionTimeError(Exception):
pass
class max_execution_time:
def signal_handler(self, signum, frame):
raise self.ex_type('operation timed out')
def __init__(self, seconds, ex_type=MaxExecutionTimeError):
#print('setting timeout for {} seconds'.format(seconds))
self.seconds = 1 if seconds < 1 else seconds
self.ex_type = ex_type
def __enter__(self):
signal.signal(signal.SIGALRM, self.signal_handler)
signal.alarm(self.seconds)
def __exit__(self, *a):
signal.alarm(0) ###MP which signal is it? MAGIC NUMBERS, this is why signals have const'ed names
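# A minimal usage sketch of max_execution_time (POSIX-only, since it relies on SIGALRM),
# kept as comments so it does not run at import time:
#   with max_execution_time(2):
#       possibly_slow_call()  # hypothetical call; raises MaxExecutionTimeError after ~2 seconds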
def hashable_strategy(s): ###MP predicates are nice to indicate with <is_condition> or ? if you're weird enough
""" Predicate stating a hash-able hypothesis strategy """
assert hasattr(s, 'example'), 'hashable_strategy needs a strategy argument' ###MP strategies are marked up with attributes not types/base class?
try:
for i in range(10):
sample = s.example()
hash(sample)
assert type(sample) != dict
except:
return False
else:
return True
def replace_strategy_repr(strat, new_repr):
""" replaces a strategy's repr and str functions with a custom one """
class custom_repr_strategy(type(strat)):
__repr__ = new_repr
__str__ = new_repr
return custom_repr_strategy(strategies=strat.original_strategies)
def build_garbage_strategy():
''' builds battle_tested's primary strategy '''
basics = (
st.binary(),
st.booleans(),
st.characters(),
st.complex_numbers(),
st.floats(),
st.fractions(),
st.integers(),
st.none(),
st.text(),
st.uuids(),
st.dictionaries(keys=st.text(), values=st.text())
)
hashables = tuple(s for s in basics if hashable_strategy(s))
# returns a strategy with only basic values
any_basics = partial(st.one_of, *basics)
# returns a strategy with only hashable values
any_hashables = partial(st.one_of, *hashables)
# returns a strategy of lists with basic values
basic_lists = partial(st.lists, elements=any_basics())
# returns a strategy of lists with hashable values
    hashable_lists = partial(st.lists, elements=any_hashables())
iterable_strategies = (
# iterables with the same type inside
st.builds(lambda a:[i for i in a if type(a[0])==type(i)], basic_lists(min_size=3)),
st.builds(lambda a:tuple(i for i in a if type(a[0])==type(i)), basic_lists(min_size=3)),
#st.builds(lambda a:{i for i in a if type(a[0])==type(i)}, hashable_lists(min_size=3)),
st.iterables(elements=any_basics()),
#st.builds(lambda a:(i for i in a if type(a[0])==type(i)), basic_lists(min_size=3)),
# garbage filled iterables
st.builds(tuple, basic_lists()),
#st.builds(set, hashable_lists()),
st.dictionaries(keys=any_hashables(), values=any_basics())
)
# returns a strategy with only iterable values
any_iterables = partial(st.one_of, *iterable_strategies)
return st.one_of(any_basics(), any_iterables())
garbage = replace_strategy_repr(build_garbage_strategy(), lambda s:'<garbage>')
class storage():
""" where battle_tested stores things """
test_inputs = deque()
results = {}
@staticmethod
def build_new_examples(how_many=100):
""" use this to add new examples to battle_tested's pre-loaded examples in storage.test_inputs """
assert type(how_many) == int, 'build_new_examples needs a positive int as the argument'
assert how_many > 0, 'build_new_examples needs a positive int as the argument'
@settings(max_examples=how_many)
@given(garbage)
def garbage_filler(i):
try:
storage.test_inputs.append(i)
except:
pass
try:
garbage_filler()
except:
pass
@staticmethod
def refresh_test_inputs():
""" wipe battle_tested test_inputs and cache new examples """
storage.test_inputs.clear()
try:
# just fill test inputs with something to start with
storage.test_inputs.append('waffles') # easter egg :)
for i in islice(easy_street.garbage(), 64):
storage.test_inputs.append(i)
storage.build_new_examples()
except Exception as e:
pass
storage.build_new_examples.garbage = garbage
class io_example(object):
""" demonstrates the behavior of input and output """
def __init__(self, input_args, output):
self.input = input_args
self.output = output
def __repr__(self):
return '{} -> {}'.format(self.input,self.output)
def __str__(self):
return '{} -> {}'.format(self.input,self.output) ### why not pull value of __repr__? .format cant be cheap, it's parsing and interpolation
def __hash__(self):
return hash('io_example') + hash(self.__repr__())
def __eq__(self, target):
return hasattr(target, '__hash__') and self.__hash__() == target.__hash__()
class suppress(): ###MP dead code? i dont see it referenced anywhere?
""" suppress exceptions coming from certain code blocks """
def __init__(self, *exceptions):
self._exceptions = exceptions
def __enter__(self):
pass
def __exit__(self, exctype, excinst, exctb):
return exctype is not None and issubclass(exctype, self._exceptions)
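# A minimal usage sketch of suppress (flagged above as possibly unused), kept as comments:
#   with suppress(KeyError, IndexError):
#       {}['missing']  # the KeyError is swallowed by __exit__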
def is_py3():
return sys.version_info >= (3, 0)
class UniqueCrashContainer(tuple):
''' a pretty printable container for crashes '''
def __repr__(self):
try:
table = PrettyTable(('exception type','arg types','location','crash message'), sortby='exception type')
table.align["exception type"] = "l"
table.align["arg types"] = "l"
table.align["location"] = "l"
table.align["crash message"] = "l"
for i in self:
table.add_row((i.err_type.__name__,repr(tuple(i.__name__ for i in i.arg_types)),[x for x in i.trace.split(', ') if x.startswith('line ')][-1],i.message))
return table.get_string()
except:
return tuple.__repr__(self)
class PrettyTuple(tuple):
''' tuples with better pretty printing '''
def __repr__(self):
if len(self) > 0:
try:
table = PrettyTable(None)
try:
tup = tuple(sorted(self, key=repr))
except:
tup = self
for i in tup:
if isinstance(i, tuple):
t = tuple(x.__name__ if isinstance(x,type) and hasattr(x,'__name__') else repr(x) for x in i)
table.add_row(t)
else:
if isinstance(i, type):
if hasattr(i, '__name__'):
i=i.__name__
else:
i=repr(i)
table.add_row((i,))
#table.align='l'
return '\n'.join(table.get_string().splitlines()[2:])
except:
return tuple.__repr__(self)
else:
return '()'
class tb_controls():
old_excepthook = sys.excepthook
no_tracebacklimit_on_sys = 'tracebacklimit' not in dir(sys)
old_tracebacklimit = (sys.tracebacklimit if 'tracebacklimit' in dir(sys) else None)
traceback_disabled = False
@staticmethod
def disable_traceback():
if is_py3():
sys.tracebacklimit = None
else:
sys.excepthook = lambda t, v, n:tb_controls.old_excepthook(t, v, None)
tb_controls.traceback_disabled = True
@staticmethod
def enable_traceback():
if tb_controls.traceback_disabled:
if is_py3():
if tb_controls.no_tracebacklimit_on_sys:
del sys.tracebacklimit
else:
sys.tracebacklimit = tb_controls.old_tracebacklimit
else:
sys.excepthook = tb_controls.old_excepthook
tb_controls.traceback_disabled = False
def enable_traceback():
    """ re-enables tracebacks to be added to exception raises """
    tb_controls.enable_traceback()
def disable_traceback():
    """ disables tracebacks from being added to exception raises """
    tb_controls.disable_traceback()
def traceback_file_lines(trace_text=None):
""" this returns a list of lines that start with file in the given traceback
usage:
traceback_steps(traceback.format_exc())
"""
# split the text into traceback steps
return [i for i in trace_text.splitlines() if i.startswith(' File "') and '", line' in i] ###MP extract out the condition for readability?
def traceback_steps(trace_text=None):
""" this generates the steps in a traceback
usage:
traceback_steps(traceback.format_exc())
"""
    if trace_text is None:
trace_text = traceback.format_exc()
# get rid of the first line with traceback
trace_text = ('\n'.join(trace_text.splitlines()[1:-1])) ### split text to rejoin without first and last? why not just slice the middle out?
# split the text into traceback steps
file_lines = [i for i in trace_text.splitlines() if '", line' in i and i.startswith(' File "') ]
# build the output
out = []
for i in trace_text.splitlines():
if i in file_lines:
if len(out):
yield '\n'.join(out) ###MP why split then rejoin later again?
out = [i]
else:
out.append(i)
yield '\n'.join(out)
def traceback_text():
""" this returns the traceback in text form """
return('\n'.join(i for i in traceback_steps()))
def format_error_message(f_name, err_msg, trace_text, evil_args):
top_line = " battle_tested crashed {f_name:}() ".format(f_name=f_name)
while len(top_line) < 79:
top_line = "-{}-".format(top_line)
top_line = '\n\n{}'.format(top_line)
bottom_line = '-'*len(top_line)
break_path = trace_text.split('"')[1]
break_line_number = int(trace_text.split(',')[1].split(' ')[-1])
break_line_number_up = break_line_number-1
break_line_number_down = break_line_number+1
out = """{top_line:}
Error Message:
{err_msg:}
Breakpoint: {break_path:} - line {break_line_number:}""".format(
top_line=top_line,
err_msg=err_msg,
break_path=break_path,
break_line_number=break_line_number
) ###MP put the fields in a dict, let format unpack it into the right fields
try:
with open(break_path) as f:
for i, line in enumerate(f):
i+=1
if i == break_line_number_up:
line_above=line.replace('\n','')
if i == break_line_number:
break_line=line.replace('\n','')
if i == break_line_number_down:
line_below=line.replace('\n','')
out += """
{break_line_number_up:>{num_len:}}|{line_above:}
->{break_line_number:>{num_len:}}|{break_line:}
{break_line_number_down:>{num_len:}}|{line_below:}""".format(
break_line_number_up=break_line_number_up,
break_line_number=break_line_number,
break_line_number_down=break_line_number_down,
line_above=line_above,
line_below=line_below,
break_line=break_line,
num_len=len(str(break_line_number_down))+1
)
except Exception as ex:
# i only want this part if the whole file read works
pass
out += """
To reproduce this error, run:
{f_name:}{evil_args:}
{bottom_line:}
""".format(
bottom_line=bottom_line,
f_name=f_name,
evil_args=evil_args,
)
return out
class generators(object):
def started(generator_function):
""" starts a generator when created """
def wrapper(*args, **kwargs):
g = generator_function(*args, **kwargs)
next(g)
return g
return wrapper
@staticmethod
@started
def sum():
"generator that holds a sum"
total = 0
while 1:
total += yield total
@staticmethod
@started
def counter(): ###MP why does a counter need to be a generator?
"""generator that holds a sum"""
c = 0
while 1:
i = yield c
if i is None:
c += 1
else:
c += i
@staticmethod
@started
def avg():
""" generator that holds a rolling average """
count = 0.0
total = generators.sum()
i=0
while 1:
i = yield (((total.send(i)*1.0)/count) if count else 0)
count += 1
@staticmethod
def timer():
""" generator that tracks time """
start_time = time()
while 1:
yield time()-start_time
@staticmethod
def countdown(seconds):
""" yields True until time expires """
start = time()
while 1:
yield time()-start < seconds
@staticmethod
def chunks(itr, size): ###MP isn't this a copy of stuff from generators?
""" yields a windowed chunk of a given size """
out = deque(maxlen=size)
for i in itr:
out.append(i)
if len(out) == size:
yield tuple(out)
out.clear()
@staticmethod
def chain(*a): ###MP isn't this a copy of stuff from generators?
"""itertools.chain, just better"""
for g in a:
if hasattr(g, '__iter__'):
# iterate through if its iterable
for i in g:
yield i
else:
# just yield the whole thing if its not
yield g
@staticmethod
def every_possible_object(iterable):
""" like flatten, just more desperate """
try:
for i in iterable:
yield i
if isinstance(i, dict):
for k in i:
yield k
for v in i.values():
for i in generators.every_possible_object(v):
yield i
elif isinstance(i, (list,tuple,set)):
for i in generators.every_possible_object(i):
yield i
except TypeError:
pass
yield iterable
class FuzzTimeoutError(BaseException):
pass
from threading import Timer
class IntervalTimer(object): ###MP some classes are explicitly inheriting from object, others are not. Inconsistent
""" run functions on intervals in the background
by: Cody Kochmann
"""
def __init__(self, seconds, function):
assert type(seconds).__name__ in ('int','float')
assert callable(function)
self.seconds=seconds
self.function=function
self.stopped=False
self.running=False
self.thread=Timer(self.seconds,self.function)
def start(self):
if self.thread.is_alive():
self.thread.join()
if not self.stopped:
if not self.running:
self.function()
self.running=True
self.thread=Timer(self.seconds,self.function)
self.thread.start()
self.restart_thread=Timer(self.seconds, self.start)
self.restart_thread.start()
def stop(self):
self.stopped = True
self.running = False
try:
self.thread.cancel()
except AttributeError: pass
try:
self.restart_thread.cancel()
except AttributeError: pass
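# A minimal usage sketch of IntervalTimer (hypothetical callback), kept as comments:
#   beat = IntervalTimer(1.0, lambda: print('tick'))
#   beat.start()   # runs the callback immediately, then roughly every second
#   beat.stop()    # cancels the pending timers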
from io import StringIO
def run_silently(fn):
""" runs a function silently with no stdout """
stdout_holder = sys.stdout
sys.stdout = StringIO()
fn()
sys.stdout = stdout_holder
class ipython_tools(object):
""" tools to make battle_tested work with ipython nicely """
detected = 'IPython' in sys.modules
if detected:
from IPython import get_ipython
detected = get_ipython() is not None
if detected:
magic = get_ipython().magic
@staticmethod
def silence_traceback():
""" silences ipythons verbose debugging temporarily """
if ipython_tools.detected:
# this hijacks stdout because there is a print in ipython.magic
run_silently(lambda:ipython_tools.magic("xmode Plain"))
@staticmethod
def verbose_traceback():
""" re-enables ipythons verbose tracebacks """
if ipython_tools.detected:
ipython_tools.magic("xmode Verbose")
def function_arg_count(fn):
""" finds how many args a function has """
assert callable(fn), 'function_arg_count needed a callable function, not {0}'.format(repr(fn))
if hasattr(fn, '__code__') and hasattr(fn.__code__, 'co_argcount'):
# normal functions
return fn.__code__.co_argcount
elif hasattr(fn, 'args') and hasattr(fn, 'func') and hasattr(fn, 'keywords'):
# partials
return function_arg_count(fn.func) - (len(fn.args)+len(fn.keywords))
else:
number_of_args_that_work = []
for i in range(1,64):
try:
fn(*range(i))
except TypeError as ex:
                search = findall(r'((takes (exactly )?(one|[0-9]{1,}))|(missing (one|[0-9]{1,})))', repr(ex))
                our_specific_type_error = len(repr(search)) > 10
if not our_specific_type_error: # if you find something
number_of_args_that_work.append(i)
pass
except Exception:
#number_of_args_that_work.append(i)
pass
else:
number_of_args_that_work.append(i)
if len(number_of_args_that_work):
return min(number_of_args_that_work)
#logging.warning('using backup plan')
return 1 # not universal, but for now, enough... :/
class battle_tested(object):
"""
battle_tested - automated function fuzzing library to quickly test production
code to prove it is "battle tested" and safe to use.
Examples of Primary Uses:
from battle_tested import fuzz
def my_adder(a, b):
''' switches the variables '''
return b + a
fuzz(my_adder) # returns a report of what works/breaks
Or:
from battle_tested import battle_tested
@battle_tested(keep_testing=False, allow=(AssertionError,))
def my_strict_add(a, b):
''' adds a and b together '''
assert isinstance(a, int), 'a needs to be an int'
assert isinstance(b, int), 'b needs to be an int'
return a + b
# This runs tests and halts the program if there is an error if that error
# isn't an AssertionError. This tests if you've written enough assertions.
Parameters:
fn - the function to be fuzzed (must accept at least one argument)
seconds - maximum time battle_tested is allowed to fuzz the function
max_tests - maximum number of tests battle_tested will run before exiting
(if the time limit doesn't come first)
verbose - setting this to False makes battle_tested raise the first
exception that wasn't specifically allowed in the allow option
keep_testing - setting this to True allows battle_tested to keep testing
even after it finds the first falsifying example, the results
can be accessed with crash_map() and success_map()
quiet - setting this to True silences all of the outputs coming from
the test
allow - this can be a tuple of exception types that you want
battle_tested to skip over in its tests
"""
def __init__(self, seconds=6, max_tests=1000000, keep_testing=True, verbose=False, quiet=False, allow=(), strategy=garbage, **kwargs):
""" your general constructor to get things in line """
# this is here if someone decides to use it as battle_tested(function)
if callable(seconds):
raise Exception('\n\n\tyou gave battle_tested() a function as the argument, did you mean battle_tested.fuzz()?')
self.kwargs = kwargs
self.tested = False
# needed to determine how quiet it will be
self.__verify_quiet__(quiet)
self.quiet = quiet
# needed to determine how verbosly it will work
self.__verify_verbose__(verbose)
self.verbose = False if self.quiet else verbose # quiet silences verbose mode
# needed to determine the maximum time the tests can run
self.__verify_seconds__(seconds)
self.seconds = seconds
# determine whether to keep testing after finding a crash
self.__verify_keep_testing__(keep_testing)
self.keep_testing = keep_testing
# needed to determine maximum number of tests it can
self.__verify_max_tests__(max_tests)
self.max_tests = max_tests
# determine what kind of exceptions are allowed
self.__verify_allow__(allow)
self.allow = allow
# determine what kind of strategy to use
self.__verify_strategy__(strategy)
self.strategy = strategy
@staticmethod
def __verify_seconds__(seconds):
assert type(seconds) == int, 'battle_tested needs seconds to be an int, not {0}'.format(repr(seconds))
assert seconds > 0, 'battle_tested needs seconds to be a positive int, not {0}'.format(repr(seconds))
@staticmethod
def __verify_verbose__(verbose):
""" asserts that verbose is valid """
assert type(verbose) == bool, 'battle_tested needs verbose to be a bool, not {0}'.format(repr(verbose))
@staticmethod
def __verify_max_tests__(max_tests):
""" asserts that max_tests is valid """
assert type(max_tests) == int, 'battle_tested needs max_tests to be an int, not {0}'.format(repr(max_tests))
assert max_tests > 0, 'battle_tested needs max_tests to be a positive int, not {0}'.format(repr(max_tests))
@staticmethod
def __verify_function__(fn):
""" asserts that the input is a function """
assert callable(fn), 'battle_tested needs a callable function, not {0}'.format(repr(fn))
@staticmethod
def __verify_tested__(fn):
""" asserts that the function exists in battle_tested's results """
battle_tested.__verify_function__(fn)
assert fn in storage.results.keys(), '{} was not found in battle_tested\'s results, you probably haven\'t tested it yet'.format(fn)
@staticmethod
def __verify_keep_testing__(keep_testing):
""" ensures keep_testing is a valid argument """
assert type(keep_testing) == bool, 'keep_testing needs to be a bool'
assert keep_testing == True or keep_testing == False, 'invalid value for keep_testing'
@staticmethod
def __verify_quiet__(quiet):
""" ensures quiet is a valid argument """
assert type(quiet) == bool, 'quiet needs to be a bool'
assert quiet == True or quiet == False, 'invalid value for quiet'
@staticmethod
def __verify_allow__(allow):
""" ensures allow is a valid argument """
assert type(allow) == tuple, 'allow needs to be a tuple of exceptions'
assert all(issubclass(i, BaseException) for i in allow), 'allow only accepts exceptions as its members'
@staticmethod
def __verify_args_needed__(args_needed):
""" ensures args_needed is a valid number of args for a function """
assert type(args_needed) == int, 'args_needed needs to be a positive int'
assert args_needed > 0, 'args_needed needs to be a positive int'
@staticmethod
def __verify_strategy__(strategy):
""" ensures strategy is a strategy or tuple of strategies """
def is_strategy(strategy):
assert 'strategy' in type(strategy).__name__.lower(), 'strategy needs to be a hypothesis strategy, not {}'.format(strategy)
assert hasattr(strategy,'example'), 'strategy needs to be a hypothesis strategy, not {}'.format(strategy)
return True
if type(strategy) == tuple:
assert len(strategy)>0, 'strategy cannot be an empty tuple, please define at least one'
assert all(is_strategy(i) for i in strategy), 'not all members in strategy were valid hypothesis strategies'
else:
is_strategy(strategy)
# results are composed like this
# results[my_function]['unique_crashes']=[list_of_crashes]
# results[my_function]['successes']=[list_of_successes]
# safe container that holds crash results
Crash = stricttuple(
'Crash',
arg_types = (
lambda arg_types:type(arg_types)==tuple,
lambda arg_types:len(arg_types)>0,
),
args = (
lambda args:type(args)==tuple,
lambda args:len(args)>0,
),
message = (
lambda message:type(message).__name__ in 'str unicode NoneType' ,
),
err_type = (
lambda err_type:type(err_type)==type ,
),
trace = (
lambda trace:type(trace).__name__ in 'str unicode' ,
)
)
class Result(object):
''' container that holds test results '''
def __init__(self, successful_input_types, crash_input_types, iffy_input_types, output_types, exception_types, unique_crashes, successful_io, function):
# assertions for successful_input_types
assert type(successful_input_types)==PrettyTuple
assert all(type(i)==tuple for i in successful_input_types)
assert all(all(isinstance(x,type) for x in i) for i in successful_input_types)
# assertions for crash_input_types
assert type(crash_input_types)==PrettyTuple
assert all(type(i)==tuple for i in crash_input_types)
assert all(all(isinstance(x,type) for x in i) for i in crash_input_types)
# assertions for iffy_input_types
assert type(iffy_input_types)==PrettyTuple
assert all(type(i)==tuple for i in iffy_input_types)
assert all(all(isinstance(x,type) for x in i) for i in iffy_input_types)
# assertions for output_types
assert type(output_types)==PrettyTuple
assert all(isinstance(i, type) for i in output_types)
# assertions for exception_types
assert type(exception_types)==PrettyTuple
assert all(isinstance(i,Exception) or issubclass(i,Exception) for i in exception_types)
# assertions for unique_crashes
assert type(unique_crashes)==UniqueCrashContainer
# assertions for successful_io
assert type(successful_io)==deque
assert all(type(i) == io_example for i in successful_io) if len(successful_io) else 1
self.successful_input_types = successful_input_types
self.crash_input_types = crash_input_types
self.iffy_input_types = iffy_input_types
self.output_types = output_types
self.exception_types = exception_types
self.unique_crashes = unique_crashes
self.successful_io = successful_io
self.function = function
self.unittest = attempt(self._generate_unit_test)
self._fields = 'successful_input_types', 'crash_input_types', 'iffy_input_types', 'output_types', 'exception_types', 'unique_crashes', 'successful_io'
def __repr__(self):
table = PrettyTable(None)
for i in sorted(self._fields):
new_lines_in_repr = repr(getattr(self,i)).count('\n')
if new_lines_in_repr > 0:
ii = '{}{}'.format('\n'*int(new_lines_in_repr/2), i)
else:
ii = i
if i == 'successful_io':
table.add_row((ii, repr(getattr(self,i))[7:-2]))
else:
table.add_row((ii, getattr(self,i)))
table.align='l'
return '\n'.join(table.get_string().splitlines()[2:])
def _generate_unit_test(self):
''' give this a function to fuzz and it will spit out a unittest file '''
# I know the code in this function is a little hateful, its brand new
# and I'll clean it up as soon as I'm certain it is where it needs to be
# negative tests
negative_tests = deque()
for i in self.unique_crashes:
#logging.warning(repr(i))
invocation_code = '{}{}'.format(self.function.__name__, repr(i.args))
tmp='def {}(*a,**k):pass\n'.format(self.function.__name__)+invocation_code
if runnable(tmp) and compilable(tmp) and valid_repr(i.args):
#logging.warning(invocation_code)
test_name = 'raises_{}'.format(i.err_type.__name__)
negative_tests.append(unittest_builder.raises_test(test_name, invocation_code, i.err_type))
#else:
# logging.warning('not runnable')
# logging.warning(repr(invocation_code))
# positive tests
positive_tests = deque()
for c, io_object in enumerate(self.successful_io):
io_object.input = tuple(float(i) if type(i)==builtins.float else i for i in io_object.input)
io_object.output = attempt(
lambda:tuple(float(i) if type(i)==builtins.float else i for i in io_object.output) ,
default_output=io_object.output
)
io_object.input = tuple(complex(i) if type(i)==builtins.complex else i for i in io_object.input)
io_object.output = attempt(
lambda:tuple(complex(i) if type(i)==builtins.complex else i for i in io_object.output) ,
default_output=io_object.output
)
if type(io_object.output) == builtins.complex:
io_object.output = complex(io_object.output)
if type(io_object.output) == builtins.float:
io_object.output = float(io_object.output)
invocation_code = '{}{}'.format(self.function.__name__, repr(io_object.input))
tmp='def {}(*a,**k):pass\n'.format(self.function.__name__)+invocation_code
if runnable(tmp) and compilable(tmp) and valid_repr(io_object.input) and valid_repr(io_object.output):
if all(runs_fine(repr(i)) for i in (io_object.input, io_object.output)):
positive_tests.append((invocation_code, io_object.output))
positive_tests = [
unittest_builder.equal_test('equals_{}'.format(i+1), *v)
for i, v in enumerate(positive_tests)
]
#print(negative_tests)
#print(positive_tests)
positive_tests = ''.join(sorted(positive_tests))
negative_tests = ''.join(sorted(negative_tests))
test_functions = negative_tests + positive_tests
#print(test_functions)
return unittest_builder.test_body(self.function, test_functions)
@staticmethod
def results(fn):
'''returns the collected results of the given function'''
battle_tested.__verify_tested__(fn)
return storage.results[fn]
@staticmethod
def stats(fn):
''' returns the stats found when testing a function '''
results = battle_tested.results(fn)
return {k:len(getattr(results, k)) for k in results._fields}
@staticmethod
def print_stats(fn):
''' prints the stats on a tested function '''
stats = battle_tested.stats(fn)
fn_name = fn.__name__ if '__name__' in dir(fn) else fn
s = 'fuzzing {}() found:'.format(fn_name)
s += ' '*(79-len(s))
print(s)
t=PrettyTable(None)
for k in sorted(stats.keys()):
t.add_row((k,stats[k]))
print('\n'.join(t.get_string().splitlines()[2:]))
# these two are here so the maps can have doc strings
class _crash_map(dict):
'''a map of crashes generated by the previous test'''
class _success_map(set):
'''a map of data types that were able to get through the function without crashing'''
crash_map = _crash_map()
success_map = _success_map()
@staticmethod
def generate_examples(args_needed=1, strategy=None):
""" this is the primary argument generator that battle_tested runs on """
battle_tested.__verify_args_needed__(args_needed)
if strategy is not None: # logic for a custom strategy
battle_tested.__verify_strategy__(strategy)
if type(strategy) == tuple:
assert len(strategy) == args_needed, 'invalid number of strategies, needed {} got {}'.format(args_needed, len(strategy))
print('using {} custom strategies - {}'.format(len(strategy),strategy))
strategy = st.builds(lambda *x: list(x), *strategy)
ex = strategy.example
for _ in gen.loop():
yield ex()
else:
# generate lists containing output only from the given strategy
ex = strategy.example
for _ in gen.loop():
out = [ex() for i in range(args_needed)]
for i in product(out, repeat=len(out)):
yield i
else: # logic for fuzzing approach
# first run through the cache
storage.refresh_test_inputs()
for chunk in generators.chunks(chain(storage.test_inputs, reversed(storage.test_inputs)),size=args_needed):
for combination in product(chunk, repeat=args_needed):
yield combination
try:
garbage = multiprocess_garbage()
while 2:
out = [next(garbage) for i in range(args_needed)]
for i in product(out, repeat=len(out)):
yield i
finally:
garbage.close()
@staticmethod
def fuzz(fn, seconds=6, max_tests=1000000000, verbose=False, keep_testing=True, quiet=False, allow=(), strategy=garbage):
"""
fuzz - battle_tested's primary weapon for testing functions.
Example Usage:
def my_adder(a, b):
''' switches the variables '''
return b + a
fuzz(my_adder) # returns a report of what works/breaks
# or
def my_strict_add(a, b):
''' adds a and b together '''
assert isinstance(a, int), 'a needs to be an int'
assert isinstance(b, int), 'b needs to be an int'
return a + b
          # This runs tests and halts the program if there is an error that
          # isn't an AssertionError. This tests whether you've written enough assertions.
fuzz(my_strict_add, keep_testing=False, allow=(AssertionError,))
Parameters:
fn - the function to be fuzzed (must accept at least one argument)
seconds - maximum time battle_tested is allowed to fuzz the function
max_tests - maximum number of tests battle_tested will run before exiting
(if the time limit doesn't come first)
          verbose - setting this to True prints every set of arguments as it
                    is tried (and silences the live stats display)
keep_testing - setting this to True allows battle_tested to keep testing
even after it finds the first falsifying example, the results
can be accessed with crash_map() and success_map()
quiet - setting this to True silences all of the outputs coming from
the test
allow - this can be a tuple of exception types that you want
battle_tested to skip over in its tests
"""
battle_tested.__verify_function__(fn)
battle_tested.__verify_seconds__(seconds)
battle_tested.__verify_verbose__(verbose)
battle_tested.__verify_max_tests__(max_tests)
battle_tested.__verify_keep_testing__(keep_testing)
battle_tested.__verify_quiet__(quiet)
battle_tested.__verify_allow__(allow)
battle_tested.__verify_strategy__(strategy)
using_native_garbage = hash(strategy) == hash(garbage)
args_needed = function_arg_count(fn)
# code for instance methods
if hasattr(fn, '__self__'):
# create a partial with fn.__self__ as the first arg
#fn = partial(fn, fn.__self__)
_name = repr(fn)
_type = type(fn).__name__
#print(dir(fn))
# wrap the method in a hashable wrapper
fn = partial(fn)
fn.__name__ = _name
# if fn is not a builtin, chop off one arg needed
if 'builtin' not in _type and args_needed > 1:
args_needed = args_needed-1
del _name
del _type
#if type(strategy) == tuple:
# assert len(strategy) == args_needed, 'invalid number of strategies, needed {} got {}'.format(args_needed, len(strategy))
# print('using {} custom strategies - {}'.format(len(strategy),strategy))
# strategy = st.builds(lambda *x: list(x), *strategy)
#else:
# # generate a strategy that creates a list of garbage variables for each argument
# strategy = st.lists(elements=strategy, max_size=args_needed, min_size=args_needed)
if not quiet:
print('testing: {0}()'.format(getattr(fn, '__name__', repr(fn))))
battle_tested.crash_map.clear()
battle_tested.success_map.clear()
count = generators.counter()
average = generators.avg()
timer = generators.timer()
def calculate_window_speed():
w = calculate_window_speed.window
w.append(_inner_window_speed())
return int((1.0*sum(w))/len(w))
calculate_window_speed.window = deque(maxlen=4)
def _inner_window_speed():
cw = display_stats.count_window
tw = display_stats.time_window
if len(cw) == 2:
c = cw[1]-cw[0]
t = tw[1]-tw[0]
if c != 0 and t != 0:
out = int(c*(1/t))
return out if out > 0 else 1
return 1
def display_stats(overwrite_line=True):
now = next(display_stats.timer)
display_stats.remaining = display_stats.test_time-now
if not display_stats.quiet:
display_stats.count_window.append(display_stats.count)
display_stats.time_window.append(now)
print('tests: {:<8} speed: {:>6}/sec avg:{:>6}/sec {} {}s '.format(
display_stats.count,
calculate_window_speed(),
int(display_stats.count/(now if now > 0 else 0.001)),
'-' if overwrite_line else 'in',
int(display_stats.test_time-now)+1 if overwrite_line else display_stats.test_time
), end=('\r' if overwrite_line else '\n'))
sys.stdout.flush()
display_stats.test_time = seconds
display_stats.remaining = display_stats.test_time
display_stats.count = 0
display_stats.time_window = deque(maxlen=2)
display_stats.count_window = deque(maxlen=2)
display_stats.timer = generators.timer()
display_stats.average = generators.avg()
display_stats.interval = IntervalTimer(0.16, display_stats)
display_stats.quiet = quiet or verbose
display_stats.start = lambda:(next(display_stats.timer),display_stats.interval.start())
ipython_tools.silence_traceback()
storage.results[fn] = {
'successful_input_types':deque(maxlen=512),
'crash_input_types':set(),
'iffy_input_types':set(), # types that both succeed and crash the function
'output_types':set(),
'exception_types':set(),
'unique_crashes':dict(),
'successful_io':deque(maxlen=512)
}
def fn_info():
pass
fn_info.fuzz_time = time()
fn_info.fuzz_id = len(storage.results.keys())
# stores examples that succeed and return something other than None
fn_info.successful_io = deque(maxlen=512)
# stores examples that return None
fn_info.none_successful_io = deque(maxlen=512)
gc_interval = IntervalTimer(3, gc)
#@settings(perform_health_check=False, database_file=None, deadline=None, max_examples=max_tests, verbosity=(Verbosity.verbose if verbose else Verbosity.normal))
#@given(strategy)
def _fuzz(given_args):
if _fuzz.first_run:
_fuzz.first_run = False
# start the display interval
display_stats.start()
# start the countdown for timeout
_fuzz.timestopper.start()
arg_list = tuple(given_args)
#if len(arg_list) != fuzz.args_needed:
# exit('got {} args? {}'.format(len(arg_list),next(test_variables)))
# unpack the arguments
if not _fuzz.has_time:
raise FuzzTimeoutError()
display_stats.count += 1
try:
with max_execution_time(int(display_stats.remaining)):
out = fn(*arg_list)
# if out is a generator, empty it out.
if hasattr(out, '__iter__') and (hasattr(out,'__next__') or hasattr(out,'next')):
for i in out:
pass
# the rest of this block is handling logging a success
input_types = tuple(type(i) for i in arg_list)
# if the input types have caused a crash before, add them to iffy_types
if input_types in storage.results[fn]['crash_input_types']:
storage.results[fn]['iffy_input_types'].add(input_types)
# add the input types to the successful collection
if input_types not in storage.results[fn]['successful_input_types']:
storage.results[fn]['successful_input_types'].append(input_types)
# add the output type to the output collection
storage.results[fn]['output_types'].add(type(out))
battle_tested.success_map.add(tuple(type(i) for i in arg_list))
try:
(fn_info.none_successful_io if out is None else fn_info.successful_io).append(io_example(arg_list, out))
'''
# I want to add this, but it wrecks the fuzzer's performance :(
io_object = io_example(arg_list, out)
if out is None:
if io_object not in fn_info.none_successful_io:
fn_info.none_successful_io.append(io_object)
else:
if io_object not in fn_info.successful_io:
fn_info.successful_io.append(io_object)
'''
except:
pass
except MaxExecutionTimeError:
pass
except _fuzz.allow as ex:
pass
except Exception as ex:
ex_message = ex.args[0] if (
hasattr(ex, 'args') and len(ex.args) > 0
) else (ex.message if (
hasattr(ex, 'message') and len(ex.message) > 0
) else '')
storage.results[fn]['crash_input_types'].add(tuple(type(i) for i in arg_list))
if keep_testing:
tb_text = traceback_text()
tb = '{}{}'.format(traceback_file_lines(tb_text),repr(type(ex)))
battle_tested.crash_map[tb]={'type':type(ex),'message':ex_message,'args':arg_list,'arg_types':tuple(type(i) for i in arg_list)}
storage.results[fn]['unique_crashes'][tb]=battle_tested.Crash(
err_type=type(ex),
message=repr(ex_message),
args=arg_list,
arg_types=tuple(type(i) for i in arg_list),
trace=str(tb_text)
)
storage.results[fn]['exception_types'].add(type(ex))
else:
# get the step where the code broke
tb_steps_full = [i for i in traceback_steps()]
tb_steps_with_func_name = [i for i in tb_steps_full if i.splitlines()[0].endswith(fn.__name__)]
if len(tb_steps_with_func_name)>0:
tb = tb_steps_with_func_name[-1]
else:
tb = tb_steps_full[-1]
error_string = format_error_message(
fn.__name__,
'{} - {}'.format(type(ex).__name__,ex_message),
tb,
(arg_list if len(arg_list)!=1 else '({})'.format(repr(arg_list[0])))
)
ex.message = error_string
ex.args = error_string,
raise ex
_fuzz.has_time = True
_fuzz.first_run = True
_fuzz.timestopper = Timer(seconds, lambda:setattr(_fuzz,'has_time',False))
_fuzz.exceptions = deque()
_fuzz.args_needed = args_needed
_fuzz.allow = allow
_fuzz.using_native_garbage = using_native_garbage
# run the test
test_gen = battle_tested.generate_examples(args_needed, None if using_native_garbage else strategy)
next(test_gen) # start the test generator
try:
gc_interval.start()
for test_args in test_gen:
if verbose:
try:
                        s = '{}'.format(tuple(test_args))
                        if len(test_args) == 1:
                            # strip the trailing comma that repr() adds to 1-tuples
                            s = s[:-2] + s[-1]
print('trying {}{}'.format(fn.__name__, s))
except: pass
_fuzz(test_args)
max_tests -= 1
if max_tests <= 0:
break
except FuzzTimeoutError:
pass
except KeyboardInterrupt:
if not quiet:
print(' stopping fuzz early...')
finally:
attempt(test_gen.close)
display_stats.interval.stop()
display_stats(False)
gc_interval.stop()
attempt(_fuzz.timestopper.cancel)
if not display_stats.quiet:
print('compiling results...')
results_dict = storage.results[fn]
results_dict['iffy_input_types'] = set(i for i in results_dict['crash_input_types'] if i in results_dict['successful_input_types'])
# merge the io maps
for i in fn_info.none_successful_io:
#if len(fn_info.successful_io)<fn_info.successful_io.maxlen:
fn_info.successful_io.append(i)
# remove io map with None examples
del fn_info.none_successful_io
storage.results[fn] = battle_tested.Result(
successful_input_types=PrettyTuple(set(i for i in results_dict['successful_input_types'] if i not in results_dict['iffy_input_types'] and i not in results_dict['crash_input_types'])),
crash_input_types=PrettyTuple(results_dict['crash_input_types']),
iffy_input_types=PrettyTuple(results_dict['iffy_input_types']),
output_types=PrettyTuple(results_dict['output_types']),
exception_types=PrettyTuple(results_dict['exception_types']),
unique_crashes=UniqueCrashContainer(results_dict['unique_crashes'].values()),
successful_io=fn_info.successful_io,
function=fn
)
storage.results[fn].function = fn
## find the types that both crashed and succeeded
#results_dict['iffy_input_types'] = set(i for i in results_dict['crash_input_types'] if i in results_dict['successful_input_types'])
## clean up the unique_crashes section
#results_dict['unique_crashes'] = tuple(results_dict['unique_crashes'].values())
## remove duplicate successful input types
#results_dict['successful_input_types'] = set(results_dict['successful_input_types'])
if keep_testing:
#examples_that_break = ('examples that break' if len(battle_tested.crash_map)>1 else 'example that broke')
#print('found {} {} {}()'.format(len(battle_tested.crash_map),examples_that_break,fn.__name__))
if not quiet:
battle_tested.print_stats(fn)
#print('run crash_map() or success_map() to access the test results')
else:
if not quiet:
print('battle_tested: no falsifying examples found')
# try to save the fields to the function object
try:
for f in storage.results[fn]._fields:
setattr(fn, f, getattr(storage.results[fn], f))
except: pass
# try to store the unique crashes as readable attributes
try:
for crash in storage.results[fn].unique_crashes:
try:
setattr(fn_info.unique_crashes, '{}_{}'.format(crash.err_type.__name__, [x.strip() for x in crash.trace.split(', ') if x.startswith('line ')][-1].replace(' ','_')), crash)
except: pass
try:
setattr(storage.results[fn].unique_crashes, '{}_{}'.format(crash.err_type.__name__, [x.strip() for x in crash.trace.split(', ') if x.startswith('line ')][-1].replace(' ','_')), crash)
except: pass
except: pass
try:
def dummy_function(): pass
for a in dir(fn_info):
if a not in dir(dummy_function):
try:
setattr(fn, a, getattr(fn_info, a))
except:
pass
except: pass
return storage.results[fn]
def __call__(self, fn):
""" runs before the decorated function is called """
self.__verify_function__(fn)
if fn not in storage.results:
# only test the first time this function is called
if not ('skip_test' in self.kwargs and self.kwargs['skip_test']):
# skip the test if it is explicitly turned off
self.fuzz(fn, seconds=self.seconds, max_tests=self.max_tests, keep_testing=self.keep_testing, verbose=self.verbose, quiet=self.quiet, allow=self.allow, strategy=self.strategy)
#self.tested = True
if any(i in self.kwargs for i in ('logger','default_output')):
# only wrap if needed
def wrapper(*args, **kwargs):
try:
out = fn(*args, **kwargs)
except Exception as e:
# log the error
if 'logger' in self.kwargs:
assert callable(self.kwargs['logger']), "battle_tested.logger needs to be a callable log function, not: {0}".format(repr(self.kwargs['logger']))
self.kwargs['logger'](e)
else:
logging.exception(e)
# only raise the error if there isnt a default_output
if 'default_output' in self.kwargs:
out = self.kwargs['default_output']
else:
raise e
return out
return wrapper
else:
return fn
# make fuzz its own independent function
fuzz = battle_tested.fuzz
results = battle_tested.results
stats = battle_tested.stats
print_stats = battle_tested.print_stats
def crash_map():
'''returns a map of crashes generated by the previous test'''
return tuple(sorted(battle_tested.crash_map.values(), key=lambda i:i['type'].__name__))
def success_map():
'''returns a map of data types that were able to get through the function without crashing'''
return tuple(sorted(battle_tested.success_map, key=lambda i:i[0].__name__))
def function_versions(fn):
''' returns all tested versions of the given function '''
for f in storage.results.keys():
if f.__name__ == fn.__name__ and f.__module__ == fn.__module__:
yield f
def time_io(fn,args,rounds=1000):
''' time how long it takes for a function to run through given args '''
tests = range(rounds)
args = tuple(args) # solidify this so we can run it multiple times
start = time()
for t in tests:
for a in args:
fn(*a)
return time()-start
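# Illustrative note (added): time_io() calls fn once per argument tuple per round, so
# for example time_io(max, [(1, 2), (3, 4)], rounds=100) would time 200 calls of max.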
def all_common_successful_io(*functions):
    ''' gets all io objects that work with all of the given functions '''
for io in generators.chain(*(fn.successful_io for fn in functions)):
succeeded = 0
for fn in functions:
try:
out = fn(*io.input)
if hasattr(out, '__iter__'):
for i in out:
pass
succeeded += 1
except:
pass
if succeeded == len(functions):
yield io
def time_all_versions_of(fn):
''' time how long each version of a function takes to run through the saved io '''
print('\ntiming all versions of {}'.format(fn.__name__))
common_io = partial(all_common_successful_io, *list(function_versions(fn)))
print('found {} inputs that all versions can run'.format(len(list(common_io()))))
for f in function_versions(fn):
print('\n{}\n\n{}'.format('-'*60,getsource(f)))
print('{:.10f}'.format(time_io(f,(io.input for io in common_io()))),'seconds')
#print(time_io(f,(io.input for io in f.successful_io)),'seconds with {} runs'.format(len(f.successful_io)*1000))
# for ff in function_versions(fn):
# #print(time_io(f,(io.input for io in ff.successful_io)),'seconds')
print('\n{}'.format('-'*60))
def run_tests():
''' this is where all of the primary functionality of battle_tested is tested '''
# test instance methods
class TestClass(tuple):
def testmethod(self,a,b,c,d,e):
return a,b,c,d
tc = TestClass([1,2,3])
print(fuzz(tc.testmethod))
l = list(range(10))
print(fuzz(l.append))
# test fuzzing all the types
for i in (str, bool, bytearray, bytes, complex, dict, float, frozenset, int, list, object, set, str, tuple):
print('testing: {}'.format(i))
print(fuzz(i))
def test_generator(a):
for i in a:
yield i
print(fuzz(test_generator, seconds=10))
def test_generator(a):
for i in a:
yield i,i
print(fuzz(test_generator, seconds=10))
print(time_all_versions_of(test_generator))
# try the custom strategy syntax
@battle_tested(strategy=st.text(),max_tests=50)
def custom_text_strategy(a,b):
if len(a) == 0:
return None
else:
return a in b
print(dir(custom_text_strategy))
for i in ('successful_io','crash_input_types','exception_types','iffy_input_types','unique_crashes','output_types','successful_input_types'):
assert hasattr(custom_text_strategy, i), 'custom_text_strategy doesnt have a {} attribute'.format(i)
def custom_text_fuzz_strategy(a,b):
return a in b
fuzz(custom_text_fuzz_strategy, strategy=st.text())
# try the multiple custom strategy syntax
@battle_tested(strategy=(st.text(), st.integers()))
def custom_text_int_strategy(a,b):
assert isinstance(a, str), 'a needs to be text'
assert isinstance(b, int), 'b needs to be an int'
return a+b
def custom_text_int_fuzz_strategy(a,b):
return a in b
r=fuzz(custom_text_fuzz_strategy, strategy=(st.integers(),st.text()))
#======================================
# Examples using the wrapper syntax
#======================================
@battle_tested(default_output=[], seconds=1, max_tests=5)
def sample(i):
return []
@battle_tested(keep_testing=False)
def sample2(a,b,c,d=''):
t = a, b, c, d
# output for documentation
def test(a):
return int(a)
print(repr(fuzz(test)))
# test different speeds
@battle_tested(seconds=1)
def arg1_1sec(a):
return a
@battle_tested()
def arg1(a):
return a
@battle_tested(seconds=1)
def args2_1sec(a,b):
return a+b
@battle_tested()
def args2(a,b):
return a+b
@battle_tested(seconds=1)
def args3_1sec(a,b,c):
return a+b+c
@battle_tested()
def args3(a,b,c):
return a+b+c
@battle_tested(seconds=1)
def args4_1sec(a,b,c,d):
return a+b+c+d
@battle_tested()
def args4(a,b,c,d):
return a+b+c+d
@battle_tested(seconds=1)
def args5_1sec(a,b,c,d,e):
return a+b+c+d+e
@battle_tested()
def args5(a,b,c,d,e):
return a+b+c+d+e
# test the allow option
@battle_tested(allow=(AssertionError,))
def allowed_to_assert(a,b):
assert a==b, 'a needs to equal b'
@battle_tested(allow=(AssertionError,), keep_testing=False)
def allowed_to_assert_and_stop_on_fail(a,b):
assert a==b, 'a needs to equal b'
fuzz(max, allow=(ValueError,))
fuzz(max, keep_testing=False, allow=(ValueError,TypeError))
# test going quiet
print('going quiet')
def quiet_test_out():
pass
@battle_tested(keep_testing=False, quiet=True)
def quiet_test(a,b,c):
setattr(quiet_test_out, 'args', (a,b,c))
assert len(quiet_test_out.args) == 3, 'fuzzing quiet test failed'
quiet_lambda = lambda a,b,c:setattr(quiet_test_out, 'lambda_args', (a,b,c))
r = fuzz(quiet_lambda, quiet=True, keep_testing=False)
assert len(quiet_test_out.lambda_args) == 3, 'fuzzing quiet lambda failed'
print('quiet test complete')
# proof that they only get tested once
print(sample(4))
print(sample2(1,2,3,4))
print(sample('i'))
print(sample2('a','b',2,4))
# prove that successes of any type are possible
r = fuzz(lambda i:i , keep_testing=True, seconds=10)
assert len(r.crash_input_types) == 0, 'fuzzing lambda() changed expected behavior'
assert len(r.exception_types) == 0, 'fuzzing lambda() changed expected behavior'
assert len(r.iffy_input_types) == 0, 'fuzzing lambda() changed expected behavior'
assert len(r.unique_crashes) == 0, 'fuzzing lambda() changed expected behavior'
assert len(r.output_types) > 10, 'fuzzing lambda() changed expected behavior'
assert len(r.successful_input_types) > 10, 'fuzzing lambda() changed expected behavior'
#======================================
# Examples using the function syntax
#======================================
def sample3(a,b):
# this one blows up on purpose
return a+b+1
# this tests a long fuzz
r=fuzz(sample3, seconds=20)
    assert len(r.successful_io)>0, 'successful_io was empty'
print(r.successful_io)
crash_map()
success_map()
assert len(r.crash_input_types) > 10 , 'fuzzing sample3() changed expected behavior'
assert len(r.exception_types) > 0, 'fuzzing sample3() changed expected behavior'
assert len(r.unique_crashes) > 0, 'fuzzing sample3() changed expected behavior'
assert len(r.output_types) > 1, 'fuzzing sample3() changed expected behavior'
assert len(r.successful_input_types) > 10, 'fuzzing sample3() changed expected behavior'
fuzz(lambda i:i)
#======================================
# example harness
#======================================
def harness(key,value):
global mydict
global crash_examples
global successful_types
try:
mydict[key]=value
            successful_types.add((type(key).__name__, type(value).__name__))
except Exception as e:
print('found one')
crash_examples[e.args[0]]=(key,value)
for f in storage.results.keys():
s = '\n'
try:
s+=f.__module__
s+=' '
s+=f.__name__
s+=' '
s+=str([i for i in dir(f) if not i.startswith('_')])
except:
pass
finally:
print(s)
print('battle_tested test complete...')
if __name__ == '__main__':
run_tests()
|
dataframe.py
|
import os
import threading
import time
import pandas as pd
import tensorflow as tf
import numpy as np
def split_dataframe_list_to_rows(df, target_column, separator):
"""Splits column that contains list into row per element of the list.
Args:
df: dataframe to split
target_column: the column containing the values to split
separator: the symbol used to perform the split
Returns:
dataframe with each entry for the target column separated,
with each element moved into a new row. The values in the
other columns are duplicated across the newly divided rows.
"""
def split_list_to_rows(row, row_accumulator, target_column, separator):
"""
Args:
row:
row_accumulator:
target_column:
separator:
Returns:
"""
split_row = row[target_column].split(separator)
for s in split_row:
new_row = row.to_dict()
new_row[target_column] = s
row_accumulator.append(new_row)
new_rows = []
df.apply(split_list_to_rows, axis=1, args=(new_rows, target_column, separator))
new_df = pd.DataFrame(new_rows)
return new_df
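# Hedged usage sketch (added, not part of the original module): shows how a column
# whose cells hold delimiter-separated values is expanded into one row per element.
# The column names "id" and "sequence" below are illustrative assumptions.
def _demo_split_dataframe_list_to_rows():
    df = pd.DataFrame({"id": [1, 2], "sequence": ["a,b", "c"]})
    expanded = split_dataframe_list_to_rows(df, "sequence", ",")
    # expanded now holds three rows: (1, "a"), (1, "b"), (2, "c"), with the "id"
    # value duplicated across the rows created by the split
    return expanded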
def get_id_character_mapping(data, columns):
"""Creating a mapping between characters and ids given dataframe.
Args:
data: dataframe that contains characters that need to be converted to ids
column: a column of the dataframe that contains characters that need to be converted to ids
columns:
Returns:
id_to_character: dictionary of ids and characters
character_to_id: dictionary of characters and ids
"""
characters = set([])
for column in columns:
[characters.update(set(val)) for index, val in data[column].iteritems()]
characters = list(sorted(characters))
id_to_character = {i: characters[i] for i in range(len(characters))}
character_to_id = {characters[i]: i for i in range(len(characters))}
return id_to_character, character_to_id
def get_category_to_id_mapping(data, column):
"""Creates two mappings for id and categorical value and vice verse for given column.
Id is a unique identifier of categorical value. Starting from 0.
Args:
data: dataframe that contains categorical values
column: a column of dataframe that contains categorical values for which a mapping from categorical value
to id is needed
Returns:
id_to_category: dictionary of ids and categories
category_to_id: dictionary of categories and ids
"""
categories = sorted(data[column].unique())
print("There are {} unique categories".format(len(categories)))
id_to_category = {i: categories[i] for i in range(len(categories))}
category_to_id = {categories[i]: i for i in range(len(categories))}
return id_to_category, category_to_id
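# Hedged usage sketch (added, illustrative): builds the two lookup dictionaries for a
# small label column; the column name "Label" is an assumption for the example.
def _demo_get_category_to_id_mapping():
    data = pd.DataFrame({"Label": ["cat", "dog", "cat"]})
    id_to_category, category_to_id = get_category_to_id_mapping(data, "Label")
    # id_to_category == {0: "cat", 1: "dog"}, category_to_id == {"cat": 0, "dog": 1}
    return id_to_category, category_to_id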
def save_as_tfrecords_multithreaded(path, original_data, columns=["sequence"], group_by_col="Label"):
"""Provided data gets splitted in to groups and processed concurrently.
The outcome of this is a file per group.
Args:
path: Location where files should be stored
original_data: dataframe which should be converted into files
columns: a list of columns which should be stored as sequences (Default value = ["sequence"])
group_by_col: a column name by which split data into groups (Default value = "Label")
Returns:
"""
os.makedirs(path, exist_ok=True)
threading_start = time.time()
coord = tf.train.Coordinator()
threads = []
data = original_data.groupby(group_by_col)
for group_id in data.groups:
group_name = group_id.replace(".", "_").replace("-", "_")
filename = os.path.join(path, group_name)
args = (filename, data.get_group(group_id), columns)
t = threading.Thread(target=save_as_tfrecords, args=args)
t.start()
threads.append(t)
coord.join(threads)
print("Completed all threads in {} seconds".format(time.time() - threading_start))
def to_int_feature(data):
"""
Converts int list to tf Feature
Args:
data: int list to be stored in tf record
Returns:
tf Feature that is used in building tfrecord
"""
return tf.train.Feature(int64_list=tf.train.Int64List(value=data))
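# Example (added comment): to_int_feature([1, 2, 3]) wraps the values in a
# tf.train.Feature holding an Int64List, ready to be placed into a tf.train.Example.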
def save_as_tfrecords(filename, data, columns=["sequence"], extension="tfrecords"):
"""Processes a dataframe and stores data into tfrecord file
Args:
filename: the absolute path of the tfrecords file where data should be stored
data: dataframe containing data will be converted into tfrecord
columns: list of columns that should be stored as varying-length sequences (Default value = ["sequence"])
extension: file extension
Returns:
"""
try:
filename = "{}.{}".format(filename, extension)
with tf.python_io.TFRecordWriter(filename) as writer:
for index, row in data.iterrows():
feature = {
'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[row[0]])),
}
                for col_name in columns:
                    # look the value up by column name; the original loop reused the
                    # positional index and would have read the wrong column
                    value = row[col_name]
                    feature[col_name] = to_int_feature(value)
                    # the original line used ':' instead of '=', so the length feature
                    # was never assigned; also wrap the length in a list for Int64List
                    feature['length_' + col_name] = to_int_feature([len(value)])
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
print("Data was stored in {}".format(filename))
except Exception as e:
print("Something went wrong went writting in to tfrecords file")
print(e)
def save_as_npy(path, original_data, columns=["Label", "sequence"]):
    """Processes a dataframe and stores its data into an npy file
    Args:
        path: directory where the npy file should be stored
        original_data: dataframe containing the data to be stored
        columns: list of columns that should be stored (Default value = ["Label", "sequence"])
    Returns:
    """
os.makedirs(path, exist_ok=True)
try:
filename = os.path.join(path, "data.npy")
np.save(filename, original_data[columns].values)
print("Data was stored in {}".format(filename))
except Exception as e:
print("Something went wrong went writting in to npy file ({})".format(filename))
print(e)
|
singleton.py
|
import threading
class Singleton(object):
_instance_lock = threading.Lock()
@classmethod
def instance(cls, *args, **kwargs):
        if not hasattr(Singleton, "_instance"):
            with Singleton._instance_lock:
                # re-check while holding the lock so two threads that both passed the
                # unlocked check above don't each create a separate instance
                if not hasattr(Singleton, "_instance"):
                    Singleton._instance = Singleton(*args, **kwargs)
        return Singleton._instance
def task(arg):
obj = Singleton.instance()
print("Task {}".format(arg), id(obj))
for i in range(10):
t = threading.Thread(target=task, args=[i,])
t.start()
|
background_loop.py
|
import os, psycopg2
import config
from time import sleep
import csv, random
import threading
petrol_requests = []
def connect():
conn = psycopg2.connect(
database=config.db,
user=config.user,
password=config.pswd,
host=config.host,
port=config.port)
return conn
def car_loop(conn, cur):
mileage = 25 #kmpl
liter_in_1_m = 1/25000 # liter
while(True):
sleep(5)
cur.execute("select * from car")
rows = cur.fetchall()
for row in rows:
car_id,_,_,running, loc, speed, fuel, aq, cur_stretch, _, _, next_node = row[0:12]
#new_loc =
try:
# PETROL PUMP AND FUEL
fuel = fuel - (speed*25/18*liter_in_1_m)
cur.execute(f"select id, fuel_amount from petrolpump\
where st_distance(loc::geography, '{loc}'::geography) < 5 limit 1")
if cur.rowcount != 0 and random.choice([True, False]): # 50% chance of refilling gas from station
[p_id, p_fuel] = cur.fetchone()
fill_fuel = random.choices([ 10, 20, 50, 100], weights=[0.4, 0.3, 0.2, 0.1], k=1)[0]
fuel = fuel + fill_fuel
cur.execute("update petrolpump set fuel_amount = fuel_amount - %s \
where id = %s",(str(fill_fuel), str(p_id)))
conn.commit()
cur.execute("select %s/st_distance(%s::geography, %s::geography)",
(speed*25/18, loc, next_node))
new_frac = cur.fetchone()[0]
print(f"frac:{new_frac}")
if new_frac < 1:
cur.execute("select st_lineinterpolatepoint(st_makeline(%s, %s), %s)",
(loc, next_node, new_frac))
new_loc = cur.fetchone()[0]
new_stretch = cur_stretch
new_next_node = next_node
else:
print(next_node)
cur.execute(f"select st_astext('{next_node}')")
print(cur.fetchone()[0])
cur.execute("select id, node_a, node_b from\
((select id, node_a, node_b from roadstretch\
where st_distance(node_a::geography , %s::geography) < 5)\
union\
(select id, node_b, node_a from roadstretch\
where st_distance(node_b::geography, %s::geography) < 5))\
as foo order by random() limit 1;\
", (str(next_node), str(next_node)))
row = cur.fetchone()
new_loc = row[1]
new_stretch = row[0]
new_next_node = row[2]
except:
print("###############EXCEPTION#################")
continue
# if running:
# new_running = random.choices([True, False], weights=[0.7, 0.3], k=1)[0]
# else:
# new_running = random.choices([True, False], weights=[0.5, 0.5], k=1)[0]
new_aq = max(0, min(100, aq+random.choices([2, -2, 0], weights=[0.1, 0.1, 0.8], k=1)[0]))
new_speed = max(30, min(120, speed+random.choices([5, -5, 0], weights=[0.6, 0.3, 0.3], k=1)[0]))
new_running = True
            if running == False and fuel == 0:
                # random.choice does not take weights; random.choices does
                if random.choices([True, False], weights=[0.05, 0.95])[0]:
print("Added 10 litres of petrol!")
fuel = fuel + 10
if random.choices([True, False], [0.05, 0.95])[0] or fuel == 0:
new_running = False
new_speed = 0
print(f"old:{loc}, new:{new_loc}")
cur.execute("update car set (running, loc, speed, fuel, air_quality, cur_stretch, stretch_next_node)=\
(%s, %s, %s, %s, %s, %s, %s) where id = %s",
(str(new_running), str(new_loc), str(new_speed), str(fuel), str(new_aq), str(new_stretch), str(new_next_node), str(car_id))
)
conn.commit()
print("finished")
print("=========================================================")
def trafficsignal_loop(conn, cur):
while(True):
sleep(60)
cur.execute("update trafficsignal set signal = case\
when signal = 'R' then 'Y'\
when signal = 'Y' then 'G'\
when signal = 'G' then 'R'\
end")
conn.commit()
def petrolpump_loop(conn, cur):
while(True):
cur.execute("update petrolpump set fuel_amount = fuel_amount + 400 where fuel_amount < 200;")
sleep(5*60)
def background_loop(conn, cur):
c_th = threading.Thread(name='car', target=car_loop, args=(conn, cur))
ts_th = threading.Thread(name='ts', target=trafficsignal_loop, args=(conn, cur))
pp_th = threading.Thread(name='pp', target=petrolpump_loop, args=(conn, cur))
c_th.start()
ts_th.start()
pp_th.start()
pass
if __name__ == '__main__':
conn = connect()
cur = conn.cursor()
background_loop(conn, cur)
|
advanced-reboot.py
|
#
# ptf --test-dir ptftests fast-reboot --qlen=1000 --platform remote -t 'verbose=True;dut_username="admin";dut_hostname="10.0.0.243";reboot_limit_in_seconds=30;portchannel_ports_file="/tmp/portchannel_interfaces.json";vlan_ports_file="/tmp/vlan_interfaces.json";ports_file="/tmp/ports.json";dut_mac="4c:76:25:f5:48:80";default_ip_range="192.168.0.0/16";vlan_ip_range="{\"Vlan100\": \"172.0.0.0/22\"}";arista_vms="[\"10.0.0.200\",\"10.0.0.201\",\"10.0.0.202\",\"10.0.0.203\"]"' --platform-dir ptftests --disable-vxlan --disable-geneve --disable-erspan --disable-mpls --disable-nvgre
#
#
# This test checks that DUT is able to make FastReboot procedure
#
# This test supposes that fast-reboot/warm-reboot initiates by running /usr/bin/{fast,warm}-reboot command.
#
# The test uses "pings". The "pings" are packets which are sent through dataplane in two directions
# 1. From one of vlan interfaces to T1 device. The source ip, source interface, and destination IP are chosen randomly from valid choices. Number of packet is 100.
# 2. From all of portchannel ports to all of vlan ports. The source ip, source interface, and destination IP are chosed sequentially from valid choices.
# Currently we have 500 distrinct destination vlan addresses. Our target to have 1000 of them.
#
# The test sequence is following:
# 1. Check that DUT is stable. That means that "pings" work in both directions: from T1 to servers and from servers to T1.
# 2. If DUT is stable the test starts continiously pinging DUT in both directions.
# 3. The test runs '/usr/bin/{fast,warm}-reboot' on DUT remotely. The ssh key supposed to be uploaded by ansible before the test
# 4. As soon as it sees that ping starts failuring in one of directions the test registers a start of dataplace disruption
# 5. As soon as the test sees that pings start working for DUT in both directions it registers a stop of dataplane disruption
# 6. If the length of the disruption is less than 30 seconds (if not redefined by parameter) - the test passes
# 7. If there're any drops, when control plane is down - the test fails
# 8. When test start reboot procedure it connects to all VM (which emulates T1) and starts fetching status of BGP and LACP
# LACP is supposed to be down for one time only, if not - the test fails
# if default value of BGP graceful restart timeout is less than 120 seconds the test fails
# if BGP graceful restart is not enabled on DUT the test fails
# If BGP graceful restart timeout value is almost exceeded (less than 15 seconds) the test fails
# if BGP routes disappeares more then once, the test failed
#
# The test expects you're running the test with link state propagation helper.
# That helper propagate a link state from fanout switch port to corresponding VM port
#
import ptf
from ptf.base_tests import BaseTest
from ptf import config
import ptf.testutils as testutils
from ptf.testutils import *
from ptf.dataplane import match_exp_pkt
import datetime
import time
import subprocess
from ptf.mask import Mask
import socket
import ptf.packet as scapy
import thread
import threading
from multiprocessing.pool import ThreadPool, TimeoutError
import os
import signal
import random
import struct
from pprint import pprint
from fcntl import ioctl
import sys
import json
import re
from collections import defaultdict
import Queue
import pickle
from operator import itemgetter
import scapy.all as scapyall
import itertools
from device_connection import DeviceConnection
import multiprocessing
import ast
from arista import Arista
import sad_path as sp
class StateMachine():
def __init__(self, init_state='init'):
self.state_lock = threading.RLock()
self.state_time = {} # Recording last time when entering a state
self.state = None
self.flooding = False
self.set(init_state)
def set(self, state):
with self.state_lock:
self.state = state
self.state_time[state] = datetime.datetime.now()
def get(self):
with self.state_lock:
cur_state = self.state
return cur_state
def get_state_time(self, state):
with self.state_lock:
time = self.state_time[state]
return time
def set_flooding(self, flooding):
with self.state_lock:
self.flooding = flooding
def is_flooding(self):
with self.state_lock:
flooding = self.flooding
return flooding
class ReloadTest(BaseTest):
TIMEOUT = 0.5
PKT_TOUT = 1
VLAN_BASE_MAC_PATTERN = '72060001{:04}'
LAG_BASE_MAC_PATTERN = '5c010203{:04}'
SOCKET_RECV_BUFFER_SIZE = 10 * 1024 * 1024
def __init__(self):
BaseTest.__init__(self)
self.fails = {}
self.info = {}
self.cli_info = {}
self.logs_info = {}
self.log_lock = threading.RLock()
self.vm_handle = None
self.sad_handle = None
self.process_id = str(os.getpid())
self.test_params = testutils.test_params_get()
self.check_param('verbose', False, required=False)
self.check_param('dut_username', '', required=True)
self.check_param('dut_password', '', required=True)
self.check_param('dut_hostname', '', required=True)
self.check_param('reboot_limit_in_seconds', 30, required=False)
self.check_param('reboot_type', 'fast-reboot', required=False)
self.check_param('graceful_limit', 240, required=False)
self.check_param('portchannel_ports_file', '', required=True)
self.check_param('vlan_ports_file', '', required=True)
self.check_param('ports_file', '', required=True)
self.check_param('dut_mac', '', required=True)
self.check_param('default_ip_range', '', required=True)
self.check_param('vlan_ip_range', '', required=True)
self.check_param('lo_prefix', '10.1.0.32/32', required=False)
self.check_param('lo_v6_prefix', 'fc00:1::/64', required=False)
self.check_param('arista_vms', [], required=True)
self.check_param('min_bgp_gr_timeout', 15, required=False)
self.check_param('warm_up_timeout_secs', 300, required=False)
self.check_param('dut_stabilize_secs', 30, required=False)
self.check_param('preboot_files', None, required=False)
self.check_param('preboot_oper', None, required=False) # preboot sad path to inject before warm-reboot
self.check_param('inboot_oper', None, required=False) # sad path to inject during warm-reboot
self.check_param('nexthop_ips', [], required=False) # nexthops for the routes that will be added during warm-reboot
self.check_param('allow_vlan_flooding', False, required=False)
self.check_param('sniff_time_incr', 60, required=False)
self.check_param('vnet', False, required=False)
self.check_param('vnet_pkts', None, required=False)
self.check_param('target_version', '', required=False)
self.check_param('bgp_v4_v6_time_diff', 40, required=False)
self.check_param('logfile_suffix', None, required=False)
if not self.test_params['preboot_oper'] or self.test_params['preboot_oper'] == 'None':
self.test_params['preboot_oper'] = None
if not self.test_params['inboot_oper'] or self.test_params['inboot_oper'] == 'None':
self.test_params['inboot_oper'] = None
# initialize sad oper
if self.test_params['preboot_oper']:
self.sad_oper = self.test_params['preboot_oper']
else:
self.sad_oper = self.test_params['inboot_oper']
if self.test_params['logfile_suffix']:
self.logfile_suffix = self.test_params['logfile_suffix']
else:
self.logfile_suffix = self.sad_oper
if self.logfile_suffix:
self.log_file_name = '/tmp/%s-%s.log' % (self.test_params['reboot_type'], self.logfile_suffix)
self.report_file_name = '/tmp/%s-%s.json' % (self.test_params['reboot_type'], self.logfile_suffix)
else:
self.log_file_name = '/tmp/%s.log' % self.test_params['reboot_type']
self.report_file_name = '/tmp/%s-report.json' % self.test_params['reboot_type']
self.report = dict()
self.log_fp = open(self.log_file_name, 'w')
self.packets_list = []
self.vnet = self.test_params['vnet']
if (self.vnet):
self.packets_list = json.load(open(self.test_params['vnet_pkts']))
# a flag whether to populate FDB by sending traffic from simulated servers
# usually ARP responder will make switch populate its FDB table, but Mellanox on 201803 has
# no L3 ARP support, so this flag is used to W/A this issue
self.setup_fdb_before_test = self.test_params.get('setup_fdb_before_test', False)
# Default settings
self.ping_dut_pkts = 10
self.arp_ping_pkts = 1
self.nr_pc_pkts = 100
self.nr_tests = 3
self.reboot_delay = 10
self.task_timeout = 300 # Wait up to 5 minutes for tasks to complete
self.max_nr_vl_pkts = 500 # FIXME: should be 1000.
# But ptf is not fast enough + swss is slow for FDB and ARP entries insertions
self.timeout_thr = None
        self.time_to_listen = 180.0 # Listen for more than 180 seconds, to be used in sniff_in_background method.
# Inter-packet interval, to be used in send_in_background method.
# Improve this interval to gain more precision of disruptions.
self.send_interval = 0.0035
self.packets_to_send = min(int(self.time_to_listen / (self.send_interval + 0.0015)), 45000) # How many packets to be sent in send_in_background method
# Thread pool for background watching operations
self.pool = ThreadPool(processes=3)
# State watcher attributes
self.watching = False
self.cpu_state = StateMachine('init')
self.asic_state = StateMachine('init')
self.vlan_state = StateMachine('init')
self.vlan_lock = threading.RLock()
self.asic_state_time = {} # Recording last asic state entering time
self.asic_vlan_reach = [] # Recording asic vlan reachability
self.recording = False # Knob for recording asic_vlan_reach
# light_probe:
# True : when one direction probe fails, don't probe another.
# False: when one direction probe fails, continue probe another.
self.light_probe = False
        # We have two data plane traffic generators which are mutually exclusive
# one is the reachability_watcher thread
# second is the fast send_in_background
self.dataplane_io_lock = threading.Lock()
self.allow_vlan_flooding = bool(self.test_params['allow_vlan_flooding'])
self.dut_connection = DeviceConnection(
self.test_params['dut_hostname'],
self.test_params['dut_username'],
password=self.test_params['dut_password'],
alt_password=self.test_params.get('alt_password')
)
# Check if platform type is kvm
stdout, stderr, return_code = self.dut_connection.execCommand("show platform summary | grep Platform | awk '{print $2}'")
platform_type = str(stdout[0]).replace('\n', '')
if platform_type == 'x86_64-kvm_x86_64-r0':
self.kvm_test = True
else:
self.kvm_test = False
return
def read_json(self, name):
with open(self.test_params[name]) as fp:
content = json.load(fp)
return content
def read_port_indices(self):
port_indices = self.read_json('ports_file')
return port_indices
def read_vlan_portchannel_ports(self):
portchannel_content = self.read_json('portchannel_ports_file')
portchannel_names = [pc['name'] for pc in portchannel_content.values()]
vlan_content = self.read_json('vlan_ports_file')
ports_per_vlan = dict()
pc_in_vlan = []
for vlan in self.vlan_ip_range.keys():
ports_in_vlan = []
for ifname in vlan_content[vlan]['members']:
if ifname in portchannel_names:
pc_in_vlan.append(ifname)
else:
ports_in_vlan.append(self.port_indices[ifname])
ports_per_vlan[vlan] = ports_in_vlan
pc_ifaces = []
for pc in portchannel_content.values():
if not pc['name'] in pc_in_vlan:
pc_ifaces.extend([self.port_indices[member] for member in pc['members']])
return ports_per_vlan, pc_ifaces
def check_param(self, param, default, required = False):
if param not in self.test_params:
if required:
raise Exception("Test parameter '%s' is required" % param)
self.test_params[param] = default
def random_ip(self, ip):
net_addr, mask = ip.split('/')
n_hosts = 2**(32 - int(mask))
random_host = random.randint(2, n_hosts - 2)
return self.host_ip(ip, random_host)
def host_ip(self, net_ip, host_number):
src_addr, mask = net_ip.split('/')
n_hosts = 2**(32 - int(mask))
if host_number > (n_hosts - 2):
raise Exception("host number %d is greater than number of hosts %d in the network %s" % (host_number, n_hosts - 2, net_ip))
src_addr_n = struct.unpack(">I", socket.inet_aton(src_addr))[0]
net_addr_n = src_addr_n & (2**32 - n_hosts)
host_addr_n = net_addr_n + host_number
host_ip = socket.inet_ntoa(struct.pack(">I", host_addr_n))
return host_ip
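    # Worked example (comment added for clarity): for net_ip='192.168.0.0/16' and
    # host_number=5, there are 2**16 host addresses, the network base is 192.168.0.0,
    # and the returned host ip is '192.168.0.5'.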
def random_port(self, ports):
return random.choice(ports)
def log(self, message, verbose=False):
current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
with self.log_lock:
if verbose and self.test_params['verbose'] or not verbose:
print "%s : %s" % (current_time, message)
self.log_fp.write("%s : %s\n" % (current_time, message))
self.log_fp.flush()
def timeout(self, func, seconds, message):
signal = multiprocessing.Event()
async_res = self.pool.apply_async(func, args=(signal,))
try:
res = async_res.get(timeout=seconds)
except Exception as err:
# TimeoutError and Exception's from func
# captured here
signal.set()
raise type(err)(message)
return res
def generate_vlan_servers(self):
vlan_host_map = defaultdict(dict)
self.nr_vl_pkts = 0 # Number of packets from upper layer
for vlan, prefix in self.vlan_ip_range.items():
if not self.ports_per_vlan[vlan]:
continue
_, mask = prefix.split('/')
n_hosts = min(2**(32 - int(mask)) - 3, self.max_nr_vl_pkts)
for counter, i in enumerate(xrange(2, n_hosts + 2)):
mac = self.VLAN_BASE_MAC_PATTERN.format(counter)
port = self.ports_per_vlan[vlan][i % len(self.ports_per_vlan[vlan])]
addr = self.host_ip(prefix, i)
vlan_host_map[port][addr] = mac
self.nr_vl_pkts += n_hosts
return vlan_host_map
def generate_arp_responder_conf(self, vlan_host_map):
arp_responder_conf = {}
for port in vlan_host_map:
arp_responder_conf['eth{}'.format(port)] = vlan_host_map[port]
return arp_responder_conf
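    # Illustrative note (added): the returned dict keys ptf interface names to the
    # per-host IP-to-MAC map built above, e.g. {'eth10': {'192.168.0.2': '720600010000', ...}, ...};
    # dump_arp_responder_config() below writes it out for the arp_responder process.
    # The concrete addresses shown here are only an example.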
def dump_arp_responder_config(self, dump):
# save data for arp_replay process
filename = "/tmp/from_t1.json" if self.logfile_suffix is None else "/tmp/from_t1_%s.json" % self.logfile_suffix
with open(filename, "w") as fp:
json.dump(dump, fp)
def get_peer_dev_info(self):
content = self.read_json('peer_dev_info')
for key in content.keys():
if 'ARISTA' in key:
self.vm_dut_map[key] = dict()
self.vm_dut_map[key]['mgmt_addr'] = content[key]['mgmt_addr']
# initialize all the port mapping
self.vm_dut_map[key]['dut_ports'] = []
self.vm_dut_map[key]['neigh_ports'] = []
self.vm_dut_map[key]['ptf_ports'] = []
def get_portchannel_info(self):
content = self.read_json('portchannel_ports_file')
for key in content.keys():
for member in content[key]['members']:
for vm_key in self.vm_dut_map.keys():
if member in self.vm_dut_map[vm_key]['dut_ports']:
self.vm_dut_map[vm_key]['dut_portchannel'] = str(key)
self.vm_dut_map[vm_key]['neigh_portchannel'] = 'Port-Channel1'
break
def get_neigh_port_info(self):
content = self.read_json('neigh_port_info')
for key in content.keys():
if content[key]['name'] in self.vm_dut_map.keys():
self.vm_dut_map[content[key]['name']]['dut_ports'].append(str(key))
self.vm_dut_map[content[key]['name']]['neigh_ports'].append(str(content[key]['port']))
self.vm_dut_map[content[key]['name']]['ptf_ports'].append(self.port_indices[key])
def build_peer_mapping(self):
'''
Builds a map of the form
'ARISTA01T1': {'mgmt_addr':
'neigh_portchannel'
'dut_portchannel'
'neigh_ports'
'dut_ports'
'ptf_ports'
}
'''
self.vm_dut_map = {}
for file in self.test_params['preboot_files'].split(','):
self.test_params[file] = '/tmp/' + file + '.json'
self.get_peer_dev_info()
self.get_neigh_port_info()
self.get_portchannel_info()
def build_vlan_if_port_mapping(self):
portchannel_content = self.read_json('portchannel_ports_file')
portchannel_names = [pc['name'] for pc in portchannel_content.values()]
vlan_content = self.read_json('vlan_ports_file')
vlan_if_port = []
for vlan in self.vlan_ip_range:
for ifname in vlan_content[vlan]['members']:
if ifname not in portchannel_names:
vlan_if_port.append((ifname, self.port_indices[ifname]))
return vlan_if_port
def populate_fail_info(self, fails):
for key in fails:
if key not in self.fails:
self.fails[key] = set()
self.fails[key] |= fails[key]
def get_sad_info(self):
'''
Prepares the msg string to log when a sad_oper is defined. Sad oper can be a preboot or inboot oper
sad_oper can be represented in the following ways
eg. 'preboot_oper' - a single VM will be selected and preboot_oper will be applied to it
'neigh_bgp_down:2' - 2 VMs will be selected and preboot_oper will be applied to the selected 2 VMs
'neigh_lag_member_down:3:1' - this case is used for lag member down operation only. This indicates that
            3 VMs will be selected and 1 of the lag members in the portchannel will be brought down
'inboot_oper' - represents a routing change during warm boot (add or del of multiple routes)
'routing_add:10' - adding 10 routes during warm boot
'''
msg = ''
if self.sad_oper:
msg = 'Sad oper: %s ' % self.sad_oper
if ':' in self.sad_oper:
oper_list = self.sad_oper.split(':')
msg = 'Sad oper: %s ' % oper_list[0] # extract the sad oper_type
if len(oper_list) > 2:
# extract the number of VMs and the number of LAG members. sad_oper will be of the form oper:no of VMS:no of lag members
msg += 'Number of sad path VMs: %s Lag member down in a portchannel: %s' % (oper_list[-2], oper_list[-1])
else:
# inboot oper
if 'routing' in self.sad_oper:
msg += 'Number of ip addresses: %s' % oper_list[-1]
else:
# extract the number of VMs. preboot_oper will be of the form oper:no of VMS
msg += 'Number of sad path VMs: %s' % oper_list[-1]
return msg
def init_sad_oper(self):
if self.sad_oper:
self.log("Preboot/Inboot Operations:")
self.sad_handle = sp.SadTest(self.sad_oper, self.ssh_targets, self.portchannel_ports, self.vm_dut_map, self.test_params, self.vlan_ports, self.ports_per_vlan)
(self.ssh_targets, self.portchannel_ports, self.neigh_vm, self.vlan_ports, self.ports_per_vlan), (log_info, fails) = self.sad_handle.setup()
self.populate_fail_info(fails)
for log in log_info:
self.log(log)
if self.sad_oper:
log_info, fails = self.sad_handle.verify()
self.populate_fail_info(fails)
for log in log_info:
self.log(log)
self.log(" ")
def do_inboot_oper(self):
'''
Add or del routes during boot
'''
if self.sad_oper and 'routing' in self.sad_oper:
self.log("Performing inboot operation")
log_info, fails = self.sad_handle.route_setup()
self.populate_fail_info(fails)
for log in log_info:
self.log(log)
self.log(" ")
def check_inboot_sad_status(self):
if 'routing_add' in self.sad_oper:
self.log('Verify if new routes added during warm reboot are received')
else:
self.log('Verify that routes deleted during warm reboot are removed')
log_info, fails = self.sad_handle.verify(pre_check=False, inboot=True)
self.populate_fail_info(fails)
for log in log_info:
self.log(log)
self.log(" ")
def check_postboot_sad_status(self):
self.log("Postboot checks:")
log_info, fails = self.sad_handle.verify(pre_check=False, inboot=False)
self.populate_fail_info(fails)
for log in log_info:
self.log(log)
self.log(" ")
def sad_revert(self):
self.log("Revert to preboot state:")
log_info, fails = self.sad_handle.revert()
self.populate_fail_info(fails)
for log in log_info:
self.log(log)
self.log(" ")
def setUp(self):
self.fails['dut'] = set()
self.port_indices = self.read_port_indices()
self.vlan_ip_range = ast.literal_eval(self.test_params['vlan_ip_range'])
self.ports_per_vlan, self.portchannel_ports = self.read_vlan_portchannel_ports()
self.vlan_ports = []
for ports in self.ports_per_vlan.values():
self.vlan_ports += ports
if self.sad_oper:
self.build_peer_mapping()
self.test_params['vlan_if_port'] = self.build_vlan_if_port_mapping()
self.default_ip_range = self.test_params['default_ip_range']
self.limit = datetime.timedelta(seconds=self.test_params['reboot_limit_in_seconds'])
self.reboot_type = self.test_params['reboot_type']
if self.reboot_type not in ['fast-reboot', 'warm-reboot', 'warm-reboot -f']:
raise ValueError('Not supported reboot_type %s' % self.reboot_type)
self.dut_mac = self.test_params['dut_mac']
if self.kvm_test:
self.log("This test is for KVM platform")
# get VM info
if isinstance(self.test_params['arista_vms'], list):
arista_vms = self.test_params['arista_vms']
else:
arista_vms = self.test_params['arista_vms'][1:-1].split(",")
self.ssh_targets = []
for vm in arista_vms:
if (vm.startswith("'") or vm.startswith('"')) and (vm.endswith("'") or vm.endswith('"')):
self.ssh_targets.append(vm[1:-1])
else:
self.ssh_targets.append(vm)
self.log("Converted addresses VMs: %s" % str(self.ssh_targets))
self.init_sad_oper()
self.vlan_host_map = self.generate_vlan_servers()
arp_responder_conf = self.generate_arp_responder_conf(self.vlan_host_map)
self.dump_arp_responder_config(arp_responder_conf)
self.random_vlan = random.choice(self.vlan_ports)
self.from_server_src_port = self.random_vlan
self.from_server_src_addr = random.choice(self.vlan_host_map[self.random_vlan].keys())
self.from_server_dst_addr = self.random_ip(self.test_params['default_ip_range'])
self.from_server_dst_ports = self.portchannel_ports
self.log("Test params:")
self.log("DUT ssh: %s@%s" % (self.test_params['dut_username'], self.test_params['dut_hostname']))
self.log("DUT reboot limit in seconds: %s" % self.limit)
self.log("DUT mac address: %s" % self.dut_mac)
self.log("From server src addr: %s" % self.from_server_src_addr)
self.log("From server src port: %s" % self.from_server_src_port)
self.log("From server dst addr: %s" % self.from_server_dst_addr)
self.log("From server dst ports: %s" % self.from_server_dst_ports)
self.log("From upper layer number of packets: %d" % self.nr_vl_pkts)
self.log("VMs: %s" % str(self.test_params['arista_vms']))
self.log("Reboot type is %s" % self.reboot_type)
self.generate_from_t1()
self.generate_from_vlan()
self.generate_ping_dut_lo()
self.generate_arp_ping_packet()
if 'warm-reboot' in self.reboot_type:
self.log(self.get_sad_info())
# Pre-generate list of packets to be sent in send_in_background method.
generate_start = datetime.datetime.now()
if not self.vnet:
self.generate_bidirectional()
self.log("%d packets are ready after: %s" % (len(self.packets_list), str(datetime.datetime.now() - generate_start)))
self.dataplane = ptf.dataplane_instance
for p in self.dataplane.ports.values():
port = p.get_packet_source()
port.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.SOCKET_RECV_BUFFER_SIZE)
self.dataplane.flush()
if config["log_dir"] != None:
filename = os.path.join(config["log_dir"], str(self)) + ".pcap"
self.dataplane.start_pcap(filename)
self.log("Enabling arp_responder")
self.cmd(["supervisorctl", "restart", "arp_responder"])
return
def setup_fdb(self):
""" simulate traffic generated from servers to help populate FDB """
vlan_map = self.vlan_host_map
from_servers_pkt = testutils.simple_tcp_packet(
eth_dst=self.dut_mac,
ip_dst=self.from_server_dst_addr,
)
for port in vlan_map:
for addr in vlan_map[port]:
mac = vlan_map[port][addr]
from_servers_pkt[scapy.Ether].src = self.hex_to_mac(mac)
from_servers_pkt[scapy.IP].src = addr
testutils.send(self, port, from_servers_pkt)
# make sure orchagent processed new FDBs
time.sleep(1)
def tearDown(self):
self.log("Disabling arp_responder")
self.cmd(["supervisorctl", "stop", "arp_responder"])
# Stop watching DUT
self.watching = False
if config["log_dir"] != None:
self.dataplane.stop_pcap()
self.log_fp.close()
def get_if(self, iff, cmd):
s = socket.socket()
ifreq = ioctl(s, cmd, struct.pack("16s16x",iff))
s.close()
return ifreq
@staticmethod
def hex_to_mac(hex_mac):
return ':'.join(hex_mac[i:i+2] for i in range(0, len(hex_mac), 2))
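    # Example (added comment): hex_to_mac('5c0102030010') -> '5c:01:02:03:00:10'.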
def generate_from_t1(self):
self.from_t1 = []
        # for each server host create a packet destined to the server IP
for counter, host_port in enumerate(self.vlan_host_map):
src_addr = self.random_ip(self.default_ip_range)
src_port = self.random_port(self.portchannel_ports)
for server_ip in self.vlan_host_map[host_port]:
dst_addr = server_ip
# generate source MAC address for traffic based on LAG_BASE_MAC_PATTERN
mac_addr = self.hex_to_mac(self.LAG_BASE_MAC_PATTERN.format(counter))
packet = simple_tcp_packet(eth_src=mac_addr,
eth_dst=self.dut_mac,
ip_src=src_addr,
ip_dst=dst_addr,
ip_ttl=255,
tcp_dport=5000)
self.from_t1.append((src_port, str(packet)))
# expect any packet with dport 5000
exp_packet = simple_tcp_packet(
ip_src="0.0.0.0",
ip_dst="0.0.0.0",
tcp_dport=5000,
)
self.from_t1_exp_packet = Mask(exp_packet)
self.from_t1_exp_packet.set_do_not_care_scapy(scapy.Ether, "src")
self.from_t1_exp_packet.set_do_not_care_scapy(scapy.Ether, "dst")
self.from_t1_exp_packet.set_do_not_care_scapy(scapy.IP, "src")
self.from_t1_exp_packet.set_do_not_care_scapy(scapy.IP, "dst")
self.from_t1_exp_packet.set_do_not_care_scapy(scapy.IP, "chksum")
self.from_t1_exp_packet.set_do_not_care_scapy(scapy.TCP, "chksum")
self.from_t1_exp_packet.set_do_not_care_scapy(scapy.IP, "ttl")
def generate_from_vlan(self):
packet = simple_tcp_packet(
eth_dst=self.dut_mac,
ip_src=self.from_server_src_addr,
ip_dst=self.from_server_dst_addr,
tcp_dport=5000
)
exp_packet = simple_tcp_packet(
ip_src=self.from_server_src_addr,
ip_dst=self.from_server_dst_addr,
ip_ttl=63,
tcp_dport=5000,
)
self.from_vlan_exp_packet = Mask(exp_packet)
self.from_vlan_exp_packet.set_do_not_care_scapy(scapy.Ether, "src")
self.from_vlan_exp_packet.set_do_not_care_scapy(scapy.Ether, "dst")
self.from_vlan_packet = str(packet)
def generate_ping_dut_lo(self):
dut_lo_ipv4 = self.test_params['lo_prefix'].split('/')[0]
packet = simple_icmp_packet(eth_dst=self.dut_mac,
ip_src=self.from_server_src_addr,
ip_dst=dut_lo_ipv4)
exp_packet = simple_icmp_packet(eth_src=self.dut_mac,
ip_src=dut_lo_ipv4,
ip_dst=self.from_server_src_addr,
icmp_type='echo-reply')
self.ping_dut_exp_packet = Mask(exp_packet)
self.ping_dut_exp_packet.set_do_not_care_scapy(scapy.Ether, "dst")
self.ping_dut_exp_packet.set_do_not_care_scapy(scapy.IP, "id")
self.ping_dut_exp_packet.set_do_not_care_scapy(scapy.IP, "chksum")
self.ping_dut_packet = str(packet)
def generate_arp_ping_packet(self):
vlan = next(k for k, v in self.ports_per_vlan.items() if v)
vlan_ip_range = self.vlan_ip_range[vlan]
vlan_port_candidates = range(len(self.ports_per_vlan[vlan]))
vlan_port_candidates.remove(0) # subnet prefix
vlan_port_candidates.remove(1) # subnet IP on dut
src_idx = random.choice(vlan_port_candidates)
vlan_port_candidates.remove(src_idx)
dst_idx = random.choice(vlan_port_candidates)
src_port = self.ports_per_vlan[vlan][src_idx]
dst_port = self.ports_per_vlan[vlan][dst_idx]
src_addr = self.host_ip(vlan_ip_range, src_idx)
dst_addr = self.host_ip(vlan_ip_range, dst_idx)
src_mac = self.hex_to_mac(self.vlan_host_map[src_port][src_addr])
packet = simple_arp_packet(eth_src=src_mac, arp_op=1, ip_snd=src_addr, ip_tgt=dst_addr, hw_snd=src_mac)
expect = simple_arp_packet(eth_dst=src_mac, arp_op=2, ip_snd=dst_addr, ip_tgt=src_addr, hw_tgt=src_mac)
self.log("ARP ping: src idx %d port %d mac %s addr %s" % (src_idx, src_port, src_mac, src_addr))
self.log("ARP ping: dst idx %d port %d addr %s" % (dst_idx, dst_port, dst_addr))
self.arp_ping = str(packet)
self.arp_resp = Mask(expect)
self.arp_resp.set_do_not_care_scapy(scapy.Ether, 'src')
self.arp_resp.set_do_not_care_scapy(scapy.ARP, 'hwtype')
self.arp_resp.set_do_not_care_scapy(scapy.ARP, 'hwsrc')
self.arp_src_port = src_port
def generate_bidirectional(self):
"""
This method pre-generates the packets to be sent by the background thread.
Packets are composed into a list that represents a bidirectional flow:
out of every five packets, one is sent from the VLAN side and four from the T1 side.
Each packet carries a sequential TCP payload so it can be identified later.
"""
self.send_interval = self.time_to_listen / self.packets_to_send
self.packets_list = []
from_t1_iter = itertools.cycle(self.from_t1)
for i in xrange(self.packets_to_send):
payload = '0' * 60 + str(i)
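# the payload is 60 '0' characters followed by the sequence number; examine_flow() later recovers the number with int()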
if (i % 5) == 0 : # From vlan to T1.
packet = scapyall.Ether(self.from_vlan_packet)
packet.load = payload
from_port = self.from_server_src_port
else: # From T1 to vlan.
src_port, packet = next(from_t1_iter)
packet = scapyall.Ether(packet)
packet.load = payload
from_port = src_port
self.packets_list.append((from_port, str(packet)))
def put_nowait(self, queue, data):
try:
queue.put_nowait(data)
except Queue.Full:
pass
def pre_reboot_test_setup(self):
self.reboot_start = None
self.no_routing_start = None
self.no_routing_stop = None
self.no_control_start = None
self.no_control_stop = None
self.no_cp_replies = None
self.upper_replies = []
self.routing_always = False
self.total_disrupt_packets = None
self.total_disrupt_time = None
self.ssh_jobs = []
for addr in self.ssh_targets:
q = Queue.Queue(1)
thr = threading.Thread(target=self.peer_state_check, kwargs={'ip': addr, 'queue': q})
thr.setDaemon(True)
self.ssh_jobs.append((thr, q))
thr.start()
if self.setup_fdb_before_test:
self.log("Run some server traffic to populate FDB table...")
self.setup_fdb()
self.log("Starting reachability state watch thread...")
self.watching = True
self.light_probe = False
self.watcher_is_stopped = threading.Event() # Event signalling that the watcher has stopped.
self.watcher_is_running = threading.Event() # Event signalling that the watcher is running.
self.watcher_is_stopped.set() # Initially the watcher is not running.
self.watcher_is_running.clear() # Callers have to wait until the watcher has started.
# Give the watch thread some time to spin up
watcher = self.pool.apply_async(self.reachability_watcher)
time.sleep(5)
def get_warmboot_finalizer_state(self):
stdout, stderr, _ = self.dut_connection.execCommand('sudo systemctl is-active warmboot-finalizer.service')
if stderr:
self.fails['dut'].add("Error collecting Finalizer state. stderr: {}, stdout:{}".format(str(stderr), str(stdout)))
raise Exception("Error collecting Finalizer state. stderr: {}, stdout:{}".format(str(stderr), str(stdout)))
if not stdout:
self.log('Finalizer state not returned from DUT')
return ''
finalizer_state = stdout[0].strip()
return finalizer_state
def get_now_time(self):
stdout, stderr, _ = self.dut_connection.execCommand('date +"%Y-%m-%d %H:%M:%S"')
if stderr:
self.fails['dut'].add("Error collecting current date from DUT. stderr: {}, stdout:{}".format(str(stderr), str(stdout)))
raise Exception("Error collecting current date from DUT. stderr: {}, stdout:{}".format(str(stderr), str(stdout)))
if not stdout:
self.fails['dut'].add('Error collecting current date from DUT: empty value returned')
raise Exception('Error collecting current date from DUT: empty value returned')
return datetime.datetime.strptime(stdout[0].strip(), "%Y-%m-%d %H:%M:%S")
def check_warmboot_finalizer(self, finalizer_timeout):
self.wait_until_control_plane_up()
dut_datetime = self.get_now_time()
self.log('waiting for warmboot-finalizer service to become activating')
finalizer_state = self.get_warmboot_finalizer_state()
while finalizer_state != 'activating':
time.sleep(1)
dut_datetime_after_ssh = self.get_now_time()
time_passed = float(dut_datetime_after_ssh.strftime("%s")) - float(dut_datetime.strftime("%s"))
if time_passed > finalizer_timeout:
self.fails['dut'].add('warmboot-finalizer never reached state "activating"')
raise TimeoutError
finalizer_state = self.get_warmboot_finalizer_state()
self.log('waiting for warmboot-finalizer service to finish')
finalizer_state = self.get_warmboot_finalizer_state()
self.log('warmboot finalizer service state {}'.format(finalizer_state))
count = 0
while finalizer_state == 'activating':
finalizer_state = self.get_warmboot_finalizer_state()
self.log('warmboot finalizer service state {}'.format(finalizer_state))
time.sleep(10)
if count * 10 > int(self.test_params['warm_up_timeout_secs']):
self.fails['dut'].add('warmboot-finalizer.service did not finish')
raise TimeoutError
count += 1
self.log('warmboot-finalizer service finished')
def wait_until_control_plane_down(self):
self.log("Wait until Control plane is down")
self.timeout(self.wait_until_cpu_port_down, self.task_timeout, "DUT hasn't shut down in {} seconds".format(self.task_timeout))
if self.reboot_type == 'fast-reboot':
self.light_probe = True
else:
# add or del routes during boot
self.do_inboot_oper()
self.reboot_start = datetime.datetime.now()
self.log("Dut reboots: reboot start %s" % str(self.reboot_start))
def wait_until_control_plane_up(self):
self.log("Wait until Control plane is up")
self.timeout(self.wait_until_cpu_port_up, self.task_timeout, "DUT hasn't come back up in {} seconds".format(self.task_timeout))
self.no_control_stop = datetime.datetime.now()
self.log("Dut reboots: control plane up at %s" % str(self.no_control_stop))
def handle_fast_reboot_health_check(self):
self.log("Check that device is still forwarding data plane traffic")
self.fails['dut'].add("Data plane has a forwarding problem after CPU went down")
self.check_alive()
self.fails['dut'].clear()
self.send_and_sniff()
# Stop watching DUT
self.watching = False
self.log("Stopping reachability state watch thread.")
self.watcher_is_stopped.wait(timeout = 10) # Wait for the watcher to stop.
self.save_sniffed_packets()
examine_start = datetime.datetime.now()
self.log("Packet flow examine started %s after the reboot" % str(examine_start - self.reboot_start))
self.examine_flow()
self.log("Packet flow examine finished after %s" % str(datetime.datetime.now() - examine_start))
self.no_routing_stop, self.no_routing_start = datetime.datetime.fromtimestamp(self.no_routing_stop), datetime.datetime.fromtimestamp(self.no_routing_start)
self.log("Dataplane disruption lasted %.3f seconds. %d packet(s) lost." % (self.max_disrupt_time, self.max_lost_id))
self.log("Total disruptions count is %d. All disruptions lasted %.3f seconds. Total %d packet(s) lost" % \
(self.disrupts_count, self.total_disrupt_time, self.total_disrupt_packets))
def handle_warm_reboot_health_check(self):
self.send_and_sniff()
# Stop watching DUT
self.watching = False
self.log("Stopping reachability state watch thread.")
self.watcher_is_stopped.wait(timeout = 10) # Wait for the watcher to stop.
self.save_sniffed_packets()
examine_start = datetime.datetime.now()
self.log("Packet flow examine started %s after the reboot" % str(examine_start - self.reboot_start))
self.examine_flow()
self.log("Packet flow examine finished after %s" % str(datetime.datetime.now() - examine_start))
if self.lost_packets:
self.no_routing_stop, self.no_routing_start = datetime.datetime.fromtimestamp(self.no_routing_stop), datetime.datetime.fromtimestamp(self.no_routing_start)
self.log("The longest disruption lasted %.3f seconds. %d packet(s) lost." % (self.max_disrupt_time, self.max_lost_id))
self.log("Total disruptions count is %d. All disruptions lasted %.3f seconds. Total %d packet(s) lost" % \
(self.disrupts_count, self.total_disrupt_time, self.total_disrupt_packets))
else:
self.no_routing_start = self.reboot_start
self.no_routing_stop = self.reboot_start
def handle_post_reboot_health_check(self):
# wait until all bgp session are established
self.log("Wait until bgp routing is up on all devices")
for _, q in self.ssh_jobs:
q.put('quit')
def wait_for_ssh_threads(signal):
while any(thr.is_alive() for thr, _ in self.ssh_jobs) and not signal.is_set():
self.log('Waiting till SSH threads stop')
time.sleep(self.TIMEOUT)
for thr, _ in self.ssh_jobs:
thr.join()
self.timeout(wait_for_ssh_threads, self.task_timeout, "SSH threads haven't finished for %d seconds" % self.task_timeout)
self.log("Data plane works again. Start time: %s" % str(self.no_routing_stop))
self.log("")
if self.no_routing_stop - self.no_routing_start > self.limit:
self.fails['dut'].add("Longest downtime period must be less then %s seconds. It was %s" \
% (self.test_params['reboot_limit_in_seconds'], str(self.no_routing_stop - self.no_routing_start)))
if self.no_routing_stop - self.reboot_start > datetime.timedelta(seconds=self.test_params['graceful_limit']):
self.fails['dut'].add("%s cycle must be less than graceful limit %s seconds" % (self.reboot_type, self.test_params['graceful_limit']))
if 'warm-reboot' in self.reboot_type:
if self.total_disrupt_time > self.limit.total_seconds():
self.fails['dut'].add("Total downtime period must be less then %s seconds. It was %s" \
% (str(self.limit), str(self.total_disrupt_time)))
# after the data plane is up, check for routing changes
if self.test_params['inboot_oper'] and self.sad_handle:
self.check_inboot_sad_status()
# postboot check for all preboot operations
if self.test_params['preboot_oper'] and self.sad_handle:
self.check_postboot_sad_status()
else:
# verify there are no interface flaps after warm boot
self.neigh_lag_status_check()
def handle_advanced_reboot_health_check_kvm(self):
self.log("Wait until data plane stops")
forward_stop_signal = multiprocessing.Event()
async_forward_stop = self.pool.apply_async(self.check_forwarding_stop, args=(forward_stop_signal,))
self.log("Wait until control plane up")
port_up_signal = multiprocessing.Event()
async_cpu_up = self.pool.apply_async(self.wait_until_cpu_port_up, args=(port_up_signal,))
try:
self.no_routing_start, _ = async_forward_stop.get(timeout=self.task_timeout)
self.log("Data plane was stopped, Waiting until it's up. Stop time: %s" % str(self.no_routing_start))
except TimeoutError:
forward_stop_signal.set()
self.log("Data plane never stop")
try:
async_cpu_up.get(timeout=self.task_timeout)
no_control_stop = self.cpu_state.get_state_time('up')
self.log("Control plane down stops %s" % str(no_control_stop))
except TimeoutError as e:
port_up_signal.set()
self.log("DUT hasn't bootup in %d seconds" % self.task_timeout)
self.fails['dut'].add("DUT hasn't booted up in %d seconds" % self.task_timeout)
raise
# Wait until data plane up if it stopped
if self.no_routing_start is not None:
self.no_routing_stop, _ = self.timeout(self.check_forwarding_resume,
self.task_timeout,
"DUT hasn't started to work for %d seconds" % self.task_timeout)
else:
self.no_routing_stop = datetime.datetime.min
self.no_routing_start = datetime.datetime.min
# Stop watching DUT
self.watching = False
def handle_post_reboot_health_check_kvm(self):
# wait until all bgp session are established
self.log("Wait until bgp routing is up on all devices")
for _, q in self.ssh_jobs:
q.put('quit')
def wait_for_ssh_threads(signal):
while any(thr.is_alive() for thr, _ in self.ssh_jobs) and not signal.is_set():
time.sleep(self.TIMEOUT)
for thr, _ in self.ssh_jobs:
thr.join()
self.timeout(wait_for_ssh_threads, self.task_timeout, "SSH threads haven't finished for %d seconds" % self.task_timeout)
self.log("Data plane works again. Start time: %s" % str(self.no_routing_stop))
self.log("")
if self.no_routing_stop - self.no_routing_start > self.limit:
self.fails['dut'].add("Longest downtime period must be less then %s seconds. It was %s" \
% (self.test_params['reboot_limit_in_seconds'], str(self.no_routing_stop - self.no_routing_start)))
if self.no_routing_stop - self.reboot_start > datetime.timedelta(seconds=self.test_params['graceful_limit']):
self.fails['dut'].add("%s cycle must be less than graceful limit %s seconds" % (self.reboot_type, self.test_params['graceful_limit']))
def handle_post_reboot_test_reports(self):
# Stop watching DUT
self.watching = False
# revert to pretest state
if self.sad_oper and self.sad_handle:
self.sad_revert()
if self.test_params['inboot_oper']:
self.check_postboot_sad_status()
self.log(" ")
# Generating report
self.log("="*50)
self.log("Report:")
self.log("="*50)
self.log("LACP/BGP were down for (extracted from cli):")
self.log("-"*50)
for ip in sorted(self.cli_info.keys()):
self.log(" %s - lacp: %7.3f (%d) po_events: (%d) bgp v4: %7.3f (%d) bgp v6: %7.3f (%d)" \
% (ip, self.cli_info[ip]['lacp'][1], self.cli_info[ip]['lacp'][0], \
self.cli_info[ip]['po'][1], \
self.cli_info[ip]['bgp_v4'][1], self.cli_info[ip]['bgp_v4'][0],\
self.cli_info[ip]['bgp_v6'][1], self.cli_info[ip]['bgp_v6'][0]))
self.log("-"*50)
self.log("Extracted from VM logs:")
self.log("-"*50)
for ip in sorted(self.logs_info.keys()):
self.log("Extracted log info from %s" % ip)
for msg in sorted(self.logs_info[ip].keys()):
if not msg in [ 'error', 'route_timeout' ]:
self.log(" %s : %d" % (msg, self.logs_info[ip][msg]))
else:
self.log(" %s" % self.logs_info[ip][msg])
self.log("-"*50)
self.log("Summary:")
self.log("-"*50)
if self.no_routing_stop:
self.log("Longest downtime period was %s" % str(self.no_routing_stop - self.no_routing_start))
reboot_time = "0:00:00" if self.routing_always else str(self.no_routing_stop - self.reboot_start)
self.log("Reboot time was %s" % reboot_time)
self.log("Expected downtime is less then %s" % self.limit)
if self.reboot_type == 'fast-reboot' and self.no_cp_replies:
self.log("How many packets were received back when control plane was down: %d Expected: %d" % (self.no_cp_replies, self.nr_vl_pkts))
has_info = any(len(info) > 0 for info in self.info.values())
if has_info:
self.log("-"*50)
self.log("Additional info:")
self.log("-"*50)
for name, info in self.info.items():
for entry in info:
self.log("INFO:%s:%s" % (name, entry))
self.log("-"*50)
is_good = all(len(fails) == 0 for fails in self.fails.values())
errors = ""
if not is_good:
self.log("-"*50)
self.log("Fails:")
self.log("-"*50)
errors = "\n\nSomething went wrong. Please check output below:\n\n"
for name, fails in self.fails.items():
for fail in fails:
self.log("FAILED:%s:%s" % (name, fail))
errors += "FAILED:%s:%s\n" % (name, fail)
self.log("="*50)
if self.no_routing_stop and self.no_routing_start:
dataplane_downtime = (self.no_routing_stop - self.no_routing_start).total_seconds()
else:
dataplane_downtime = ""
if self.total_disrupt_time:
# Add total downtime (calculated in physical warmboot test using packet disruptions)
dataplane_downtime = self.total_disrupt_time
dataplane_report = dict()
dataplane_report["downtime"] = str(dataplane_downtime)
dataplane_report["lost_packets"] = str(self.total_disrupt_packets) \
if self.total_disrupt_packets is not None else ""
controlplane_report = dict()
if self.no_control_stop and self.no_control_start:
controlplane_downtime = (self.no_control_stop - self.no_control_start).total_seconds()
else:
controlplane_downtime = ""
controlplane_report["downtime"] = str(controlplane_downtime)
controlplane_report["arp_ping"] = "" # TODO
self.report["dataplane"] = dataplane_report
self.report["controlplane"] = controlplane_report
with open(self.report_file_name, 'w') as reportfile:
json.dump(self.report, reportfile)
self.assertTrue(is_good, errors)
def runTest(self):
self.pre_reboot_test_setup()
try:
self.log("Check that device is alive and pinging")
self.fails['dut'].add("DUT is not ready for test")
self.wait_dut_to_warm_up()
self.fails['dut'].clear()
self.log("Schedule to reboot the remote switch in %s sec" % self.reboot_delay)
thr = threading.Thread(target=self.reboot_dut)
thr.setDaemon(True)
thr.start()
self.wait_until_control_plane_down()
self.no_control_start = self.cpu_state.get_state_time('down')
if 'warm-reboot' in self.reboot_type:
finalizer_timeout = 60 + self.test_params['reboot_limit_in_seconds']
thr = threading.Thread(target=self.check_warmboot_finalizer,\
kwargs={'finalizer_timeout': finalizer_timeout})
thr.setDaemon(True)
thr.start()
self.warmboot_finalizer_thread = thr
if self.kvm_test:
self.handle_advanced_reboot_health_check_kvm()
self.handle_post_reboot_health_check_kvm()
else:
if self.reboot_type == 'fast-reboot':
self.handle_fast_reboot_health_check()
if 'warm-reboot' in self.reboot_type:
self.handle_warm_reboot_health_check()
self.handle_post_reboot_health_check()
if 'warm-reboot' in self.reboot_type:
total_timeout = finalizer_timeout + self.test_params['warm_up_timeout_secs']
start_time = datetime.datetime.now()
# Wait until timeout happens OR the IO test completes
while ((datetime.datetime.now() - start_time).seconds < total_timeout) and\
self.warmboot_finalizer_thread.is_alive():
time.sleep(0.5)
if self.warmboot_finalizer_thread.is_alive():
self.fails['dut'].add("Warmboot Finalizer hasn't finished for {} seconds. Finalizer state: {}".format(total_timeout, self.get_warmboot_finalizer_state()))
# Check sonic version after reboot
self.check_sonic_version_after_reboot()
except Exception as e:
self.fails['dut'].add(e)
finally:
self.handle_post_reboot_test_reports()
def neigh_lag_status_check(self):
"""
Ensure there are no interface flaps after warm-boot
"""
for neigh in self.ssh_targets:
self.neigh_handle = Arista(neigh, None, self.test_params)
self.neigh_handle.connect()
fails, flap_cnt = self.neigh_handle.verify_neigh_lag_no_flap()
self.neigh_handle.disconnect()
self.fails[neigh] |= fails
if not flap_cnt:
self.log("No LAG flaps seen on %s after warm boot" % neigh)
else:
self.fails[neigh].add("LAG flapped %s times on %s after warm boot" % (flap_cnt, neigh))
def check_sonic_version_after_reboot(self):
# Check sonic version after reboot
target_version = self.test_params['target_version']
if target_version:
stdout, stderr, return_code = self.dut_connection.execCommand("sudo sonic_installer list | grep Current | awk '{print $2}'")
current_version = ""
if stdout != []:
current_version = str(stdout[0]).replace('\n', '')
self.log("Current={} Target={}".format(current_version, target_version))
if current_version != target_version:
raise Exception("Sonic upgrade failed. Target={} Current={}".format(\
target_version, current_version))
def extract_no_cpu_replies(self, arr):
"""
This function tries to extract the number of replies seen on the data plane while the control plane is not working
"""
# drop all zero values
non_zero = filter(lambda x : x > 0, arr)
# check whether the last value is smaller than the previous one
if len(non_zero) > 1 and non_zero[-1] < non_zero[-2]:
return non_zero[-2]
else:
return non_zero[-1]
def reboot_dut(self):
time.sleep(self.reboot_delay)
self.log("Rebooting remote side")
stdout, stderr, return_code = self.dut_connection.execCommand("sudo " + self.reboot_type, timeout=30)
if stdout != []:
self.log("stdout from %s: %s" % (self.reboot_type, str(stdout)))
if stderr != []:
self.log("stderr from %s: %s" % (self.reboot_type, str(stderr)))
self.fails['dut'].add("{} failed with error {}".format(self.reboot_type, stderr))
thread.interrupt_main()
raise Exception("{} failed with error {}".format(self.reboot_type, stderr))
self.log("return code from %s: %s" % (self.reboot_type, str(return_code)))
# Note: a timeout reboot in ssh session will return a 255 code
if return_code not in [0, 255]:
thread.interrupt_main()
return
def cmd(self, cmds):
process = subprocess.Popen(cmds,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
return_code = process.returncode
return stdout, stderr, return_code
def peer_state_check(self, ip, queue):
self.log('SSH thread for VM {} started'.format(ip))
ssh = Arista(ip, queue, self.test_params, log_cb=self.log)
self.fails[ip], self.info[ip], self.cli_info[ip], self.logs_info[ip] = ssh.run()
self.log('SSH thread for VM {} finished'.format(ip))
def wait_until_cpu_port_down(self, signal):
while not signal.is_set():
for _, q in self.ssh_jobs:
self.put_nowait(q, 'cpu_down')
if self.cpu_state.get() == 'down':
break
time.sleep(self.TIMEOUT)
def wait_until_cpu_port_up(self, signal):
while not signal.is_set():
for _, q in self.ssh_jobs:
self.put_nowait(q, 'cpu_up')
if self.cpu_state.get() == 'up':
break
time.sleep(self.TIMEOUT)
def apply_filter_all_ports(self, filter_expression):
for p in self.dataplane.ports.values():
port = p.get_packet_source()
scapyall.attach_filter(port.socket, filter_expression)
def send_in_background(self, packets_list = None, interval = None):
"""
This method sends predefined list of packets with predefined interval.
"""
if not interval:
interval = self.send_interval
if not packets_list:
packets_list = self.packets_list
self.sniffer_started.wait(timeout=10)
with self.dataplane_io_lock:
# While the fast data plane sender thread is running, a filter is applied for two reasons:
# 1. filter out TCP data plane traffic to reduce the load on the PTF socket (the sniffer thread uses a different one)
# 2. during warm neighbor restoration the DUT sends a lot of ARP requests which we are not interested in
# This is essential to get stable results
self.apply_filter_all_ports('not (arp and ether src {}) and not tcp'.format(self.test_params['dut_mac']))
sender_start = datetime.datetime.now()
self.log("Sender started at %s" % str(sender_start))
for entry in packets_list:
time.sleep(interval)
if self.vnet:
testutils.send_packet(self, entry[0], entry[1].decode("base64"))
else:
testutils.send_packet(self, *entry)
self.log("Sender has been running for %s" % str(datetime.datetime.now() - sender_start))
# Remove filter
self.apply_filter_all_ports('')
def sniff_in_background(self, wait = None):
"""
This function listens on all ports, in both directions, for the TCP src=1234 dst=5000 packets, until timeout.
All captured packets are saved to self.packets as scapy objects
(and later dumped to a local pcap file by save_sniffed_packets()).
The native scapy sniff() is run in a background thread to allow a delayed start of send_in_background().
"""
if not wait:
wait = self.time_to_listen + self.test_params['sniff_time_incr']
sniffer_start = datetime.datetime.now()
self.log("Sniffer started at %s" % str(sniffer_start))
sniff_filter = "tcp and tcp dst port 5000 and tcp src port 1234 and not icmp"
scapy_sniffer = threading.Thread(target=self.scapy_sniff, kwargs={'wait': wait, 'sniff_filter': sniff_filter})
scapy_sniffer.start()
time.sleep(2) # Let the scapy sniff initialize completely.
self.sniffer_started.set() # Unblock waiter for the send_in_background.
scapy_sniffer.join()
self.log("Sniffer has been running for %s" % str(datetime.datetime.now() - sniffer_start))
self.sniffer_started.clear()
def save_sniffed_packets(self):
filename = "/tmp/capture_%s.pcap" % self.logfile_suffix if self.logfile_suffix is not None else "/tmp/capture.pcap"
if self.packets:
scapyall.wrpcap(filename, self.packets)
self.log("Pcap file dumped to %s" % filename)
else:
self.log("Pcap file is empty.")
def scapy_sniff(self, wait = 180, sniff_filter = ''):
"""
This method wraps the native scapy sniff() method.
"""
self.packets = scapyall.sniff(timeout = wait, filter = sniff_filter)
def send_and_sniff(self):
"""
This method starts two background threads in parallel:
one for sending, another for collecting the sent packets.
"""
self.sender_thr = threading.Thread(target = self.send_in_background)
self.sniff_thr = threading.Thread(target = self.sniff_in_background)
self.sniffer_started = threading.Event() # Event for the sniff_in_background status.
self.sniff_thr.start()
self.sender_thr.start()
self.sniff_thr.join()
self.sender_thr.join()
def check_tcp_payload(self, packet):
"""
This method is used by examine_flow() method.
It returns True if a packet is not corrupted and carries a valid sequential TCP payload, as created by the generate_bidirectional() method.
"""
try:
# the payload must parse as an integer within the range of generated sequence numbers
return int(str(packet[scapyall.TCP].payload)) in range(self.packets_to_send)
except Exception as err:
return False
def no_flood(self, packet):
"""
This method filters packets which are unique (i.e. no floods).
"""
if (not int(str(packet[scapyall.TCP].payload)) in self.unique_id) and (packet[scapyall.Ether].src == self.dut_mac):
# This is a unique (no flooded) received packet.
self.unique_id.append(int(str(packet[scapyall.TCP].payload)))
return True
elif packet[scapyall.Ether].dst == self.dut_mac:
# This is a sent packet.
return True
else:
return False
def examine_flow(self, filename = None):
"""
This method examines pcap file (if given), or self.packets scapy file.
The method compares TCP payloads of the packets one by one (assuming all payloads are consecutive integers),
and any losses found are treated as disruptions in data plane forwarding.
All disruptions are saved to self.lost_packets dictionary, in format:
disrupt_start_id = (missing_packets_count, disrupt_time, disrupt_start_timestamp, disrupt_stop_timestamp)
"""
if filename:
all_packets = scapyall.rdpcap(filename)
elif self.packets:
all_packets = self.packets
else:
self.log("Filename and self.packets are not defined.")
self.fails['dut'].add("Filename and self.packets are not defined")
return None
# Filter out packets and remove floods:
self.unique_id = list() # This list will contain all unique Payload ID, to filter out received floods.
filtered_packets = [ pkt for pkt in all_packets if
scapyall.TCP in pkt and
not scapyall.ICMP in pkt and
pkt[scapyall.TCP].sport == 1234 and
pkt[scapyall.TCP].dport == 5000 and
self.check_tcp_payload(pkt) and
self.no_flood(pkt)
]
if self.vnet:
decap_packets = [ scapyall.Ether(str(pkt.payload.payload.payload)[8:]) for pkt in all_packets if
scapyall.UDP in pkt and
pkt[scapyall.UDP].sport == 1234
]
filtered_decap_packets = [ pkt for pkt in decap_packets if
scapyall.TCP in pkt and
not scapyall.ICMP in pkt and
pkt[scapyall.TCP].sport == 1234 and
pkt[scapyall.TCP].dport == 5000 and
self.check_tcp_payload(pkt) and
self.no_flood(pkt)
]
filtered_packets = filtered_packets + filtered_decap_packets
# Re-arrange packets, if delayed, by Payload ID and Timestamp:
packets = sorted(filtered_packets, key = lambda packet: (int(str(packet[scapyall.TCP].payload)), packet.time ))
self.lost_packets = dict()
self.max_disrupt, self.total_disruption = 0, 0
sent_packets = dict()
self.fails['dut'].add("Sniffer failed to capture any traffic")
self.assertTrue(packets, "Sniffer failed to capture any traffic")
self.fails['dut'].clear()
if packets:
prev_payload, prev_time = 0, 0
sent_payload = 0
received_counter = 0 # Counts packets from dut.
self.disruption_start, self.disruption_stop = None, None
for packet in packets:
if packet[scapyall.Ether].dst == self.dut_mac:
# This is a sent packet - keep track of it as payload_id:timestamp.
sent_payload = int(str(packet[scapyall.TCP].payload))
sent_packets[sent_payload] = packet.time
continue
if packet[scapyall.Ether].src == self.dut_mac:
# This is a received packet.
received_time = packet.time
received_payload = int(str(packet[scapyall.TCP].payload))
received_counter += 1
if not (received_payload and received_time):
# This is the first valid received packet.
prev_payload = received_payload
prev_time = received_time
continue
if received_payload - prev_payload > 1:
# Packets in a row are missing, a disruption.
lost_id = (received_payload -1) - prev_payload # How many packets lost in a row.
disrupt = (sent_packets[received_payload] - sent_packets[prev_payload + 1]) # How long disrupt lasted.
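# the disruption length is measured between the send timestamps of the first lost packet (prev_payload + 1) and the first packet received after the gap (received_payload)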
# Add disrupt to the dict:
self.lost_packets[prev_payload] = (lost_id, disrupt, received_time - disrupt, received_time)
self.log("Disruption between packet ID %d and %d. For %.4f " % (prev_payload, received_payload, disrupt))
if not self.disruption_start:
self.disruption_start = datetime.datetime.fromtimestamp(prev_time)
self.disruption_stop = datetime.datetime.fromtimestamp(received_time)
prev_payload = received_payload
prev_time = received_time
self.fails['dut'].add("Sniffer failed to filter any traffic from DUT")
self.assertTrue(received_counter, "Sniffer failed to filter any traffic from DUT")
self.fails['dut'].clear()
self.disrupts_count = len(self.lost_packets) # Total disrupt counter.
if self.lost_packets:
# Find the longest loss with the longest time:
max_disrupt_from_id, (self.max_lost_id, self.max_disrupt_time, self.no_routing_start, self.no_routing_stop) = \
max(self.lost_packets.items(), key = lambda item:item[1][0:2])
self.total_disrupt_packets = sum([item[0] for item in self.lost_packets.values()])
self.total_disrupt_time = sum([item[1] for item in self.lost_packets.values()])
self.log("Disruptions happen between %s and %s after the reboot." % \
(str(self.disruption_start - self.reboot_start), str(self.disruption_stop - self.reboot_start)))
else:
self.max_lost_id = 0
self.max_disrupt_time = 0
self.total_disrupt_packets = 0
self.total_disrupt_time = 0
self.log("Gaps in forwarding not found.")
self.log("Total incoming packets captured %d" % received_counter)
if packets:
filename = '/tmp/capture_filtered.pcap' if self.logfile_suffix is None else "/tmp/capture_filtered_%s.pcap" % self.logfile_suffix
scapyall.wrpcap(filename, packets)
self.log("Filtered pcap dumped to %s" % filename)
def check_forwarding_stop(self, signal):
self.asic_start_recording_vlan_reachability()
while not signal.is_set():
state = self.asic_state.get()
for _, q in self.ssh_jobs:
self.put_nowait(q, 'check_stop')
if state == 'down':
break
time.sleep(self.TIMEOUT)
self.asic_stop_recording_vlan_reachability()
return self.asic_state.get_state_time(state), self.get_asic_vlan_reachability()
def check_forwarding_resume(self, signal):
while not signal.is_set():
state = self.asic_state.get()
if state != 'down':
break
time.sleep(self.TIMEOUT)
return self.asic_state.get_state_time(state), self.get_asic_vlan_reachability()
def ping_data_plane(self, light_probe=True):
self.dataplane.flush()
replies_from_servers = self.pingFromServers()
if replies_from_servers > 0 or not light_probe:
replies_from_upper = self.pingFromUpperTier()
else:
replies_from_upper = 0
return replies_from_servers, replies_from_upper
def wait_dut_to_warm_up(self):
# When the DUT is freshly rebooted, it appears that it needs to warm
# up towards PTF docker. In practice, I've seen this warm up taking
# up to ~70 seconds.
fail = None
dut_stabilize_secs = int(self.test_params['dut_stabilize_secs'])
warm_up_timeout_secs = int(self.test_params['warm_up_timeout_secs'])
start_time = datetime.datetime.now()
up_time = None
# First wait until DUT data/control planes are up
while True:
dataplane = self.asic_state.get()
ctrlplane = self.cpu_state.get()
elapsed = (datetime.datetime.now() - start_time).total_seconds()
if dataplane == 'up' and ctrlplane == 'up':
if not up_time:
up_time = datetime.datetime.now()
up_secs = (datetime.datetime.now() - up_time).total_seconds()
if up_secs > dut_stabilize_secs:
break
else:
# reset up_time
up_time = None
if elapsed > warm_up_timeout_secs:
raise Exception("Control plane didn't come up within warm up timeout")
time.sleep(1)
# check until flooding is over. Flooding happens when the FDB entry of a
# certain host is not yet learnt by the ASIC, so the ASIC sends the
# packet to all vlan ports.
uptime = datetime.datetime.now()
while True:
elapsed = (datetime.datetime.now() - start_time).total_seconds()
if not self.asic_state.is_flooding() and elapsed > dut_stabilize_secs:
break
if elapsed > warm_up_timeout_secs:
if self.allow_vlan_flooding:
break
raise Exception("Data plane didn't stop flooding within warm up timeout")
time.sleep(1)
dataplane = self.asic_state.get()
ctrlplane = self.cpu_state.get()
if not dataplane == 'up':
fail = "Data plane"
elif not ctrlplane == 'up':
fail = "Control plane"
if fail is not None:
raise Exception("{} went down while waiting for flooding to stop".format(fail))
if self.asic_state.get_state_time('up') > uptime:
fail = "Data plane"
elif self.cpu_state.get_state_time('up') > uptime:
fail = "Control plane"
if fail is not None:
raise Exception("{} flapped while waiting for the warm up".format(fail))
# Everything is good
def check_alive(self):
# This function checks that the DUT routes packets in both directions.
#
# Sometimes the first attempt fails because ARP responses to the DUT are not fast enough.
# But after that the function expects to see steady replies.
# If the function sees an issue with the data plane after successful replies
# were already observed, it considers the DUT not healthy.
#
# Sometimes the DUT returns more replies than requests; this is most likely
# caused by a not yet populated FDB table, so the function also waits until
# FDB population is done.
uptime = None
for counter in range(self.nr_tests * 2):
state = self.asic_state.get()
if state == 'up':
if not uptime:
uptime = self.asic_state.get_state_time(state)
else:
if uptime:
raise Exception("Data plane stopped working")
time.sleep(2)
# wait, until FDB entries are populated
for _ in range(self.nr_tests * 10): # wait for some time
if self.asic_state.is_flooding():
time.sleep(2)
else:
break
else:
raise Exception("DUT is flooding")
def get_asic_vlan_reachability(self):
return self.asic_vlan_reach
def asic_start_recording_vlan_reachability(self):
with self.vlan_lock:
self.asic_vlan_reach = []
self.recording = True
def asic_stop_recording_vlan_reachability(self):
with self.vlan_lock:
self.recording = False
def try_record_asic_vlan_reachability(self, t1_to_vlan):
with self.vlan_lock:
if self.recording:
self.asic_vlan_reach.append(t1_to_vlan)
def log_asic_state_change(self, reachable, partial=False, t1_to_vlan=0, flooding=False):
old = self.asic_state.get()
if reachable:
state = 'up' if not partial else 'partial'
else:
state = 'down'
self.try_record_asic_vlan_reachability(t1_to_vlan)
self.asic_state.set_flooding(flooding)
if old != state:
self.log("Data plane state transition from %s to %s (%d)" % (old, state, t1_to_vlan))
self.asic_state.set(state)
def log_cpu_state_change(self, reachable, partial=False, flooding=False):
old = self.cpu_state.get()
if reachable:
state = 'up' if not partial else 'partial'
else:
state = 'down'
self.cpu_state.set_flooding(flooding)
if old != state:
self.log("Control plane state transition from %s to %s" % (old, state))
self.cpu_state.set(state)
def log_vlan_state_change(self, reachable):
old = self.vlan_state.get()
if reachable:
state = 'up'
else:
state = 'down'
if old != state:
self.log("VLAN ARP state transition from %s to %s" % (old, state))
self.vlan_state.set(state)
def reachability_watcher(self):
# This function watches the reachability of the CPU port, and ASIC. It logs the state
# changes for future analysis
self.watcher_is_stopped.clear() # Watcher is running.
while self.watching:
if self.dataplane_io_lock.acquire(False):
vlan_to_t1, t1_to_vlan = self.ping_data_plane(self.light_probe)
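# heuristics: more than 70% of probes answered in both directions => reachable; fewer than all expected => partial; more than expected => flooding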
reachable = (t1_to_vlan > self.nr_vl_pkts * 0.7 and
vlan_to_t1 > self.nr_pc_pkts * 0.7)
partial = (reachable and
(t1_to_vlan < self.nr_vl_pkts or
vlan_to_t1 < self.nr_pc_pkts))
flooding = (reachable and
(t1_to_vlan > self.nr_vl_pkts or
vlan_to_t1 > self.nr_pc_pkts))
self.log_asic_state_change(reachable, partial, t1_to_vlan, flooding)
self.dataplane_io_lock.release()
total_rcv_pkt_cnt = self.pingDut()
reachable = total_rcv_pkt_cnt > 0 and total_rcv_pkt_cnt > self.ping_dut_pkts * 0.7
partial = total_rcv_pkt_cnt > 0 and total_rcv_pkt_cnt < self.ping_dut_pkts
flooding = reachable and total_rcv_pkt_cnt > self.ping_dut_pkts
self.log_cpu_state_change(reachable, partial, flooding)
total_rcv_pkt_cnt = self.arpPing()
reachable = total_rcv_pkt_cnt >= self.arp_ping_pkts
self.log_vlan_state_change(reachable)
self.watcher_is_running.set() # Watcher is running.
self.watcher_is_stopped.set() # Watcher has stopped.
self.watcher_is_running.clear() # Watcher has stopped.
def pingFromServers(self):
for i in xrange(self.nr_pc_pkts):
testutils.send_packet(self, self.from_server_src_port, self.from_vlan_packet)
total_rcv_pkt_cnt = testutils.count_matched_packets_all_ports(self, self.from_vlan_exp_packet, self.from_server_dst_ports, timeout=self.PKT_TOUT)
self.log("Send %5d Received %5d servers->t1" % (self.nr_pc_pkts, total_rcv_pkt_cnt), True)
return total_rcv_pkt_cnt
def pingFromUpperTier(self):
for entry in self.from_t1:
testutils.send_packet(self, *entry)
total_rcv_pkt_cnt = testutils.count_matched_packets_all_ports(self, self.from_t1_exp_packet, self.vlan_ports, timeout=self.PKT_TOUT)
self.log("Send %5d Received %5d t1->servers" % (self.nr_vl_pkts, total_rcv_pkt_cnt), True)
return total_rcv_pkt_cnt
def pingDut(self):
for i in xrange(self.ping_dut_pkts):
testutils.send_packet(self, self.random_port(self.vlan_ports), self.ping_dut_packet)
total_rcv_pkt_cnt = testutils.count_matched_packets_all_ports(self, self.ping_dut_exp_packet, self.vlan_ports, timeout=self.PKT_TOUT)
self.log("Send %5d Received %5d ping DUT" % (self.ping_dut_pkts, total_rcv_pkt_cnt), True)
return total_rcv_pkt_cnt
def arpPing(self):
for i in xrange(self.arp_ping_pkts):
testutils.send_packet(self, self.arp_src_port, self.arp_ping)
total_rcv_pkt_cnt = testutils.count_matched_packets_all_ports(self, self.arp_resp, [self.arp_src_port], timeout=self.PKT_TOUT)
self.log("Send %5d Received %5d arp ping" % (self.arp_ping_pkts, total_rcv_pkt_cnt), True)
return total_rcv_pkt_cnt
|
augment_agent_meta_data.py
|
import os
import sys
sys.path.append(os.path.join(os.environ['ALFRED_ROOT']))
sys.path.append(os.path.join(os.environ['ALFRED_ROOT'], 'gen'))
import json
import glob
import constants
import cv2
import shutil
import numpy as np
import argparse
import threading
import time
import copy
import random
from gen.utils.video_util import VideoSaver
from gen.utils.py_util import walklevel
from env.thor_env import ThorEnv
# event.metadata['agent']
TRAJ_DATA_JSON_FILENAME = "traj_data.json"
ORIGINAL_IMAGES_FOLDER = "raw_images"
AGENT_META_FOLDER = "agent_meta"
AGENT_EXPLORATION_META_FOLDER = "exploration_agent_meta"
IMAGE_WIDTH = 300
IMAGE_HEIGHT = 300
render_settings = dict()
render_settings['renderImage'] = True
render_settings['renderDepthImage'] = True
render_settings['renderObjectImage'] = True
render_settings['renderClassImage'] = True
video_saver = VideoSaver()
fail_log = open("fail_log.txt", "w")
def get_openable_points(traj_data):
scene_num = traj_data['scene']['scene_num']
openable_json_file = os.path.join(os.environ['ALFRED_ROOT'], 'gen/layouts/FloorPlan%d-openable.json' % scene_num)
with open(openable_json_file, 'r') as f:
openable_points = json.load(f)
return openable_points
def explore_scene(env, traj_data, root_dir):
'''
Use pre-computed openable points from ALFRED to store receptacle locations
'''
openable_points = get_openable_points(traj_data)
agent_height = env.last_event.metadata['agent']['position']['y']
for recep_id, point in openable_points.items():
recep_class = recep_id.split("|")[0]
action = {'action': 'TeleportFull',
'x': point[0],
'y': agent_height,
'z': point[1],
'rotateOnTeleport': False,
'rotation': point[2],
'horizon': point[3]}
event = env.step(action)
save_frame(env, event, root_dir, folder_name="EXPLORATION")
# class_detections2D
def save_frame(env, event, root_dir, task_desc='None', folder_name="OBJECT_META_FOLDER"):
# META DATA
agent_meta_path = os.path.join(root_dir, AGENT_META_FOLDER)
# EXPLORATION_IMG
if folder_name == "EXPLORATION":
agent_meta_path = os.path.join(root_dir, AGENT_EXPLORATION_META_FOLDER)
# META DATA
im_idx = get_json_index(agent_meta_path)
# grab the agent metadata for this frame
meta_agent = env.last_event.metadata['agent']
# save the agent metadata as json
sgg_meta_file = os.path.join(agent_meta_path, "%09d.json" % (im_idx))
with open(sgg_meta_file, 'w') as f:
json.dump(meta_agent, f)
def get_json_index(save_path):
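# the next frame index equals the number of .json files already saved (frames are written densely as %09d.json)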
file = glob.glob(save_path + '/*.json')
return len(file)
def get_image_index(save_path):
max_img = max(len(glob.glob(save_path + '/*.png')), len(glob.glob(save_path + '/*.jpg')))
return max_img
def save_image_with_delays(env, action,
save_path, direction=constants.BEFORE):
im_ind = get_json_index(save_path)
counts = constants.SAVE_FRAME_BEFORE_AND_AFTER_COUNTS[action['action']][direction]
for i in range(counts):
save_frame(env, env.last_event, save_path)
env.noop()
return im_ind
def save_images_in_events(env, events, root_dir):
for event in events:
save_frame(env, event, root_dir)
def check_dir(path):
if os.path.exists(path):
return True
os.mkdir(path)
return False
def clear_and_create_dir(path):
if os.path.exists(path):
shutil.rmtree(path)
os.mkdir(path)
def augment_traj(env, json_file):
# load json data
with open(json_file) as f:
traj_data = json.load(f)
# make directories
root_dir = json_file.replace(TRAJ_DATA_JSON_FILENAME, "")
orig_images_dir = os.path.join(root_dir, ORIGINAL_IMAGES_FOLDER)
agent_meta_path = os.path.join(root_dir, AGENT_META_FOLDER)
exploration_agent_meta_path = os.path.join(root_dir, AGENT_EXPLORATION_META_FOLDER)
# fresh images list
traj_data['images'] = list()
clear_and_create_dir(agent_meta_path)
clear_and_create_dir(exploration_agent_meta_path)
# print("no clear_and_create_dir")
# scene setup
scene_num = traj_data['scene']['scene_num']
object_poses = traj_data['scene']['object_poses']
object_toggles = traj_data['scene']['object_toggles']
dirty_and_empty = traj_data['scene']['dirty_and_empty']
# reset
scene_name = 'FloorPlan%d' % scene_num
env.reset(scene_name)
env.restore_scene(object_poses, object_toggles, dirty_and_empty)
print(agent_meta_path)
explore_scene(env, traj_data, root_dir)
env.step(dict(traj_data['scene']['init_action']))
# print("Task: %s" % (traj_data['template']['task_desc']))
# setup task
env.set_task(traj_data, args, reward_type='dense')
rewards = []
for ll_idx, ll_action in enumerate(traj_data['plan']['low_actions']):
# next cmd under the current hl_action
cmd = ll_action['api_action']
hl_action = traj_data['plan']['high_pddl'][ll_action['high_idx']]
# remove unnecessary keys
cmd = {k: cmd[k] for k in ['action', 'objectId', 'receptacleObjectId', 'placeStationary', 'forceAction'] if k in cmd}
if "MoveAhead" in cmd['action']:
if args.smooth_nav:
save_frame(env, env.last_event, root_dir)
events = env.smooth_move_ahead(cmd, render_settings)
save_images_in_events(env, events, root_dir)
event = events[-1]
else:
save_frame(env, env.last_event, root_dir)
event = env.step(cmd)
elif "Rotate" in cmd['action']:
if args.smooth_nav:
save_frame(env, env.last_event, root_dir)
events = env.smooth_rotate(cmd, render_settings)
save_images_in_events(env, events, root_dir)
event = events[-1]
else:
save_frame(env, env.last_event, root_dir)
event = env.step(cmd)
elif "Look" in cmd['action']:
if args.smooth_nav:
save_frame(env, env.last_event, root_dir)
events = env.smooth_look(cmd, render_settings)
save_images_in_events(env, events, root_dir)
event = events[-1]
else:
save_frame(env, env.last_event, root_dir)
event = env.step(cmd)
# handle the exception for CoolObject tasks, where the 'CoolObject' action is actually a 'CloseObject'
# TODO: a proper fix for this issue
elif "CloseObject" in cmd['action'] and \
"CoolObject" in hl_action['planner_action']['action'] and \
"OpenObject" in traj_data['plan']['low_actions'][ll_idx + 1]['api_action']['action']:
if args.time_delays:
cool_action = hl_action['planner_action']
save_image_with_delays(env, cool_action, save_path=root_dir, direction=constants.BEFORE)
event = env.step(cmd)
save_image_with_delays(env, cool_action, save_path=root_dir, direction=constants.MIDDLE)
save_image_with_delays(env, cool_action, save_path=root_dir, direction=constants.AFTER)
else:
save_frame(env, env.last_event, root_dir)
event = env.step(cmd)
else:
if args.time_delays:
save_image_with_delays(env, cmd, save_path=root_dir, direction=constants.BEFORE)
event = env.step(cmd)
save_image_with_delays(env, cmd, save_path=root_dir, direction=constants.MIDDLE)
save_image_with_delays(env, cmd, save_path=root_dir, direction=constants.AFTER)
else:
save_frame(env, env.last_event, root_dir)
event = env.step(cmd)
if not event.metadata['lastActionSuccess']:
print("Replay Failed: %s" % (env.last_event.metadata['errorMessage']))
fail_log.write("Replay Failed: %s \n" % (env.last_event.metadata['errorMessage']))
raise Exception("Replay Failed: %s" % (env.last_event.metadata['errorMessage']))
reward, _ = env.get_transition_reward()
rewards.append(reward)
# save 10 frames in the end as per the training data
for _ in range(10):
save_frame(env, env.last_event, root_dir)
# check that the number of saved agent meta frames matches the number of original images
if args.smooth_nav and args.time_delays:
orig_img_count = get_image_index(orig_images_dir)
object_meta_count = get_json_index(agent_meta_path)
print ("Original Image Count %d, New Image Count %d" % (orig_img_count, object_meta_count))
if orig_img_count != object_meta_count:
print("sequence length doesn't match\n" + agent_meta_path + "\n")
fail_log.write("sequence length doesn't match\n" + agent_meta_path + "\n")
fail_log.write("Original Image Count %d, New Image Count %d" % (orig_img_count, object_meta_count))
raise Exception("WARNING: the augmented sequence length doesn't match the original")
def run():
'''
replay loop
'''
# start THOR env
env = ThorEnv(player_screen_width=IMAGE_WIDTH,
player_screen_height=IMAGE_HEIGHT)
skipped_files = []
while True:
# check emptiness and pop under the lock so concurrent worker threads cannot race between the check and pop()
lock.acquire()
if not traj_list:
lock.release()
break
json_file = traj_list.pop()
lock.release()
print ("Augmenting: " + json_file)
try:
augment_traj(env, json_file)
except Exception as e:
import traceback
traceback.print_exc()
print ("Error: " + repr(e))
print ("Skipping " + json_file)
skipped_files.append(json_file)
fail_log.write(repr(e) + "\n")
fail_log.write(json_file + "\n")
env.stop()
print("Finished.")
# skipped files
if len(skipped_files) > 0:
print("Skipped Files:")
print(skipped_files)
traj_list = []
lock = threading.Lock()
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=str, default="data/2.1.0")
parser.add_argument('--split', type=str, default='valid_seen', choices=['train', 'valid_seen', 'valid_unseen'])
parser.add_argument('--smooth_nav', dest='smooth_nav', action='store_true')
parser.add_argument('--time_delays', dest='time_delays', action='store_true')
parser.add_argument('--shuffle', dest='shuffle', action='store_true')
parser.add_argument('--num_threads', type=int, default=1)
parser.add_argument('--reward_config', type=str, default='../models/config/rewards.json')
args = parser.parse_args()
# make a list of all the traj_data json files
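# note: all three splits are walked here; the --split argument defined above does not appear to be used for filtering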
for split in ['train/', 'valid_seen/', 'valid_unseen/']:
for dir_name, subdir_list, file_list in walklevel(os.path.join(args.data_path, split), level=2):
if "trial_" in dir_name:
json_file = os.path.join(dir_name, TRAJ_DATA_JSON_FILENAME)
# import pdb; pdb.set_trace()
if not os.path.isfile(json_file):
continue
traj_list.append(json_file)
# traj_list = ['../data/full_2.1.0/train/pick_heat_then_place_in_recep-Egg-None-Fridge-13/trial_T20190907_151643_465634/traj_data.json', '../data/full_2.1.0/train/pick_cool_then_place_in_recep-PotatoSliced-None-DiningTable-24/trial_T20190908_194409_961394/traj_data.json', '../data/full_2.1.0/train/pick_and_place_with_movable_recep-Spatula-Pan-DiningTable-28/trial_T20190907_222606_903630/traj_data.json', '../data/full_2.1.0/train/pick_cool_then_place_in_recep-AppleSliced-None-DiningTable-27/trial_T20190907_171803_405680/traj_data.json', '../data/full_2.1.0/train/pick_heat_then_place_in_recep-PotatoSliced-None-SinkBasin-14/trial_T20190910_120350_730711/traj_data.json', '../data/full_2.1.0/train/pick_cool_then_place_in_recep-LettuceSliced-None-SinkBasin-4/trial_T20190909_101847_813539/traj_data.json', '../data/full_2.1.0/train/pick_cool_then_place_in_recep-Lettuce-None-SinkBasin-23/trial_T20190908_173530_026785/traj_data.json', '../data/full_2.1.0/train/pick_and_place_with_movable_recep-LettuceSliced-Pan-DiningTable-28/trial_T20190906_232604_097173/traj_data.json', '../data/full_2.1.0/train/pick_and_place_with_movable_recep-Spoon-Bowl-SinkBasin-27/trial_T20190907_213616_713879/traj_data.json', '../data/full_2.1.0/train/pick_heat_then_place_in_recep-AppleSliced-None-SideTable-3/trial_T20190908_110347_206140/traj_data.json', '../data/full_2.1.0/train/pick_clean_then_place_in_recep-LettuceSliced-None-Fridge-11/trial_T20190918_174139_904388/traj_data.json', '../data/full_2.1.0/train/pick_cool_then_place_in_recep-PotatoSliced-None-GarbageCan-11/trial_T20190909_013637_168506/traj_data.json', '../data/full_2.1.0/train/pick_cool_then_place_in_recep-Pan-None-StoveBurner-23/trial_T20190906_215826_707811/traj_data.json', '../data/full_2.1.0/train/pick_cool_then_place_in_recep-Plate-None-Shelf-20/trial_T20190907_034714_802572/traj_data.json', '../data/full_2.1.0/train/look_at_obj_in_light-Pen-None-DeskLamp-316/trial_T20190908_061814_700195/traj_data.json', '../data/full_2.1.0/train/pick_cool_then_place_in_recep-PotatoSliced-None-CounterTop-19/trial_T20190909_053101_102010/traj_data.json', '../data/full_2.1.0/train/look_at_obj_in_light-Laptop-None-DeskLamp-319/trial_T20190908_182531_510491/traj_data.json', '../data/full_2.1.0/train/look_at_obj_in_light-Laptop-None-DeskLamp-319/trial_T20190908_182720_056041/traj_data.json', '../data/full_2.1.0/train/pick_and_place_with_movable_recep-LettuceSliced-Pot-DiningTable-21/trial_T20190907_160923_689765/traj_data.json', '../data/full_2.1.0/train/look_at_obj_in_light-Pillow-None-DeskLamp-319/trial_T20190907_224211_927258/traj_data.json', '../data/full_2.1.0/train/pick_cool_then_place_in_recep-LettuceSliced-None-GarbageCan-6/trial_T20190907_210244_406018/traj_data.json', '../data/full_2.1.0/train/pick_and_place_with_movable_recep-AppleSliced-Bowl-Fridge-26/trial_T20190908_162237_908840/traj_data.json', '../data/full_2.1.0/train/pick_and_place_simple-ToiletPaper-None-ToiletPaperHanger-407/trial_T20190909_081822_309167/traj_data.json', '../data/full_2.1.0/train/pick_and_place_with_movable_recep-Pen-Bowl-Dresser-311/trial_T20190908_170820_174380/traj_data.json', '../data/full_2.1.0/train/pick_clean_then_place_in_recep-Ladle-None-Drawer-4/trial_T20190909_161523_929674/traj_data.json', '../data/full_2.1.0/train/pick_cool_then_place_in_recep-Apple-None-Microwave-19/trial_T20190906_210805_698141/traj_data.json', '../data/full_2.1.0/train/pick_and_place_with_movable_recep-AppleSliced-Bowl-Fridge-21/trial_T20190908_054316_003433/traj_data.json', 
'../data/full_2.1.0/train/pick_and_place_with_movable_recep-Ladle-Bowl-SinkBasin-30/trial_T20190907_143416_683614/traj_data.json', '../data/full_2.1.0/train/pick_heat_then_place_in_recep-PotatoSliced-None-SinkBasin-23/trial_T20190907_123248_978930/traj_data.json', ]
# random shuffle
if args.shuffle:
random.shuffle(traj_list)
# start threads
# run()
threads = []
for n in range(args.num_threads):
thread = threading.Thread(target=run)
threads.append(thread)
thread.start()
time.sleep(1)
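# note: the worker threads are not daemonized, so the interpreter keeps running until they all finish; fail_log stays open for them to write to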
|
test_common.py
|
from __future__ import absolute_import, unicode_literals
import pytest
import socket
from amqp import RecoverableConnectionError
from case import ContextMock, Mock, patch
from kombu import common
from kombu.common import (
Broadcast, maybe_declare,
send_reply, collect_replies,
declaration_cached, ignore_errors,
QoS, PREFETCH_COUNT_MAX, generate_oid
)
from t.mocks import MockPool
def test_generate_oid():
from uuid import NAMESPACE_OID
from kombu.five import bytes_if_py2
instance = Mock()
args = (1, 1001, 2001, id(instance))
ent = bytes_if_py2('%x-%x-%x-%x' % args)
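# per the assertion below, generate_oid() hashes "<node>-<pid>-<thread>-<id(instance)>" (rendered in hex) under NAMESPACE_OID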
with patch('kombu.common.uuid3') as mock_uuid3, \
patch('kombu.common.uuid5') as mock_uuid5:
mock_uuid3.side_effect = ValueError
mock_uuid3.return_value = 'uuid3-6ba7b812-9dad-11d1-80b4'
mock_uuid5.return_value = 'uuid5-6ba7b812-9dad-11d1-80b4'
oid = generate_oid(1, 1001, 2001, instance)
mock_uuid5.assert_called_once_with(NAMESPACE_OID, ent)
assert oid == 'uuid5-6ba7b812-9dad-11d1-80b4'
def test_ignore_errors():
connection = Mock()
connection.channel_errors = (KeyError,)
connection.connection_errors = (KeyError,)
with ignore_errors(connection):
raise KeyError()
def raising():
raise KeyError()
ignore_errors(connection, raising)
connection.channel_errors = connection.connection_errors = ()
with pytest.raises(KeyError):
with ignore_errors(connection):
raise KeyError()
class test_declaration_cached:
def test_when_cached(self):
chan = Mock()
chan.connection.client.declared_entities = ['foo']
assert declaration_cached('foo', chan)
def test_when_not_cached(self):
chan = Mock()
chan.connection.client.declared_entities = ['bar']
assert not declaration_cached('foo', chan)
class test_Broadcast:
def test_arguments(self):
with patch('kombu.common.uuid',
return_value='test') as uuid_mock:
q = Broadcast(name='test_Broadcast')
uuid_mock.assert_called_with()
assert q.name == 'bcast.test'
assert q.alias == 'test_Broadcast'
assert q.auto_delete
assert q.exchange.name == 'test_Broadcast'
assert q.exchange.type == 'fanout'
q = Broadcast('test_Broadcast', 'explicit_queue_name')
assert q.name == 'explicit_queue_name'
assert q.exchange.name == 'test_Broadcast'
q2 = q(Mock())
assert q2.name == q.name
with patch('kombu.common.uuid',
return_value='test') as uuid_mock:
q = Broadcast('test_Broadcast',
'explicit_queue_name',
unique=True)
uuid_mock.assert_called_with()
assert q.name == 'explicit_queue_name.test'
q2 = q(Mock())
assert q2.name.split('.')[0] == q.name.split('.')[0]
class test_maybe_declare:
def test_cacheable(self):
channel = Mock()
client = channel.connection.client = Mock()
client.declared_entities = set()
entity = Mock()
entity.can_cache_declaration = True
entity.auto_delete = False
entity.is_bound = True
entity.channel = channel
maybe_declare(entity, channel)
assert entity.declare.call_count == 1
assert hash(entity) in channel.connection.client.declared_entities
maybe_declare(entity, channel)
assert entity.declare.call_count == 1
entity.channel.connection = None
with pytest.raises(RecoverableConnectionError):
maybe_declare(entity)
def test_binds_entities(self):
channel = Mock()
channel.connection.client.declared_entities = set()
entity = Mock()
entity.can_cache_declaration = True
entity.is_bound = False
entity.bind.return_value = entity
entity.bind.return_value.channel = channel
maybe_declare(entity, channel)
entity.bind.assert_called_with(channel)
def test_with_retry(self):
channel = Mock()
client = channel.connection.client = Mock()
client.declared_entities = set()
entity = Mock()
entity.can_cache_declaration = True
entity.is_bound = True
entity.channel = channel
maybe_declare(entity, channel, retry=True)
assert channel.connection.client.ensure.call_count
class test_replies:
def test_send_reply(self):
req = Mock()
req.content_type = 'application/json'
req.content_encoding = 'binary'
req.properties = {'reply_to': 'hello',
'correlation_id': 'world'}
channel = Mock()
exchange = Mock()
exchange.is_bound = True
exchange.channel = channel
producer = Mock()
producer.channel = channel
producer.channel.connection.client.declared_entities = set()
send_reply(exchange, req, {'hello': 'world'}, producer)
assert producer.publish.call_count
args = producer.publish.call_args
assert args[0][0] == {'hello': 'world'}
assert args[1] == {
'exchange': exchange,
'routing_key': 'hello',
'correlation_id': 'world',
'serializer': 'json',
'retry': False,
'retry_policy': None,
'content_encoding': 'binary',
}
@patch('kombu.common.itermessages')
def test_collect_replies_with_ack(self, itermessages):
conn, channel, queue = Mock(), Mock(), Mock()
body, message = Mock(), Mock()
itermessages.return_value = [(body, message)]
it = collect_replies(conn, channel, queue, no_ack=False)
m = next(it)
assert m is body
itermessages.assert_called_with(conn, channel, queue, no_ack=False)
message.ack.assert_called_with()
with pytest.raises(StopIteration):
next(it)
channel.after_reply_message_received.assert_called_with(queue.name)
@patch('kombu.common.itermessages')
def test_collect_replies_no_ack(self, itermessages):
conn, channel, queue = Mock(), Mock(), Mock()
body, message = Mock(), Mock()
itermessages.return_value = [(body, message)]
it = collect_replies(conn, channel, queue)
m = next(it)
assert m is body
itermessages.assert_called_with(conn, channel, queue, no_ack=True)
message.ack.assert_not_called()
@patch('kombu.common.itermessages')
def test_collect_replies_no_replies(self, itermessages):
conn, channel, queue = Mock(), Mock(), Mock()
itermessages.return_value = []
it = collect_replies(conn, channel, queue)
with pytest.raises(StopIteration):
next(it)
channel.after_reply_message_received.assert_not_called()
class test_insured:
@patch('kombu.common.logger')
def test_ensure_errback(self, logger):
common._ensure_errback('foo', 30)
logger.error.assert_called()
def test_revive_connection(self):
on_revive = Mock()
channel = Mock()
common.revive_connection(Mock(), channel, on_revive)
on_revive.assert_called_with(channel)
common.revive_connection(Mock(), channel, None)
def get_insured_mocks(self, insured_returns=('works', 'ignored')):
conn = ContextMock()
pool = MockPool(conn)
fun = Mock()
insured = conn.autoretry.return_value = Mock()
insured.return_value = insured_returns
return conn, pool, fun, insured
def test_insured(self):
conn, pool, fun, insured = self.get_insured_mocks()
ret = common.insured(pool, fun, (2, 2), {'foo': 'bar'})
assert ret == 'works'
conn.ensure_connection.assert_called_with(
errback=common._ensure_errback,
)
insured.assert_called()
i_args, i_kwargs = insured.call_args
assert i_args == (2, 2)
assert i_kwargs == {'foo': 'bar', 'connection': conn}
conn.autoretry.assert_called()
ar_args, ar_kwargs = conn.autoretry.call_args
assert ar_args == (fun, conn.default_channel)
assert ar_kwargs.get('on_revive')
assert ar_kwargs.get('errback')
def test_insured_custom_errback(self):
conn, pool, fun, insured = self.get_insured_mocks()
custom_errback = Mock()
common.insured(pool, fun, (2, 2), {'foo': 'bar'},
errback=custom_errback)
conn.ensure_connection.assert_called_with(errback=custom_errback)
class MockConsumer(object):
consumers = set()
def __init__(self, channel, queues=None, callbacks=None, **kwargs):
self.channel = channel
self.queues = queues
self.callbacks = callbacks
def __enter__(self):
self.consumers.add(self)
return self
def __exit__(self, *exc_info):
self.consumers.discard(self)
class test_itermessages:
class MockConnection(object):
should_raise_timeout = False
def drain_events(self, **kwargs):
if self.should_raise_timeout:
raise socket.timeout()
for consumer in MockConsumer.consumers:
for callback in consumer.callbacks:
callback('body', 'message')
def test_default(self):
conn = self.MockConnection()
channel = Mock()
channel.connection.client = conn
conn.Consumer = MockConsumer
it = common.itermessages(conn, channel, 'q', limit=1)
ret = next(it)
assert ret == ('body', 'message')
with pytest.raises(StopIteration):
next(it)
def test_when_raises_socket_timeout(self):
conn = self.MockConnection()
conn.should_raise_timeout = True
channel = Mock()
channel.connection.client = conn
conn.Consumer = MockConsumer
it = common.itermessages(conn, channel, 'q', limit=1)
with pytest.raises(StopIteration):
next(it)
@patch('kombu.common.deque')
def test_when_raises_IndexError(self, deque):
deque_instance = deque.return_value = Mock()
deque_instance.popleft.side_effect = IndexError()
conn = self.MockConnection()
channel = Mock()
conn.Consumer = MockConsumer
it = common.itermessages(conn, channel, 'q', limit=1)
with pytest.raises(StopIteration):
next(it)
class test_QoS:
class _QoS(QoS):
def __init__(self, value):
self.value = value
QoS.__init__(self, None, value)
def set(self, value):
return value
def test_qos_exceeds_16bit(self):
with patch('kombu.common.logger') as logger:
callback = Mock()
qos = QoS(callback, 10)
qos.prev = 100
# cannot use 2 ** 32 because of a bug on macOS Py2.5:
# https://jira.mongodb.org/browse/PYTHON-389
qos.set(4294967296)
logger.warning.assert_called()
callback.assert_called_with(prefetch_count=0)
def test_qos_increment_decrement(self):
qos = self._QoS(10)
assert qos.increment_eventually() == 11
assert qos.increment_eventually(3) == 14
assert qos.increment_eventually(-30) == 14
assert qos.decrement_eventually(7) == 7
assert qos.decrement_eventually() == 6
def test_qos_disabled_increment_decrement(self):
qos = self._QoS(0)
assert qos.increment_eventually() == 0
assert qos.increment_eventually(3) == 0
assert qos.increment_eventually(-30) == 0
assert qos.decrement_eventually(7) == 0
assert qos.decrement_eventually() == 0
assert qos.decrement_eventually(10) == 0
def test_qos_thread_safe(self):
qos = self._QoS(10)
def add():
for i in range(1000):
qos.increment_eventually()
def sub():
for i in range(1000):
qos.decrement_eventually()
def threaded(funs):
from threading import Thread
threads = [Thread(target=fun) for fun in funs]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
threaded([add, add])
assert qos.value == 2010
qos.value = 1000
threaded([add, sub]) # n = 2
assert qos.value == 1000
def test_exceeds_short(self):
qos = QoS(Mock(), PREFETCH_COUNT_MAX - 1)
qos.update()
assert qos.value == PREFETCH_COUNT_MAX - 1
qos.increment_eventually()
assert qos.value == PREFETCH_COUNT_MAX
qos.increment_eventually()
assert qos.value == PREFETCH_COUNT_MAX + 1
qos.decrement_eventually()
assert qos.value == PREFETCH_COUNT_MAX
qos.decrement_eventually()
assert qos.value == PREFETCH_COUNT_MAX - 1
def test_consumer_increment_decrement(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.update()
assert qos.value == 10
mconsumer.qos.assert_called_with(prefetch_count=10)
qos.decrement_eventually()
qos.update()
assert qos.value == 9
mconsumer.qos.assert_called_with(prefetch_count=9)
qos.decrement_eventually()
assert qos.value == 8
mconsumer.qos.assert_called_with(prefetch_count=9)
assert {'prefetch_count': 9} in mconsumer.qos.call_args
# Does not decrement 0 value
qos.value = 0
qos.decrement_eventually()
assert qos.value == 0
qos.increment_eventually()
assert qos.value == 0
def test_consumer_decrement_eventually(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.decrement_eventually()
assert qos.value == 9
qos.value = 0
qos.decrement_eventually()
assert qos.value == 0
def test_set(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.set(12)
assert qos.prev == 12
qos.set(qos.prev)
|
vclustermgr.py
|
#!/usr/bin/python3
import os, random, json, sys, imagemgr, threading, servicemgr
import datetime
from log import logger
import env
##################################################
# VclusterMgr
# Description : VclusterMgr starts/stops/manages virtual clusters
#
##################################################
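# A minimal usage sketch (illustrative only; nodemgr, networkmgr, etcdclient and
# the image dict are assumed to be constructed elsewhere):
#
#   mgr = VclusterMgr(nodemgr, networkmgr, etcdclient, "10.0.0.1", "new")
#   [ok, info] = mgr.create_cluster("mycluster", "alice", image)
#   if ok:
#       mgr.start_cluster("mycluster", "alice")
#       mgr.stop_cluster("mycluster", "alice")
#       mgr.delete_cluster("mycluster", "alice")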
class VclusterMgr(object):
def __init__(self, nodemgr, networkmgr, etcdclient, addr, mode):
self.mode = mode
self.nodemgr = nodemgr
self.imgmgr = imagemgr.ImageMgr()
self.servmgr = servicemgr.ServiceMgr()
self.networkmgr = networkmgr
self.addr = addr
self.etcd = etcdclient
self.defaultsize = env.getenv("CLUSTER_SIZE")
self.fspath = env.getenv("FS_PREFIX")
logger.info ("vcluster start on %s" % (self.addr))
if self.mode == 'new':
logger.info ("starting in new mode on %s" % (self.addr))
# check that all cluster data has already been deleted (handled in httprest.py)
clean = True
usersdir = self.fspath+"/global/users/"
for user in os.listdir(usersdir):
if len(os.listdir(usersdir+user+"/clusters")) > 0 or len(os.listdir(usersdir+user+"/hosts")) > 0:
clean = False
if not clean:
logger.error ("clusters files not clean, start failed")
sys.exit(1)
elif self.mode == "recovery":
logger.info ("starting in recovery mode on %s" % (self.addr))
self.recover_allclusters()
else:
logger.error ("not supported mode:%s" % self.mode)
sys.exit(1)
def recover_allclusters(self):
logger.info("recovering all vclusters for all users...")
usersdir = self.fspath+"/global/users/"
for user in os.listdir(usersdir):
for cluster in self.list_clusters(user)[1]:
logger.info ("recovering cluster:%s for user:%s ..." % (cluster, user))
self.recover_cluster(cluster, user)
logger.info("recovered all vclusters for all users")
def create_cluster(self, clustername, username, image, onenode=None, multinodes=None):
if self.is_cluster(clustername, username):
return [False, "cluster:%s already exists" % clustername]
logger.info ('onenode : %s, multinodes : %s' % (onenode, multinodes))
clustersize = self.defaultsize
[clustersize, service] = self.servmgr.create_service(username, clustername, image, onenode, multinodes)
logger.info ("starting cluster %s with %d containers for %s" % (clustername, clustersize, username))
workers = self.nodemgr.get_rpcs()
if (len(workers) == 0):
logger.warning ("no workers to start containers, start cluster failed")
return [False, "no workers are running"]
imagename = image['name']
imageowner = image['owner']
imagetype = image['type']
#logger.info ("imagename : %s, imageowner : %s, imagetype : %s" % (imagename, imageowner, imagetype))
# check user IP pool status, should be moved to user init later
if not self.networkmgr.has_user(username):
self.networkmgr.add_user(username, cidr=29)
[status, result] = self.networkmgr.acquire_userips_cidr(username, clustersize)
gateway = self.networkmgr.get_usergw(username)
vlanid = self.networkmgr.get_uservlanid(username)
logger.info ("create cluster with gateway : %s" % gateway)
self.networkmgr.printpools()
if not status:
return [False, result]
ips = result
clusterid = self._acquire_id()
clusterpath = self.fspath+"/global/users/"+username+"/clusters/"+clustername
hostpath = self.fspath+"/global/users/"+username+"/hosts/"+str(clusterid)+".hosts"
hosts = "127.0.0.1\tlocalhost\n"
containers = []
for i in range(0, clustersize):
onework = workers[random.randint(0, len(workers)-1)]
lxc_name = username + "-" + str(clusterid) + "-" + str(i)
hostname = "host-"+str(i)
logger.info ("create container with : name-%s, username-%s, clustername-%s, clusterid-%s, hostname-%s, ip-%s, gateway-%s, imagename-%s, imageowner-%s, imagetype-%s" % (lxc_name, username, clustername, str(clusterid), hostname, ips[i], gateway, imagename, imageowner, imagetype))
onework.create_container(lxc_name, username, clustername, str(clusterid), hostname, ips[i], gateway, str(vlanid), imagename, imageowner, imagetype )
logger.info("container create success")
hosts = hosts + ips[i].split("/")[0] + "\t" + hostname + "\t" + hostname + "."+clustername + "\n"
containers.append({ 'containername':lxc_name, 'hostname':hostname, 'ip':ips[i], 'host':self.nodemgr.rpc_to_ip(onework), 'image':image['name'], 'lastsave':datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") })
hostfile = open(hostpath, 'w')
hostfile.write(hosts)
hostfile.close()
clusterfile = open(clusterpath, 'w')
info = {'clusterid':clusterid, 'status':'stopped', 'size':clustersize, 'containers':containers, 'nextcid': clustersize, 'create_time':datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'start_time':"------" }
info['services'] = service
clusterfile.write(json.dumps(info))
clusterfile.close()
return [True, info]
def scale_out_cluster(self,clustername,username,image,extensive,onenode):
if not self.is_cluster(clustername,username):
return [False, "cluster:%s not found" % clustername]
workers = self.nodemgr.get_rpcs()
if (len(workers) == 0):
logger.warning("no workers to start containers, scale out failed")
return [False, "no workers are running"]
imagename = image['name']
imageowner = image['owner']
imagetype = image['type']
[status, result] = self.networkmgr.acquire_userips_cidr(username)
gateway = self.networkmgr.get_usergw(username)
vlanid = self.networkmgr.get_uservlanid(username)
self.networkmgr.printpools()
if not status:
return [False, result]
ip = result[0]
[status, clusterinfo] = self.get_clusterinfo(clustername,username)
clusterid = clusterinfo['clusterid']
clusterpath = self.fspath + "/global/users/" + username + "/clusters/" + clustername
hostpath = self.fspath + "/global/users/" + username + "/hosts/" + str(clusterid) + ".hosts"
cid = clusterinfo['nextcid']
[newservices, cmd] = self.servmgr.scale_out(username, clustername, clusterinfo, cid, extensive, onenode, image)
onework = workers[random.randint(0, len(workers)-1)]
lxc_name = username + "-" + str(clusterid) + "-" + str(cid)
hostname = "host-" + str(cid)
onework.create_container(lxc_name, username, clustername, clusterid, hostname, ip, gateway, str(vlanid), imagename, imageowner, imagetype)
if clusterinfo['status'] == "running":
onework.start_container(lxc_name)
onework.start_services(lxc_name, cmd, False)
logger.info("scale out success")
hostfile = open(hostpath, 'a')
hostfile.write(ip.split("/")[0] + "\t" + hostname + "\t" + hostname + "." + clustername + "\n")
hostfile.close()
clusterinfo['nextcid'] = int(clusterinfo['nextcid']) + 1
clusterinfo['size'] = int(clusterinfo['size']) + 1
clusterinfo['services'] = newservices
clusterinfo['containers'].append({'containername':lxc_name, 'hostname':hostname, 'ip':ip, 'host':self.nodemgr.rpc_to_ip(onework), 'image':image['name'], 'lastsave':datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") })
clusterfile = open(clusterpath, 'w')
clusterfile.write(json.dumps(clusterinfo))
clusterfile.close()
return [True, clusterinfo]
def flush_cluster(self,username,clustername,containername):
begintime = datetime.datetime.now()
[status, info] = self.get_clusterinfo(clustername, username)
if not status:
return [False, "cluster not found"]
containers = info['containers']
imagetmp = username + "_tmp_docklet"
for container in containers:
if container['containername'] == containername:
logger.info("container: %s found" % containername)
onework = self.nodemgr.ip_to_rpc(container['host'])
onework.create_image(username,imagetmp,containername)
fimage = container['image']
logger.info("image: %s created" % imagetmp)
break
else:
logger.error("container: %s not found" % containername)
threads = []
for container in containers:
if container['containername'] != containername:
logger.info("container: %s now flush" % container['containername'])
onework = self.nodemgr.ip_to_rpc(container['host'])
#t = threading.Thread(target=onework.flush_container,args=(username,imagetmp,container['containername']))
#threads.append(t)
onework.flush_container(username,imagetmp,container['containername'])
container['lastsave'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
container['image'] = fimage
logger.info("thread for container: %s has been prepared" % container['containername'])
# for t in threads:
# t.start()
# for t in threads:
# t.join()
clusterpath = self.fspath + "/global/users/" + username + "/clusters/" + clustername
infofile = open(clusterpath,'w')
infofile.write(json.dumps(info))
infofile.close()
self.imgmgr.removeImage(username,imagetmp)
endtime = datetime.datetime.now()
dtime = (endtime - begintime).seconds
logger.info("flush spend %s seconds" % dtime)
logger.info("flush success")
def create_image(self,username,clustername,containername,imagename,description,isforce=False):
[status, info] = self.get_clusterinfo(clustername,username)
if not status:
return [False, "cluster not found"]
containers = info['containers']
for container in containers:
if container['containername'] == containername:
logger.info("container: %s found" % containername)
onework = self.nodemgr.ip_to_rpc(container['host'])
res = onework.create_image(username,imagename,containername,description,isforce)
container['lastsave'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
container['image'] = imagename
break
else:
res = [False, "container not found"]
logger.error("container: %s not found" % containername)
clusterpath = self.fspath + "/global/users/" + username + "/clusters/" + clustername
infofile = open(clusterpath, 'w')
infofile.write(json.dumps(info))
infofile.close()
return res
def delete_cluster(self, clustername, username):
[status, info] = self.get_clusterinfo(clustername, username)
if not status:
return [False, "cluster not found"]
if info['status']=='running':
return [False, "cluster is still running, you need to stop it and then delete"]
ips = []
for container in info['containers']:
worker = self.nodemgr.ip_to_rpc(container['host'])
worker.delete_container(container['containername'])
ips.append(container['ip'])
logger.info("delete vcluster and release vcluster ips")
self.networkmgr.release_userips(username, ips)
self.networkmgr.printpools()
os.remove(self.fspath+"/global/users/"+username+"/clusters/"+clustername)
os.remove(self.fspath+"/global/users/"+username+"/hosts/"+str(info['clusterid'])+".hosts")
return [True, "cluster delete"]
def scale_in_cluster(self, clustername, username, containername):
[status, info] = self.get_clusterinfo(clustername, username)
if not status:
return [False, "cluster not found"]
new_containers = []
for container in info['containers']:
if container['containername'] == containername:
worker = self.nodemgr.ip_to_rpc(container['host'])
worker.delete_container(containername)
self.networkmgr.release_userips(username, container['ip'])
self.networkmgr.printpools()
else:
new_containers.append(container)
info['containers'] = new_containers
info['size'] -= 1
cid = containername[containername.rindex("-")+1:]
clusterid = info['clusterid']
clusterpath = self.fspath + "/global/users/" + username + "/clusters/" + clustername
hostpath = self.fspath + "/global/users/" + username + "/hosts/" + str(clusterid) + ".hosts"
clusterfile = open(clusterpath, 'w')
clusterfile.write(json.dumps(info))
clusterfile.close()
hostfile = open(hostpath, 'r')
hostinfo = hostfile.readlines()
hostfile.close()
hostfile = open(hostpath, 'w')
new_hostinfo = []
new_hostinfo.append(hostinfo[0])
for host in hostinfo[1:]:
parts = host.split("\t")
if parts[1][parts[1].rindex("-")+1:] == cid:
pass
else:
new_hostinfo.append(host)
hostfile.writelines(new_hostinfo)
hostfile.close()
return [True, info]
def start_cluster(self, clustername, username):
[status, info] = self.get_clusterinfo(clustername, username)
if not status:
return [False, "cluster not found"]
if info['status'] == 'running':
return [False, "cluster is already running"]
#prepare services
services = self.servmgr.gen_servicecmd(clustername, username, info)
#start containers and services
i = 0
for container in info['containers']:
worker = self.nodemgr.ip_to_rpc(container['host'])
worker.start_container(container['containername'])
worker.start_services(container['containername'], services[i], i == 0)
i = i + 1
info['status']='running'
info['start_time']=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
infofile = open(self.fspath+"/global/users/"+username+"/clusters/"+clustername, 'w')
infofile.write(json.dumps(info))
infofile.close()
return [True, "start cluster"]
def recover_cluster(self, clustername, username):
[status, info] = self.get_clusterinfo(clustername, username)
if not status:
return [False, "cluster not found"]
if info['status'] == 'stopped':
return [True, "cluster no need to start"]
# TODO : need to check and recover gateway of this user
# TODO : need to check and recover proxy of this cluster
# recover containers of this cluster
for container in info['containers']:
worker = self.nodemgr.ip_to_rpc(container['host'])
worker.recover_container(container['containername'])
return [True, "start cluster"]
# maybe here should use cluster id
def stop_cluster(self, clustername, username):
[status, info] = self.get_clusterinfo(clustername, username)
if not status:
return [False, "cluster not found"]
if info['status'] == 'stopped':
return [False, 'cluster is already stopped']
for container in info['containers']:
worker = self.nodemgr.ip_to_rpc(container['host'])
worker.stop_container(container['containername'])
info['status']='stopped'
info['start_time']="------"
infofile = open(self.fspath+"/global/users/"+username+"/clusters/"+clustername, 'w')
infofile.write(json.dumps(info))
infofile.close()
return [True, "start cluster"]
def list_clusters(self, user):
if not os.path.exists(self.fspath+"/global/users/"+user+"/clusters"):
return [True, []]
clusters = os.listdir(self.fspath+"/global/users/"+user+"/clusters")
full_clusters = []
for cluster in clusters:
single_cluster = {}
single_cluster['name'] = cluster
[status, info] = self.get_clusterinfo(cluster,user)
if info['status'] == 'running':
single_cluster['status'] = 'running'
else:
single_cluster['status'] = 'stopped'
full_clusters.append(single_cluster)
return [True, clusters]
def is_cluster(self, clustername, username):
[status, clusters] = self.list_clusters(username)
if clustername in clusters:
return True
else:
return False
# get id from name
def get_clusterid(self, clustername, username):
[status, info] = self.get_clusterinfo(clustername, username)
if not status:
return -1
if 'clusterid' in info:
return int(info['clusterid'])
logger.error ("internal error: cluster:%s info file has no clusterid " % clustername)
return -1
def get_clusterinfo(self, clustername, username):
clusterpath = self.fspath + "/global/users/" + username + "/clusters/" + clustername
if not os.path.isfile(clusterpath):
return [False, "cluster not found"]
infofile = open(clusterpath, 'r')
info = json.loads(infofile.read())
return [True, info]
# acquire cluster id from etcd
def _acquire_id(self):
clusterid = self.etcd.getkey("vcluster/nextid")[1]
self.etcd.setkey("vcluster/nextid", str(int(clusterid)+1))
return int(clusterid)
|
websocketServerModule.py
|
"""
Websocket server communication module
"""
from websocket_server import WebsocketServer
from simplesensor.shared.threadsafeLogger import ThreadsafeLogger
from simplesensor.shared.message import Message
from simplesensor.shared.moduleProcess import ModuleProcess
from distutils.version import LooseVersion, StrictVersion
from .version import __version__
from . import moduleConfigLoader as configLoader
from threading import Thread
import sys
import json
import time
class WebsocketServerModule(ModuleProcess):
def __init__(self, baseConfig, pInBoundQueue, pOutBoundQueue, loggingQueue):
# super(WebsocketServerModule, self).__init__()
ModuleProcess.__init__(self, baseConfig, pInBoundQueue, pOutBoundQueue, loggingQueue)
self.alive = False
self.config = baseConfig
self.inQueue = pInBoundQueue # inQueue are messages from the main process to websocket clients
self.outQueue = pOutBoundQueue # outQueue are messages from clients to main process
self.websocketServer = None
self.loggingQueue = loggingQueue
self.threadProcessQueue = None
# Configs
self.moduleConfig = configLoader.load(self.loggingQueue, __name__)
# Constants
self._port = self.moduleConfig['WebsocketPort']
self._host = self.moduleConfig['WebsocketHost']
# logging setup
self.logger = ThreadsafeLogger(loggingQueue, __name__)
def run(self):
""" Main thread entry point.
Sets up websocket server and event callbacks.
Starts thread to monitor inbound message queue.
"""
if not self.check_ss_version():
# can't run with the wrong version, so return early
return False
self.logger.info("Starting websocket server")
self.alive = True
self.listen()
self.websocketServer = WebsocketServer(self._port, host=self._host)
self.websocketServer.set_fn_new_client(self.new_websocket_client)
self.websocketServer.set_fn_message_received(self.websocket_message_received)
self.websocketServer.run_forever()
def check_ss_version(self):
# check that the minimum required SimpleSensor version is met
self.logger.info('Module version %s' % (__version__))
if LooseVersion(self.config['ss_version']) < LooseVersion(self.moduleConfig['MinSimpleSensorVersion']):
self.logger.error('This module requires SimpleSensor version %s or newer. This instance is running version %s' % (self.moduleConfig['MinSimpleSensorVersion'], self.config['ss_version']))
return False
return True
def new_websocket_client(self, client, server):
""" Client joined callback - called whenever a new client joins. """
self.logger.debug("Client joined")
def websocket_message_received(self, client, server, message):
""" Message received callback - called whenever a new message is received. """
self.logger.debug('Message received: %s'%message)
message = json.loads(message)
self.logger.info("message jsond: %s"%message)
_msg = Message(
topic=message['topic'],
sender_id=message['sender_id']
)
if 'sender_type' in message:
_msg.sender_type=message['sender_type']
if 'recipients' in message:
_msg.recipients=message['recipients']
if 'extended_data' in message:
_msg.extended_data=message['extended_data']
self.put_message(_msg)
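# The incoming payload is expected to be a JSON object with 'topic' and
# 'sender_id', plus optional 'sender_type', 'recipients' and 'extended_data'
# keys; an illustrative client message (field values are examples only):
#
#   {"topic": "button_pressed", "sender_id": "kiosk-frontend",
#    "sender_type": "websocket_client", "extended_data": {"button": "start"}}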
def listen(self):
self.threadProcessQueue = Thread(target=self.process_queue)
self.threadProcessQueue.daemon = True  # setDaemon() is deprecated; set the attribute instead
self.threadProcessQueue.start()
def shutdown(self):
""" Handle shutdown message.
Close and shutdown websocket server.
Join queue processing thread.
"""
self.logger.info("Shutting down websocket server")
try:
self.logger.info("Closing websocket")
self.websocketServer.server_close()
except Exception as e:
self.logger.error("Websocket close error : %s " %e)
try:
self.logger.info("Shutdown websocket")
self.websocketServer.shutdown()
except Exception as e:
self.logger.error("Websocket shutdown error : %s " %e)
self.alive = False
self.threadProcessQueue.join()
time.sleep(1)
self.exit = True
def handle_message(self, message):
""" Send message to listening clients. """
self.websocketServer.send_message_to_all(json.dumps(message.__dict__))
def process_queue(self):
""" Monitor queue of messages from main process to this thread. """
while self.alive:
if not self.inQueue.empty():
try:
message = self.inQueue.get(block=False,timeout=1)
if message is not None:
if message.topic.upper() == "SHUTDOWN":
self.logger.debug("SHUTDOWN handled")
self.shutdown()
else:
self.handle_message(message)
except Exception as e:
self.logger.error("Websocket unable to read queue : %s " %e)
else:
time.sleep(.25)
|
test_dist_graph_store.py
|
import os
os.environ['OMP_NUM_THREADS'] = '1'
import dgl
import sys
import numpy as np
import time
import socket
from scipy import sparse as spsp
from numpy.testing import assert_array_equal
from multiprocessing import Process, Manager, Condition, Value
import multiprocessing as mp
from dgl.heterograph_index import create_unitgraph_from_coo
from dgl.data.utils import load_graphs, save_graphs
from dgl.distributed import DistGraphServer, DistGraph
from dgl.distributed import partition_graph, load_partition, load_partition_book, node_split, edge_split
from numpy.testing import assert_almost_equal
import backend as F
import math
import unittest
import pickle
if os.name != 'nt':
import fcntl
import struct
def get_local_usable_addr():
"""Get local usable IP and port
Returns
-------
str
IP address and port separated by a space, e.g., '192.168.8.12 50051'
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
sock.connect(('10.255.255.255', 1))
ip_addr = sock.getsockname()[0]
except (OSError, ValueError):
ip_addr = '127.0.0.1'
finally:
sock.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
sock.listen(1)
port = sock.getsockname()[1]
sock.close()
return ip_addr + ' ' + str(port)
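# The returned "ip port" string is what prepare_dist() below writes into
# kv_ip_config.txt for the servers and clients to share, e.g.:
#   open("kv_ip_config.txt", "w").write(get_local_usable_addr() + '\n')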
def create_random_graph(n):
arr = (spsp.random(n, n, density=0.001, format='coo', random_state=100) != 0).astype(np.int64)
return dgl.from_scipy(arr)
def run_server(graph_name, server_id, server_count, num_clients, shared_mem):
g = DistGraphServer(server_id, "kv_ip_config.txt", server_count, num_clients,
'/tmp/dist_graph/{}.json'.format(graph_name),
disable_shared_mem=not shared_mem)
print('start server', server_id)
g.start()
def emb_init(shape, dtype):
return F.zeros(shape, dtype, F.cpu())
def rand_init(shape, dtype):
return F.tensor(np.random.normal(size=shape), F.float32)
def check_dist_graph_empty(g, num_clients, num_nodes, num_edges):
# Test API
assert g.number_of_nodes() == num_nodes
assert g.number_of_edges() == num_edges
# Test init node data
new_shape = (g.number_of_nodes(), 2)
g.ndata['test1'] = dgl.distributed.DistTensor(new_shape, F.int32)
nids = F.arange(0, int(g.number_of_nodes() / 2))
feats = g.ndata['test1'][nids]
assert np.all(F.asnumpy(feats) == 0)
# create a tensor and destroy a tensor and create it again.
test3 = dgl.distributed.DistTensor(new_shape, F.float32, 'test3', init_func=rand_init)
del test3
test3 = dgl.distributed.DistTensor((g.number_of_nodes(), 3), F.float32, 'test3')
del test3
# Test write data
new_feats = F.ones((len(nids), 2), F.int32, F.cpu())
g.ndata['test1'][nids] = new_feats
feats = g.ndata['test1'][nids]
assert np.all(F.asnumpy(feats) == 1)
# Test metadata operations.
assert g.node_attr_schemes()['test1'].dtype == F.int32
print('end')
def run_client_empty(graph_name, part_id, server_count, num_clients, num_nodes, num_edges):
time.sleep(5)
os.environ['DGL_NUM_SERVER'] = str(server_count)
dgl.distributed.initialize("kv_ip_config.txt")
gpb, graph_name, _, _ = load_partition_book('/tmp/dist_graph/{}.json'.format(graph_name),
part_id, None)
g = DistGraph(graph_name, gpb=gpb)
check_dist_graph_empty(g, num_clients, num_nodes, num_edges)
def check_server_client_empty(shared_mem, num_servers, num_clients):
prepare_dist()
g = create_random_graph(10000)
# Partition the graph
num_parts = 1
graph_name = 'dist_graph_test_1'
partition_graph(g, graph_name, num_parts, '/tmp/dist_graph')
# let's just test on one partition for now.
# We cannot run multiple servers and clients on the same machine.
serv_ps = []
ctx = mp.get_context('spawn')
for serv_id in range(num_servers):
p = ctx.Process(target=run_server, args=(graph_name, serv_id, num_servers,
num_clients, shared_mem))
serv_ps.append(p)
p.start()
cli_ps = []
for cli_id in range(num_clients):
print('start client', cli_id)
p = ctx.Process(target=run_client_empty, args=(graph_name, 0, num_servers, num_clients,
g.number_of_nodes(), g.number_of_edges()))
p.start()
cli_ps.append(p)
for p in cli_ps:
p.join()
for p in serv_ps:
p.join()
print('clients have terminated')
def run_client(graph_name, part_id, server_count, num_clients, num_nodes, num_edges):
time.sleep(5)
os.environ['DGL_NUM_SERVER'] = str(server_count)
dgl.distributed.initialize("kv_ip_config.txt")
gpb, graph_name, _, _ = load_partition_book('/tmp/dist_graph/{}.json'.format(graph_name),
part_id, None)
g = DistGraph(graph_name, gpb=gpb)
check_dist_graph(g, num_clients, num_nodes, num_edges)
def run_emb_client(graph_name, part_id, server_count, num_clients, num_nodes, num_edges):
time.sleep(5)
os.environ['DGL_NUM_SERVER'] = str(server_count)
dgl.distributed.initialize("kv_ip_config.txt")
gpb, graph_name, _, _ = load_partition_book('/tmp/dist_graph/{}.json'.format(graph_name),
part_id, None)
g = DistGraph(graph_name, gpb=gpb)
check_dist_emb(g, num_clients, num_nodes, num_edges)
def run_client_hierarchy(graph_name, part_id, server_count, node_mask, edge_mask, return_dict):
time.sleep(5)
os.environ['DGL_NUM_SERVER'] = str(server_count)
dgl.distributed.initialize("kv_ip_config.txt")
gpb, graph_name, _, _ = load_partition_book('/tmp/dist_graph/{}.json'.format(graph_name),
part_id, None)
g = DistGraph(graph_name, gpb=gpb)
node_mask = F.tensor(node_mask)
edge_mask = F.tensor(edge_mask)
nodes = node_split(node_mask, g.get_partition_book(), node_trainer_ids=g.ndata['trainer_id'])
edges = edge_split(edge_mask, g.get_partition_book(), edge_trainer_ids=g.edata['trainer_id'])
rank = g.rank()
return_dict[rank] = (nodes, edges)
def check_dist_emb(g, num_clients, num_nodes, num_edges):
from dgl.distributed.optim import SparseAdagrad
from dgl.distributed import DistEmbedding
# Test sparse emb
try:
emb = DistEmbedding(g.number_of_nodes(), 1, 'emb1', emb_init)
nids = F.arange(0, int(g.number_of_nodes()))
lr = 0.001
optimizer = SparseAdagrad([emb], lr=lr)
with F.record_grad():
feats = emb(nids)
assert np.all(F.asnumpy(feats) == np.zeros((len(nids), 1)))
loss = F.sum(feats + 1, 0)
loss.backward()
optimizer.step()
feats = emb(nids)
if num_clients == 1:
assert_almost_equal(F.asnumpy(feats), np.ones((len(nids), 1)) * -lr)
rest = np.setdiff1d(np.arange(g.number_of_nodes()), F.asnumpy(nids))
feats1 = emb(rest)
assert np.all(F.asnumpy(feats1) == np.zeros((len(rest), 1)))
policy = dgl.distributed.PartitionPolicy('node', g.get_partition_book())
grad_sum = dgl.distributed.DistTensor((g.number_of_nodes(), 1), F.float32,
'emb1_sum', policy)
if num_clients == 1:
assert np.all(F.asnumpy(grad_sum[nids]) == np.ones((len(nids), 1)) * num_clients)
assert np.all(F.asnumpy(grad_sum[rest]) == np.zeros((len(rest), 1)))
emb = DistEmbedding(g.number_of_nodes(), 1, 'emb2', emb_init)
with F.no_grad():
feats1 = emb(nids)
assert np.all(F.asnumpy(feats1) == 0)
optimizer = SparseAdagrad([emb], lr=lr)
with F.record_grad():
feats1 = emb(nids)
feats2 = emb(nids)
feats = F.cat([feats1, feats2], 0)
assert np.all(F.asnumpy(feats) == np.zeros((len(nids) * 2, 1)))
loss = F.sum(feats + 1, 0)
loss.backward()
optimizer.step()
with F.no_grad():
feats = emb(nids)
if num_clients == 1:
assert_almost_equal(F.asnumpy(feats), np.ones((len(nids), 1)) * 1 * -lr)
rest = np.setdiff1d(np.arange(g.number_of_nodes()), F.asnumpy(nids))
feats1 = emb(rest)
assert np.all(F.asnumpy(feats1) == np.zeros((len(rest), 1)))
except NotImplementedError as e:
pass
except Exception as e:
print(e)
sys.exit(-1)
def check_dist_graph(g, num_clients, num_nodes, num_edges):
# Test API
assert g.number_of_nodes() == num_nodes
assert g.number_of_edges() == num_edges
# Test reading node data
nids = F.arange(0, int(g.number_of_nodes() / 2))
feats1 = g.ndata['features'][nids]
feats = F.squeeze(feats1, 1)
assert np.all(F.asnumpy(feats == nids))
# Test reading edge data
eids = F.arange(0, int(g.number_of_edges() / 2))
feats1 = g.edata['features'][eids]
feats = F.squeeze(feats1, 1)
assert np.all(F.asnumpy(feats == eids))
# Test init node data
new_shape = (g.number_of_nodes(), 2)
g.ndata['test1'] = dgl.distributed.DistTensor(new_shape, F.int32)
feats = g.ndata['test1'][nids]
assert np.all(F.asnumpy(feats) == 0)
# reference a tensor that already exists by name
test2 = dgl.distributed.DistTensor(new_shape, F.float32, 'test2', init_func=rand_init)
test3 = dgl.distributed.DistTensor(new_shape, F.float32, 'test2')
assert np.all(F.asnumpy(test2[nids]) == F.asnumpy(test3[nids]))
# create a tensor and destroy a tensor and create it again.
test3 = dgl.distributed.DistTensor(new_shape, F.float32, 'test3', init_func=rand_init)
del test3
test3 = dgl.distributed.DistTensor((g.number_of_nodes(), 3), F.float32, 'test3')
del test3
# add tests for anonymous distributed tensor.
test3 = dgl.distributed.DistTensor(new_shape, F.float32, init_func=rand_init)
data = test3[0:10]
test4 = dgl.distributed.DistTensor(new_shape, F.float32, init_func=rand_init)
del test3
test5 = dgl.distributed.DistTensor(new_shape, F.float32, init_func=rand_init)
assert np.sum(F.asnumpy(test5[0:10] != data)) > 0
# test a persistent tensor
test4 = dgl.distributed.DistTensor(new_shape, F.float32, 'test4', init_func=rand_init,
persistent=True)
del test4
# recreating the persistent tensor 'test4' with a different shape is expected to fail
try:
test4 = dgl.distributed.DistTensor((g.number_of_nodes(), 3), F.float32, 'test4')
raise Exception('')
except:
pass
# Test write data
new_feats = F.ones((len(nids), 2), F.int32, F.cpu())
g.ndata['test1'][nids] = new_feats
feats = g.ndata['test1'][nids]
assert np.all(F.asnumpy(feats) == 1)
# Test metadata operations.
assert len(g.ndata['features']) == g.number_of_nodes()
assert g.ndata['features'].shape == (g.number_of_nodes(), 1)
assert g.ndata['features'].dtype == F.int64
assert g.node_attr_schemes()['features'].dtype == F.int64
assert g.node_attr_schemes()['test1'].dtype == F.int32
assert g.node_attr_schemes()['features'].shape == (1,)
selected_nodes = np.random.randint(0, 100, size=g.number_of_nodes()) > 30
# Test node split
nodes = node_split(selected_nodes, g.get_partition_book())
nodes = F.asnumpy(nodes)
# We only have one partition, so the local nodes are basically all nodes in the graph.
local_nids = np.arange(g.number_of_nodes())
for n in nodes:
assert n in local_nids
print('end')
def check_dist_emb_server_client(shared_mem, num_servers, num_clients):
prepare_dist()
g = create_random_graph(10000)
# Partition the graph
num_parts = 1
graph_name = 'dist_graph_test_2'
g.ndata['features'] = F.unsqueeze(F.arange(0, g.number_of_nodes()), 1)
g.edata['features'] = F.unsqueeze(F.arange(0, g.number_of_edges()), 1)
partition_graph(g, graph_name, num_parts, '/tmp/dist_graph')
# let's just test on one partition for now.
# We cannot run multiple servers and clients on the same machine.
serv_ps = []
ctx = mp.get_context('spawn')
for serv_id in range(num_servers):
p = ctx.Process(target=run_server, args=(graph_name, serv_id, num_servers,
num_clients, shared_mem))
serv_ps.append(p)
p.start()
cli_ps = []
for cli_id in range(num_clients):
print('start client', cli_id)
p = ctx.Process(target=run_emb_client, args=(graph_name, 0, num_servers, num_clients,
g.number_of_nodes(),
g.number_of_edges()))
p.start()
cli_ps.append(p)
for p in cli_ps:
p.join()
assert p.exitcode == 0
for p in serv_ps:
p.join()
print('clients have terminated')
def check_server_client(shared_mem, num_servers, num_clients):
prepare_dist()
g = create_random_graph(10000)
# Partition the graph
num_parts = 1
graph_name = 'dist_graph_test_2'
g.ndata['features'] = F.unsqueeze(F.arange(0, g.number_of_nodes()), 1)
g.edata['features'] = F.unsqueeze(F.arange(0, g.number_of_edges()), 1)
partition_graph(g, graph_name, num_parts, '/tmp/dist_graph')
# let's just test on one partition for now.
# We cannot run multiple servers and clients on the same machine.
serv_ps = []
ctx = mp.get_context('spawn')
for serv_id in range(num_servers):
p = ctx.Process(target=run_server, args=(graph_name, serv_id, num_servers,
num_clients, shared_mem))
serv_ps.append(p)
p.start()
cli_ps = []
for cli_id in range(num_clients):
print('start client', cli_id)
p = ctx.Process(target=run_client, args=(graph_name, 0, num_servers, num_clients, g.number_of_nodes(),
g.number_of_edges()))
p.start()
cli_ps.append(p)
for p in cli_ps:
p.join()
for p in serv_ps:
p.join()
print('clients have terminated')
def check_server_client_hierarchy(shared_mem, num_servers, num_clients):
prepare_dist()
g = create_random_graph(10000)
# Partition the graph
num_parts = 1
graph_name = 'dist_graph_test_2'
g.ndata['features'] = F.unsqueeze(F.arange(0, g.number_of_nodes()), 1)
g.edata['features'] = F.unsqueeze(F.arange(0, g.number_of_edges()), 1)
partition_graph(g, graph_name, num_parts, '/tmp/dist_graph', num_trainers_per_machine=num_clients)
# let's just test on one partition for now.
# We cannot run multiple servers and clients on the same machine.
serv_ps = []
ctx = mp.get_context('spawn')
for serv_id in range(num_servers):
p = ctx.Process(target=run_server, args=(graph_name, serv_id, num_servers,
num_clients, shared_mem))
serv_ps.append(p)
p.start()
cli_ps = []
manager = mp.Manager()
return_dict = manager.dict()
node_mask = np.zeros((g.number_of_nodes(),), np.int32)
edge_mask = np.zeros((g.number_of_edges(),), np.int32)
nodes = np.random.choice(g.number_of_nodes(), g.number_of_nodes() // 10, replace=False)
edges = np.random.choice(g.number_of_edges(), g.number_of_edges() // 10, replace=False)
node_mask[nodes] = 1
edge_mask[edges] = 1
nodes = np.sort(nodes)
edges = np.sort(edges)
for cli_id in range(num_clients):
print('start client', cli_id)
p = ctx.Process(target=run_client_hierarchy, args=(graph_name, 0, num_servers,
node_mask, edge_mask, return_dict))
p.start()
cli_ps.append(p)
for p in cli_ps:
p.join()
for p in serv_ps:
p.join()
nodes1 = []
edges1 = []
for n, e in return_dict.values():
nodes1.append(n)
edges1.append(e)
nodes1, _ = F.sort_1d(F.cat(nodes1, 0))
edges1, _ = F.sort_1d(F.cat(edges1, 0))
assert np.all(F.asnumpy(nodes1) == nodes)
assert np.all(F.asnumpy(edges1) == edges)
print('clients have terminated')
def run_client_hetero(graph_name, part_id, server_count, num_clients, num_nodes, num_edges):
time.sleep(5)
os.environ['DGL_NUM_SERVER'] = str(server_count)
dgl.distributed.initialize("kv_ip_config.txt")
gpb, graph_name, _, _ = load_partition_book('/tmp/dist_graph/{}.json'.format(graph_name),
part_id, None)
g = DistGraph(graph_name, gpb=gpb)
check_dist_graph_hetero(g, num_clients, num_nodes, num_edges)
def create_random_hetero():
num_nodes = {'n1': 10000, 'n2': 10010, 'n3': 10020}
etypes = [('n1', 'r1', 'n2'),
('n1', 'r2', 'n3'),
('n2', 'r3', 'n3')]
edges = {}
for etype in etypes:
src_ntype, _, dst_ntype = etype
arr = spsp.random(num_nodes[src_ntype], num_nodes[dst_ntype], density=0.001, format='coo',
random_state=100)
edges[etype] = (arr.row, arr.col)
g = dgl.heterograph(edges, num_nodes)
g.nodes['n1'].data['feat'] = F.unsqueeze(F.arange(0, g.number_of_nodes('n1')), 1)
g.edges['r1'].data['feat'] = F.unsqueeze(F.arange(0, g.number_of_edges('r1')), 1)
return g
def check_dist_graph_hetero(g, num_clients, num_nodes, num_edges):
# Test API
for ntype in num_nodes:
assert ntype in g.ntypes
assert num_nodes[ntype] == g.number_of_nodes(ntype)
for etype in num_edges:
assert etype in g.etypes
assert num_edges[etype] == g.number_of_edges(etype)
assert g.number_of_nodes() == sum([num_nodes[ntype] for ntype in num_nodes])
assert g.number_of_edges() == sum([num_edges[etype] for etype in num_edges])
# Test reading node data
nids = F.arange(0, int(g.number_of_nodes('n1') / 2))
feats1 = g.nodes['n1'].data['feat'][nids]
feats = F.squeeze(feats1, 1)
assert np.all(F.asnumpy(feats == nids))
# Test reading edge data
eids = F.arange(0, int(g.number_of_edges('r1') / 2))
feats1 = g.edges['r1'].data['feat'][eids]
feats = F.squeeze(feats1, 1)
assert np.all(F.asnumpy(feats == eids))
# Test init node data
new_shape = (g.number_of_nodes('n1'), 2)
g.nodes['n1'].data['test1'] = dgl.distributed.DistTensor(new_shape, F.int32)
feats = g.nodes['n1'].data['test1'][nids]
assert np.all(F.asnumpy(feats) == 0)
# create a tensor and destroy a tensor and create it again.
test3 = dgl.distributed.DistTensor(new_shape, F.float32, 'test3', init_func=rand_init)
del test3
test3 = dgl.distributed.DistTensor((g.number_of_nodes('n1'), 3), F.float32, 'test3')
del test3
# add tests for anonymous distributed tensor.
test3 = dgl.distributed.DistTensor(new_shape, F.float32, init_func=rand_init)
data = test3[0:10]
test4 = dgl.distributed.DistTensor(new_shape, F.float32, init_func=rand_init)
del test3
test5 = dgl.distributed.DistTensor(new_shape, F.float32, init_func=rand_init)
assert np.sum(F.asnumpy(test5[0:10] != data)) > 0
# test a persistent tensor
test4 = dgl.distributed.DistTensor(new_shape, F.float32, 'test4', init_func=rand_init,
persistent=True)
del test4
# recreating the persistent tensor 'test4' with a different shape is expected to fail
try:
test4 = dgl.distributed.DistTensor((g.number_of_nodes('n1'), 3), F.float32, 'test4')
raise Exception('')
except:
pass
# Test write data
new_feats = F.ones((len(nids), 2), F.int32, F.cpu())
g.nodes['n1'].data['test1'][nids] = new_feats
feats = g.nodes['n1'].data['test1'][nids]
assert np.all(F.asnumpy(feats) == 1)
# Test metadata operations.
assert len(g.nodes['n1'].data['feat']) == g.number_of_nodes('n1')
assert g.nodes['n1'].data['feat'].shape == (g.number_of_nodes('n1'), 1)
assert g.nodes['n1'].data['feat'].dtype == F.int64
selected_nodes = np.random.randint(0, 100, size=g.number_of_nodes('n1')) > 30
# Test node split
nodes = node_split(selected_nodes, g.get_partition_book(), ntype='n1')
nodes = F.asnumpy(nodes)
# We only have one partition, so the local nodes are basically all nodes in the graph.
local_nids = np.arange(g.number_of_nodes('n1'))
for n in nodes:
assert n in local_nids
print('end')
def check_server_client_hetero(shared_mem, num_servers, num_clients):
prepare_dist()
g = create_random_hetero()
# Partition the graph
num_parts = 1
graph_name = 'dist_graph_test_3'
partition_graph(g, graph_name, num_parts, '/tmp/dist_graph')
# let's just test on one partition for now.
# We cannot run multiple servers and clients on the same machine.
serv_ps = []
ctx = mp.get_context('spawn')
for serv_id in range(num_servers):
p = ctx.Process(target=run_server, args=(graph_name, serv_id, num_servers,
num_clients, shared_mem))
serv_ps.append(p)
p.start()
cli_ps = []
num_nodes = {ntype: g.number_of_nodes(ntype) for ntype in g.ntypes}
num_edges = {etype: g.number_of_edges(etype) for etype in g.etypes}
for cli_id in range(num_clients):
print('start client', cli_id)
p = ctx.Process(target=run_client_hetero, args=(graph_name, 0, num_servers, num_clients, num_nodes,
num_edges))
p.start()
cli_ps.append(p)
for p in cli_ps:
p.join()
for p in serv_ps:
p.join()
print('clients have terminated')
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support some of operations in DistGraph")
def test_server_client():
os.environ['DGL_DIST_MODE'] = 'distributed'
check_server_client_hierarchy(False, 1, 4)
check_server_client_empty(True, 1, 1)
check_server_client_hetero(True, 1, 1)
check_server_client_hetero(False, 1, 1)
check_server_client(True, 1, 1)
check_server_client(False, 1, 1)
check_server_client(True, 2, 2)
check_server_client(False, 2, 2)
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support distributed DistEmbedding")
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Mxnet doesn't support distributed DistEmbedding")
def test_dist_emb_server_client():
os.environ['DGL_DIST_MODE'] = 'distributed'
check_dist_emb_server_client(True, 1, 1)
check_dist_emb_server_client(False, 1, 1)
check_dist_emb_server_client(True, 2, 2)
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support some of operations in DistGraph")
def test_standalone():
os.environ['DGL_DIST_MODE'] = 'standalone'
g = create_random_graph(10000)
# Partition the graph
num_parts = 1
graph_name = 'dist_graph_test_3'
g.ndata['features'] = F.unsqueeze(F.arange(0, g.number_of_nodes()), 1)
g.edata['features'] = F.unsqueeze(F.arange(0, g.number_of_edges()), 1)
partition_graph(g, graph_name, num_parts, '/tmp/dist_graph')
dgl.distributed.initialize("kv_ip_config.txt")
dist_g = DistGraph(graph_name, part_config='/tmp/dist_graph/{}.json'.format(graph_name))
try:
check_dist_graph(dist_g, 1, g.number_of_nodes(), g.number_of_edges())
except Exception as e:
print(e)
dgl.distributed.exit_client() # this is needed since there are two tests here in one process
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support distributed DistEmbedding")
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Mxnet doesn't support distributed DistEmbedding")
def test_standalone_node_emb():
os.environ['DGL_DIST_MODE'] = 'standalone'
g = create_random_graph(10000)
# Partition the graph
num_parts = 1
graph_name = 'dist_graph_test_3'
g.ndata['features'] = F.unsqueeze(F.arange(0, g.number_of_nodes()), 1)
g.edata['features'] = F.unsqueeze(F.arange(0, g.number_of_edges()), 1)
partition_graph(g, graph_name, num_parts, '/tmp/dist_graph')
dgl.distributed.initialize("kv_ip_config.txt")
dist_g = DistGraph(graph_name, part_config='/tmp/dist_graph/{}.json'.format(graph_name))
try:
check_dist_emb(dist_g, 1, g.number_of_nodes(), g.number_of_edges())
except Exception as e:
print(e)
dgl.distributed.exit_client() # this is needed since there are two tests here in one process
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
def test_split():
#prepare_dist()
g = create_random_graph(10000)
num_parts = 4
num_hops = 2
partition_graph(g, 'dist_graph_test', num_parts, '/tmp/dist_graph', num_hops=num_hops, part_method='metis')
node_mask = np.random.randint(0, 100, size=g.number_of_nodes()) > 30
edge_mask = np.random.randint(0, 100, size=g.number_of_edges()) > 30
selected_nodes = np.nonzero(node_mask)[0]
selected_edges = np.nonzero(edge_mask)[0]
# The code now collects the roles of all client processes and uses the information
# to determine how to split the workloads. This simulates the multi-client
# use case.
def set_roles(num_clients):
dgl.distributed.role.CUR_ROLE = 'default'
dgl.distributed.role.GLOBAL_RANK = {i:i for i in range(num_clients)}
dgl.distributed.role.PER_ROLE_RANK['default'] = {i:i for i in range(num_clients)}
for i in range(num_parts):
set_roles(num_parts)
part_g, node_feats, edge_feats, gpb, _, _, _ = load_partition('/tmp/dist_graph/dist_graph_test.json', i)
local_nids = F.nonzero_1d(part_g.ndata['inner_node'])
local_nids = F.gather_row(part_g.ndata[dgl.NID], local_nids)
nodes1 = np.intersect1d(selected_nodes, F.asnumpy(local_nids))
nodes2 = node_split(node_mask, gpb, rank=i, force_even=False)
assert np.all(np.sort(nodes1) == np.sort(F.asnumpy(nodes2)))
local_nids = F.asnumpy(local_nids)
for n in nodes1:
assert n in local_nids
set_roles(num_parts * 2)
nodes3 = node_split(node_mask, gpb, rank=i * 2, force_even=False)
nodes4 = node_split(node_mask, gpb, rank=i * 2 + 1, force_even=False)
nodes5 = F.cat([nodes3, nodes4], 0)
assert np.all(np.sort(nodes1) == np.sort(F.asnumpy(nodes5)))
set_roles(num_parts)
local_eids = F.nonzero_1d(part_g.edata['inner_edge'])
local_eids = F.gather_row(part_g.edata[dgl.EID], local_eids)
edges1 = np.intersect1d(selected_edges, F.asnumpy(local_eids))
edges2 = edge_split(edge_mask, gpb, rank=i, force_even=False)
assert np.all(np.sort(edges1) == np.sort(F.asnumpy(edges2)))
local_eids = F.asnumpy(local_eids)
for e in edges1:
assert e in local_eids
set_roles(num_parts * 2)
edges3 = edge_split(edge_mask, gpb, rank=i * 2, force_even=False)
edges4 = edge_split(edge_mask, gpb, rank=i * 2 + 1, force_even=False)
edges5 = F.cat([edges3, edges4], 0)
assert np.all(np.sort(edges1) == np.sort(F.asnumpy(edges5)))
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
def test_split_even():
#prepare_dist(1)
g = create_random_graph(10000)
num_parts = 4
num_hops = 2
partition_graph(g, 'dist_graph_test', num_parts, '/tmp/dist_graph', num_hops=num_hops, part_method='metis')
node_mask = np.random.randint(0, 100, size=g.number_of_nodes()) > 30
edge_mask = np.random.randint(0, 100, size=g.number_of_edges()) > 30
selected_nodes = np.nonzero(node_mask)[0]
selected_edges = np.nonzero(edge_mask)[0]
all_nodes1 = []
all_nodes2 = []
all_edges1 = []
all_edges2 = []
# The code now collects the roles of all client processes and uses the information
# to determine how to split the workloads. This simulates the multi-client
# use case.
def set_roles(num_clients):
dgl.distributed.role.CUR_ROLE = 'default'
dgl.distributed.role.GLOBAL_RANK = {i:i for i in range(num_clients)}
dgl.distributed.role.PER_ROLE_RANK['default'] = {i:i for i in range(num_clients)}
for i in range(num_parts):
set_roles(num_parts)
part_g, node_feats, edge_feats, gpb, _, _, _ = load_partition('/tmp/dist_graph/dist_graph_test.json', i)
local_nids = F.nonzero_1d(part_g.ndata['inner_node'])
local_nids = F.gather_row(part_g.ndata[dgl.NID], local_nids)
nodes = node_split(node_mask, gpb, rank=i, force_even=True)
all_nodes1.append(nodes)
subset = np.intersect1d(F.asnumpy(nodes), F.asnumpy(local_nids))
print('part {} get {} nodes and {} are in the partition'.format(i, len(nodes), len(subset)))
set_roles(num_parts * 2)
nodes1 = node_split(node_mask, gpb, rank=i * 2, force_even=True)
nodes2 = node_split(node_mask, gpb, rank=i * 2 + 1, force_even=True)
nodes3, _ = F.sort_1d(F.cat([nodes1, nodes2], 0))
all_nodes2.append(nodes3)
subset = np.intersect1d(F.asnumpy(nodes), F.asnumpy(nodes3))
print('intersection has', len(subset))
set_roles(num_parts)
local_eids = F.nonzero_1d(part_g.edata['inner_edge'])
local_eids = F.gather_row(part_g.edata[dgl.EID], local_eids)
edges = edge_split(edge_mask, gpb, rank=i, force_even=True)
all_edges1.append(edges)
subset = np.intersect1d(F.asnumpy(edges), F.asnumpy(local_eids))
print('part {} get {} edges and {} are in the partition'.format(i, len(edges), len(subset)))
set_roles(num_parts * 2)
edges1 = edge_split(edge_mask, gpb, rank=i * 2, force_even=True)
edges2 = edge_split(edge_mask, gpb, rank=i * 2 + 1, force_even=True)
edges3, _ = F.sort_1d(F.cat([edges1, edges2], 0))
all_edges2.append(edges3)
subset = np.intersect1d(F.asnumpy(edges), F.asnumpy(edges3))
print('intersection has', len(subset))
all_nodes1 = F.cat(all_nodes1, 0)
all_edges1 = F.cat(all_edges1, 0)
all_nodes2 = F.cat(all_nodes2, 0)
all_edges2 = F.cat(all_edges2, 0)
all_nodes = np.nonzero(node_mask)[0]
all_edges = np.nonzero(edge_mask)[0]
assert np.all(all_nodes == F.asnumpy(all_nodes1))
assert np.all(all_edges == F.asnumpy(all_edges1))
assert np.all(all_nodes == F.asnumpy(all_nodes2))
assert np.all(all_edges == F.asnumpy(all_edges2))
def prepare_dist():
ip_config = open("kv_ip_config.txt", "w")
ip_addr = get_local_usable_addr()
ip_config.write('{}\n'.format(ip_addr))
ip_config.close()
if __name__ == '__main__':
os.makedirs('/tmp/dist_graph', exist_ok=True)
test_dist_emb_server_client()
test_server_client()
test_split()
test_split_even()
test_standalone()
test_standalone_node_emb()
|
mock_server.py
|
import logging
import queue
import traceback
from http.server import BaseHTTPRequestHandler, HTTPServer
from multiprocessing import Process, Queue
from .pact_request_handler import PactRequestHandler
_providers = {}
log = logging.getLogger(__name__)
def getMockServer(pact):
if pact.provider.name not in _providers:
_providers[pact.provider.name] = Server(pact)
return _providers[pact.provider.name]
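# A minimal usage sketch (illustrative only; `pact` is assumed to expose
# provider.name, port and log_dir as used by the classes below):
#
#   server = getMockServer(pact)
#   server.setup(interactions)  # queue the interactions the consumer should hit
#   ... exercise the consumer against the mock endpoint ...
#   server.verify()             # raises if the handler reported errors or failures
#   server.terminate()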
class Server:
def __init__(self, pact):
self.pact = pact
self.interactions = Queue()
self.results = Queue()
self.process = Process(target=run_server, args=(pact, self.interactions, self.results))
self.process.start()
def setup(self, interactions):
for interaction in interactions:
self.interactions.put_nowait(interaction)
def verify(self):
while not self.results.empty():
result = self.results.get()
if result['status'] == 'error':
raise MockServer.Error(result['reason'])
if result['status'] == 'failed':
raise AssertionError(result['reason'])
def terminate(self):
self.process.terminate()
def run_server(pact, interactions, results):
httpd = MockServer(pact, interactions, results)
httpd.serve_forever()
class MockServer(HTTPServer):
def __init__(self, pact, interactions, results):
self.pact = pact
self.incoming_interactions = interactions
self.outgoing_results = results
server_address = ('', pact.port)
super().__init__(server_address, MockHTTPRequestHandler)
self.interactions = []
self.log = logging.getLogger(__name__ + '.' + pact.provider.name)
self.log.addHandler(logging.FileHandler(f'{pact.log_dir}/{pact.provider.name}.log'))
self.log.setLevel(logging.DEBUG)
self.log.propagate = False
class Error(Exception):
pass
class MockHTTPRequestHandler(BaseHTTPRequestHandler, PactRequestHandler):
def __init__(self, request, client_address, server):
self.response_status_code = None
self.response_headers = {}
self.response_body = None
PactRequestHandler.__init__(self, server.pact)
BaseHTTPRequestHandler.__init__(self, request, client_address, server)
def error_result(self, message, content='', status='error', status_code=500):
self.server.outgoing_results.put({'status': status, 'reason': message})
self.response_status_code = status_code
self.response_headers = {'Content-Type': 'text/plain; charset=utf-8'}
self.response_body = (content or message).encode('utf8')
def run_request(self, method):
try:
self.body = None
for header in self.headers:
if header.lower() == 'content-length':
self.body = self.rfile.read(int(self.headers[header]))
self.validate_request(method)
except AssertionError as e:
self.error_result(str(e))
except Exception as e:
self.error_result(f'Internal Error: {e}', traceback.format_exc())
self.send_response(self.response_status_code)
for header in self.response_headers:
self.send_header(header, self.response_headers[header])
self.end_headers()
if self.response_body:
self.wfile.write(self.response_body)
def get_interaction(self, path):
try:
interaction = self.server.incoming_interactions.get(False)
except queue.Empty:
raise AssertionError(f'Request at {path} received but no interaction registered') from None
return interaction
def handle_success(self, interaction):
self.server.outgoing_results.put({'status': 'success'})
def handle_failure(self, reason):
self.error_result(reason, status='failed', status_code=418)
def respond_for_interaction(self, interaction):
self.response_status_code = interaction['response']['status']
if 'headers' in interaction['response']:
self.response_headers.update(interaction['response']['headers'])
if 'body' in interaction['response']:
self.response_body = self.handle_response_encoding(interaction['response'], self.response_headers)
def do_DELETE(self):
self.run_request('DELETE')
def do_GET(self):
self.run_request('GET')
def do_HEAD(self):
self.run_request('HEAD')
def do_POST(self):
self.run_request('POST')
def do_PUT(self):
self.run_request('PUT')
def do_PATCH(self):
self.run_request('PATCH')
def log_message(self, format, *args):
self.server.log.info("MockServer %s\n" % format % args)
|
utils.py
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import datetime
import functools
import importlib
import os
import queue
import sys
import tempfile
import time
import traceback
import unittest
import warnings
from functools import partial
from subprocess import PIPE, Popen
from typing import Callable, Optional, Tuple
from urllib.error import HTTPError, URLError
import numpy as np
import torch
import torch.distributed as dist
from monai.config import NdarrayTensor
from monai.config.deviceconfig import USE_COMPILED
from monai.config.type_definitions import NdarrayOrTensor
from monai.data import create_test_image_2d, create_test_image_3d
from monai.networks import convert_to_torchscript
from monai.utils import optional_import
from monai.utils.module import pytorch_after, version_leq
from monai.utils.type_conversion import convert_data_type
nib, _ = optional_import("nibabel")
quick_test_var = "QUICKTEST"
_tf32_enabled = None
def clone(data: NdarrayTensor) -> NdarrayTensor:
"""
Clone data independent of type.
Args:
data (NdarrayTensor): This can be a Pytorch Tensor or numpy array.
Returns:
Any: Cloned data object
"""
return copy.deepcopy(data)
def assert_allclose(
actual: NdarrayOrTensor,
desired: NdarrayOrTensor,
type_test: bool = True,
device_test: bool = False,
*args,
**kwargs,
):
"""
Assert that types and all values of two data objects are close.
Args:
actual: Pytorch Tensor or numpy array for comparison.
desired: Pytorch Tensor or numpy array to compare against.
type_test: whether to test that `actual` and `desired` are both numpy arrays or torch tensors.
device_test: whether to test the device property.
args: extra arguments to pass on to `np.testing.assert_allclose`.
kwargs: extra arguments to pass on to `np.testing.assert_allclose`.
"""
if type_test:
# check both actual and desired are of the same type
np.testing.assert_equal(isinstance(actual, np.ndarray), isinstance(desired, np.ndarray), "numpy type")
np.testing.assert_equal(isinstance(actual, torch.Tensor), isinstance(desired, torch.Tensor), "torch type")
if isinstance(desired, torch.Tensor) or isinstance(actual, torch.Tensor):
if device_test:
np.testing.assert_equal(str(actual.device), str(desired.device), "torch device check") # type: ignore
actual = actual.cpu().numpy() if isinstance(actual, torch.Tensor) else actual
desired = desired.cpu().numpy() if isinstance(desired, torch.Tensor) else desired
np.testing.assert_allclose(actual, desired, *args, **kwargs)
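# Illustrative usage sketch (not part of the original test utilities): extra keyword
# arguments are forwarded to np.testing.assert_allclose, e.g.
#   assert_allclose(torch.tensor([1.0, 2.0]), torch.tensor([1.0, 2.0 + 1e-8]),
#                   type_test=True, rtol=1e-5, atol=1e-7)
# passes because both inputs are torch tensors (type check) and the values are close.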
def test_pretrained_networks(network, input_param, device):
try:
return network(**input_param).to(device)
except (URLError, HTTPError) as e:
raise unittest.SkipTest(e) from e
def test_is_quick():
return os.environ.get(quick_test_var, "").lower() == "true"
def is_tf32_env():
"""
The environment variable NVIDIA_TF32_OVERRIDE=0 will override any defaults
or programmatic configuration of NVIDIA libraries, and consequently,
cuBLAS will not accelerate FP32 computations with TF32 tensor cores.
"""
global _tf32_enabled
if _tf32_enabled is None:
_tf32_enabled = False
if (
torch.cuda.is_available()
and not version_leq(f"{torch.version.cuda}", "10.100")  # requires at least CUDA 11.0
and os.environ.get("NVIDIA_TF32_OVERRIDE", "1") != "0"
and torch.cuda.device_count() > 0
):
try:
# with TF32 enabled, the speed is ~8x faster, but the precision has ~2 digits less in the result
g_gpu = torch.Generator(device="cuda")
g_gpu.manual_seed(2147483647)
a_full = torch.randn(1024, 1024, dtype=torch.double, device="cuda", generator=g_gpu)
b_full = torch.randn(1024, 1024, dtype=torch.double, device="cuda", generator=g_gpu)
_tf32_enabled = (a_full.float() @ b_full.float() - a_full @ b_full).abs().max().item() > 0.001 # 0.1713
except BaseException:
pass
print(f"tf32 enabled: {_tf32_enabled}")
return _tf32_enabled
def skip_if_quick(obj):
"""
Skip the unit tests if environment variable `quick_test_var=true`.
For example, the user can skip the relevant tests by setting ``export QUICKTEST=true``.
"""
is_quick = test_is_quick()
return unittest.skipIf(is_quick, "Skipping slow tests")(obj)
class SkipIfNoModule:
"""Decorator to be used if test should be skipped
when optional module is not present."""
def __init__(self, module_name):
self.module_name = module_name
self.module_missing = not optional_import(self.module_name)[1]
def __call__(self, obj):
return unittest.skipIf(self.module_missing, f"optional module not present: {self.module_name}")(obj)
class SkipIfModule:
"""Decorator to be used if test should be skipped
when optional module is present."""
def __init__(self, module_name):
self.module_name = module_name
self.module_avail = optional_import(self.module_name)[1]
def __call__(self, obj):
return unittest.skipIf(self.module_avail, f"Skipping because optional module present: {self.module_name}")(obj)
def skip_if_no_cpp_extension(obj):
"""
Skip the unit tests if the cpp extension is not available
"""
return unittest.skipUnless(USE_COMPILED, "Skipping cpp extension tests")(obj)
def skip_if_no_cuda(obj):
"""
Skip the unit tests if torch.cuda.is_available is False
"""
return unittest.skipUnless(torch.cuda.is_available(), "Skipping CUDA-based tests")(obj)
def skip_if_windows(obj):
"""
Skip the unit tests if platform is win32
"""
return unittest.skipIf(sys.platform == "win32", "Skipping tests on Windows")(obj)
class SkipIfBeforePyTorchVersion:
"""Decorator to be used if test should be skipped
with PyTorch versions older than that given."""
def __init__(self, pytorch_version_tuple):
self.min_version = pytorch_version_tuple
self.version_too_old = not pytorch_after(*pytorch_version_tuple)
def __call__(self, obj):
return unittest.skipIf(
self.version_too_old, f"Skipping tests that fail on PyTorch versions before: {self.min_version}"
)(obj)
class SkipIfAtLeastPyTorchVersion:
"""Decorator to be used if test should be skipped
with PyTorch versions newer than or equal to that given."""
def __init__(self, pytorch_version_tuple):
self.max_version = pytorch_version_tuple
self.version_too_new = pytorch_after(*pytorch_version_tuple)
def __call__(self, obj):
return unittest.skipIf(
self.version_too_new, f"Skipping tests that fail on PyTorch versions at least: {self.max_version}"
)(obj)
def make_nifti_image(array: NdarrayOrTensor, affine=None):
"""
Create a temporary nifti image on the disk and return the image name.
User is responsible for deleting the temporary file when done with it.
"""
if isinstance(array, torch.Tensor):
array, *_ = convert_data_type(array, np.ndarray)
if isinstance(affine, torch.Tensor):
affine, *_ = convert_data_type(affine, np.ndarray)
if affine is None:
affine = np.eye(4)
test_image = nib.Nifti1Image(array, affine)
temp_f, image_name = tempfile.mkstemp(suffix=".nii.gz")
nib.save(test_image, image_name)
os.close(temp_f)
return image_name
def make_rand_affine(ndim: int = 3, random_state: Optional[np.random.RandomState] = None):
"""Create random affine transformation (with values == -1, 0 or 1)."""
rs = np.random.random.__self__ if random_state is None else random_state # type: ignore
vals = rs.choice([-1, 1], size=ndim)
positions = rs.choice(range(ndim), size=ndim, replace=False)
af = np.zeros([ndim + 1, ndim + 1])
af[ndim, ndim] = 1
for i, (v, p) in enumerate(zip(vals, positions)):
af[i, p] = v
return af
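# Illustrative sketch (assumption, not in the original module): each of the first ndim
# rows of the returned affine has exactly one entry equal to +1 or -1, e.g. a possible
# result of make_rand_affine(3) is
#   [[ 0,  1,  0,  0],
#    [-1,  0,  0,  0],
#    [ 0,  0,  1,  0],
#    [ 0,  0,  0,  1]]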
class DistTestCase(unittest.TestCase):
"""
testcase without _outcome, so that it's picklable.
"""
def __getstate__(self):
self_dict = self.__dict__.copy()
del self_dict["_outcome"]
return self_dict
def __setstate__(self, data_dict):
self.__dict__.update(data_dict)
class DistCall:
"""
Wrap a test case so that it will run in multiple processes on a single machine using `torch.distributed`.
It is designed to be used with `tests.utils.DistTestCase`.
Usage:
decorate a unittest testcase method with a `DistCall` instance::
class MyTests(unittest.TestCase):
@DistCall(nnodes=1, nproc_per_node=3, master_addr="localhost")
def test_compute(self):
...
the `test_compute` method should trigger different worker logic according to `dist.get_rank()`.
Multi-node tests require a fixed master_addr:master_port, with node_rank set manually in multiple scripts
or from environment variable "NODE_RANK".
"""
def __init__(
self,
nnodes: int = 1,
nproc_per_node: int = 1,
master_addr: str = "localhost",
master_port: Optional[int] = None,
node_rank: Optional[int] = None,
timeout=60,
init_method=None,
backend: Optional[str] = None,
daemon: Optional[bool] = None,
method: Optional[str] = "spawn",
verbose: bool = False,
):
"""
Args:
nnodes: The number of nodes to use for distributed call.
nproc_per_node: The number of processes to call on each node.
master_addr: Master node (rank 0)'s address, should be either the IP address or the hostname of node 0.
master_port: Master node (rank 0)'s free port.
node_rank: The rank of the node, this could be set via environment variable "NODE_RANK".
timeout: Timeout for operations executed against the process group.
init_method: URL specifying how to initialize the process group.
Default is "env://" or "file:///d:/a_temp" (windows) if unspecified.
backend: The backend to use. Depending on build-time configurations,
valid values include ``mpi``, ``gloo``, and ``nccl``.
daemon: the process’s daemon flag.
When daemon=None, the initial value is inherited from the creating process.
method: set the method which should be used to start a child process.
method can be 'fork', 'spawn' or 'forkserver'.
verbose: whether to print NCCL debug info.
"""
self.nnodes = int(nnodes)
self.nproc_per_node = int(nproc_per_node)
if self.nnodes < 1 or self.nproc_per_node < 1:
raise ValueError(
f"number of nodes and processes per node must be >= 1, got {self.nnodes} and {self.nproc_per_node}"
)
self.node_rank = int(os.environ.get("NODE_RANK", "0")) if node_rank is None else int(node_rank)
self.master_addr = master_addr
self.master_port = np.random.randint(10000, 20000) if master_port is None else master_port
if backend is None:
self.backend = "nccl" if torch.distributed.is_nccl_available() and torch.cuda.is_available() else "gloo"
else:
self.backend = backend
self.init_method = init_method
if self.init_method is None and sys.platform == "win32":
self.init_method = "file:///d:/a_temp"
self.timeout = datetime.timedelta(0, timeout)
self.daemon = daemon
self.method = method
self.verbose = verbose
def run_process(self, func, local_rank, args, kwargs, results):
_env = os.environ.copy() # keep the original system env
try:
os.environ["MASTER_ADDR"] = self.master_addr
os.environ["MASTER_PORT"] = str(self.master_port)
os.environ["LOCAL_RANK"] = str(local_rank)
if self.verbose:
os.environ["NCCL_DEBUG"] = "INFO"
os.environ["NCCL_DEBUG_SUBSYS"] = "ALL"
os.environ["NCCL_BLOCKING_WAIT"] = str(1)
os.environ["OMP_NUM_THREADS"] = str(1)
os.environ["WORLD_SIZE"] = str(self.nproc_per_node * self.nnodes)
os.environ["RANK"] = str(self.nproc_per_node * self.node_rank + local_rank)
if torch.cuda.is_available():
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
torch.cuda.set_device(int(local_rank))
dist.init_process_group(
backend=self.backend,
init_method=self.init_method,
timeout=self.timeout,
world_size=int(os.environ["WORLD_SIZE"]),
rank=int(os.environ["RANK"]),
)
func(*args, **kwargs)
# the primary node lives longer to
# avoid _store_based_barrier, RuntimeError: Broken pipe
# as the TCP store daemon is on the rank 0
if int(os.environ["RANK"]) == 0:
time.sleep(0.1)
results.put(True)
except Exception as e:
results.put(False)
raise e
finally:
os.environ.clear()
os.environ.update(_env)
try:
dist.destroy_process_group()
except RuntimeError as e:
warnings.warn(f"While closing process group: {e}.")
def __call__(self, obj):
if not torch.distributed.is_available():
return unittest.skipIf(True, "Skipping distributed tests because not torch.distributed.is_available()")(obj)
if torch.cuda.is_available() and torch.cuda.device_count() < self.nproc_per_node:
return unittest.skipIf(
True,
f"Skipping distributed tests because it requires {self.nnodes} devices "
f"but got {torch.cuda.device_count()}",
)(obj)
_cache_original_func(obj)
@functools.wraps(obj)
def _wrapper(*args, **kwargs):
tmp = torch.multiprocessing.get_context(self.method)
processes = []
results = tmp.Queue()
func = _call_original_func
args = [obj.__name__, obj.__module__] + list(args)
for proc_rank in range(self.nproc_per_node):
p = tmp.Process(
target=self.run_process, args=(func, proc_rank, args, kwargs, results), daemon=self.daemon
)
p.start()
processes.append(p)
for p in processes:
p.join()
assert results.get(), "Distributed call failed."
return _wrapper
class TimedCall:
"""
Wrap a test case so that it will run in a new process, raises a TimeoutError if the decorated method takes
more than `seconds` to finish. It is designed to be used with `tests.utils.DistTestCase`.
"""
def __init__(
self,
seconds: float = 60.0,
daemon: Optional[bool] = None,
method: Optional[str] = "spawn",
force_quit: bool = True,
skip_timing=False,
):
"""
Args:
seconds: timeout seconds.
daemon: the process’s daemon flag.
When daemon=None, the initial value is inherited from the creating process.
method: set the method which should be used to start a child process.
method can be 'fork', 'spawn' or 'forkserver'.
force_quit: whether to terminate the child process when `seconds` elapsed.
skip_timing: whether to skip the timing constraint.
this is useful to include some system conditions such as
`torch.cuda.is_available()`.
"""
self.timeout_seconds = seconds
self.daemon = daemon
self.force_quit = force_quit
self.skip_timing = skip_timing
self.method = method
@staticmethod
def run_process(func, args, kwargs, results):
try:
output = func(*args, **kwargs)
results.put(output)
except Exception as e:
e.traceback = traceback.format_exc()
results.put(e)
def __call__(self, obj):
if self.skip_timing:
return obj
_cache_original_func(obj)
@functools.wraps(obj)
def _wrapper(*args, **kwargs):
tmp = torch.multiprocessing.get_context(self.method)
func = _call_original_func
args = [obj.__name__, obj.__module__] + list(args)
results = tmp.Queue()
p = tmp.Process(target=TimedCall.run_process, args=(func, args, kwargs, results), daemon=self.daemon)
p.start()
p.join(timeout=self.timeout_seconds)
timeout_error = None
try:
if p.is_alive():
# create an Exception
timeout_error = torch.multiprocessing.TimeoutError(
f"'{obj.__name__}' in '{obj.__module__}' did not finish in {self.timeout_seconds}s."
)
if self.force_quit:
p.terminate()
else:
warnings.warn(
f"TimedCall: deadline ({self.timeout_seconds}s) "
f"reached but waiting for {obj.__name__} to finish."
)
finally:
p.join()
res = None
try:
res = results.get(block=False)
except queue.Empty: # no result returned, took too long
pass
if isinstance(res, Exception): # other errors from obj
if hasattr(res, "traceback"):
raise RuntimeError(res.traceback) from res
raise res
if timeout_error: # no force_quit finished
raise timeout_error
return res
return _wrapper
_original_funcs = {}
def _cache_original_func(obj) -> None:
"""cache the original function by name, so that the decorator doesn't shadow it."""
global _original_funcs
_original_funcs[obj.__name__] = obj
def _call_original_func(name, module, *args, **kwargs):
if name not in _original_funcs:
_original_module = importlib.import_module(module) # reimport, refresh _original_funcs
if not hasattr(_original_module, name):
# refresh module doesn't work
raise RuntimeError(f"Could not recover the original {name} from {module}: {_original_funcs}.")
f = _original_funcs[name]
return f(*args, **kwargs)
class NumpyImageTestCase2D(unittest.TestCase):
im_shape = (128, 64)
input_channels = 1
output_channels = 4
num_classes = 3
def setUp(self):
im, msk = create_test_image_2d(
self.im_shape[0], self.im_shape[1], num_objs=4, rad_max=20, noise_max=0.0, num_seg_classes=self.num_classes
)
self.imt = im[None, None]
self.seg1 = (msk[None, None] > 0).astype(np.float32)
self.segn = msk[None, None]
class TorchImageTestCase2D(NumpyImageTestCase2D):
def setUp(self):
NumpyImageTestCase2D.setUp(self)
self.imt = torch.tensor(self.imt)
self.seg1 = torch.tensor(self.seg1)
self.segn = torch.tensor(self.segn)
class NumpyImageTestCase3D(unittest.TestCase):
im_shape = (64, 48, 80)
input_channels = 1
output_channels = 4
num_classes = 3
def setUp(self):
im, msk = create_test_image_3d(
self.im_shape[0],
self.im_shape[1],
self.im_shape[2],
num_objs=4,
rad_max=20,
noise_max=0.0,
num_seg_classes=self.num_classes,
)
self.imt = im[None, None]
self.seg1 = (msk[None, None] > 0).astype(np.float32)
self.segn = msk[None, None]
class TorchImageTestCase3D(NumpyImageTestCase3D):
def setUp(self):
NumpyImageTestCase3D.setUp(self)
self.imt = torch.tensor(self.imt)
self.seg1 = torch.tensor(self.seg1)
self.segn = torch.tensor(self.segn)
def test_script_save(net, *inputs, device=None, rtol=1e-4, atol=0.0):
"""
Test the ability to save `net` as a Torchscript object, reload it, and apply inference. The value `inputs` is
forward-passed through the original and loaded copy of the network and their results returned.
The forward pass for both is done without gradient accumulation.
The test will be performed with CUDA if available, else CPU.
"""
# TODO: would be nice to use GPU if available, but it currently causes CI failures.
device = "cpu"
with tempfile.TemporaryDirectory() as tempdir:
convert_to_torchscript(
model=net,
filename_or_obj=os.path.join(tempdir, "model.ts"),
verify=True,
inputs=inputs,
device=device,
rtol=rtol,
atol=atol,
)
def query_memory(n=2):
"""
Find best n idle devices and return a string of device ids.
"""
bash_string = "nvidia-smi --query-gpu=power.draw,temperature.gpu,memory.used --format=csv,noheader,nounits"
try:
p1 = Popen(bash_string.split(), stdout=PIPE)
output, error = p1.communicate()
free_memory = [x.split(",") for x in output.decode("utf-8").split("\n")[:-1]]
free_memory = np.asarray(free_memory, dtype=float).T
free_memory[1] += free_memory[0] # combine 0/1 column measures
ids = np.lexsort(free_memory)[:n]
except (TypeError, IndexError, OSError):
ids = range(n) if isinstance(n, int) else []
return ",".join(f"{int(x)}" for x in ids)
TEST_NDARRAYS: Tuple[Callable] = (np.array, torch.as_tensor) # type: ignore
if torch.cuda.is_available():
gpu_tensor: Callable = partial(torch.as_tensor, device="cuda")
TEST_NDARRAYS = TEST_NDARRAYS + (gpu_tensor,) # type: ignore
if __name__ == "__main__":
print(query_memory())
|
test_socket_connection.py
|
import functools
import threading
import time
import logging
import socket
import struct
import sys
import unittest
import zlib
import pytest
import ipaddress
import netifaces
from boofuzz.socket_connection import SocketConnection
from boofuzz import socket_connection
from boofuzz import ip_constants
from boofuzz import helpers
THREAD_WAIT_TIMEOUT = 10 # Time to wait for a thread before considering it failed.
ETH_P_ALL = 0x0003 # Ethernet protocol: Every packet, see Linux if_ether.h docs for more details.
UDP_HEADER_LEN = 8
IP_HEADER_LEN = 20
ETHER_TYPE_IPV4 = struct.pack(">H", socket_connection.ETH_P_IP) # Ethernet frame EtherType for IPv4
RAW_L2_MAX_PAYLOAD = socket_connection.SocketConnection.MAX_PAYLOADS['raw-l2']
RAW_L3_MAX_PAYLOAD = socket_connection.SocketConnection.MAX_PAYLOADS['raw-l3']
TEST_ERR_NO_NON_LOOPBACK_IPV4 = 'No local non-loopback IPv4 address found.'
def bytes_or_unicode_to_unicode(s):
if isinstance(s, bytes):
return s.decode('utf-8')
else:
return s
def get_local_non_loopback_ipv4_addresses_info():
for interface in netifaces.interfaces():
# Not all interfaces have an IPv4 address:
if netifaces.AF_INET in netifaces.ifaddresses(interface):
# Some interfaces have multiple IPv4 addresses:
for address_info in netifaces.ifaddresses(interface)[netifaces.AF_INET]:
# netifaces gives unicode strings in Windows, byte strings in Linux:
address_str = bytes_or_unicode_to_unicode(address_info['addr'])
if not ipaddress.IPv4Address(address_str).is_loopback:
yield address_info
def udp_packet(payload, src_port, dst_port):
"""
Create a UDP packet.
:param payload: Payload / next layer protocol.
:type payload: str
:param src_port: 16-bit source port number.
:type src_port: int
:param dst_port: 16-bit destination port number.
:type dst_port: int
:return: UDP packet.
:rtype: str
"""
udp_header = struct.pack(">H", src_port) # Src port
udp_header += struct.pack(">H", dst_port) # Dst port
udp_header += struct.pack(">H", len(payload) + UDP_HEADER_LEN) # Length
udp_header += "\x00\x00" # Checksum (0 means no checksum)
return udp_header + payload
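# Worked example for udp_packet above (Python 2 byte strings, matching the rest of this
# module): udp_packet("hi", src_port=1234, dst_port=5678) yields an 8-byte header plus
# the 2-byte payload; the length field is len("hi") + UDP_HEADER_LEN = 10 and the
# checksum bytes are "\x00\x00" (no checksum), so the whole packet is 10 bytes long.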
def ones_complement_sum_carry_16(a, b):
"""
Compute ones complement and carry at 16 bits.
:type a: int
:type b: int
:return: Sum of a and b, ones complement, carry at 16 bits.
"""
c = a + b
return (c & 0xffff) + (c >> 16)
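# Worked example for ones_complement_sum_carry_16 above:
#   a=0xFFFF, b=0x0001 -> c = 0x10000, (c & 0xffff) + (c >> 16) = 0x0000 + 0x0001 = 1,
# i.e. the carry out of bit 16 wraps back into the low word, as required for the
# ones' complement checksum arithmetic used by IP.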
def ip_packet(payload, src_ip, dst_ip, protocol=chr(ip_constants.IPV4_PROTOCOL_UDP)):
"""
Create an IPv4 packet.
:type payload: str
:param payload: Contents of next layer up.
:type src_ip: str
:param src_ip: 4-byte source IP address.
:type dst_ip: str
:param dst_ip: 4-byte destination IP address.
:type protocol: str
:param protocol: Single-byte string identifying next layer's protocol. Default "\x11" UDP.
:return: IPv4 packet.
:rtype: str
"""
ip_header = "\x45" # Version | Header Length
ip_header += "\x00" # "Differentiated Services Field"
ip_header += struct.pack(">H", IP_HEADER_LEN + len(payload)) # Length
ip_header += "\x00\x01" # ID Field
ip_header += "\x40\x00" # Flags, Fragment Offset
ip_header += "\x40" # Time to live
ip_header += protocol
ip_header += "\x00\x00" # Header checksum (fill in zeros in order to compute checksum)
ip_header += src_ip
ip_header += dst_ip
checksum = struct.pack(">H", helpers.ipv4_checksum(ip_header))
ip_header = ip_header[:10] + checksum + ip_header[12:]
return ip_header + payload
def ethernet_frame(payload, src_mac, dst_mac, ether_type=ETHER_TYPE_IPV4):
"""
Create an Ethernet frame.
:param payload: Network layer content.
:type payload: str
:param src_mac: 6-byte source MAC address.
:type src_mac: str
:param dst_mac: 6-byte destination MAC address.
:type dst_mac: str
:param ether_type: EtherType indicating protocol of next layer; default "\x08\x00" IPv4.
:type ether_type: str
:return: Ethernet frame
:rtype: str
"""
eth_header = dst_mac
eth_header += src_mac
eth_header += ether_type
raw_packet = eth_header + payload
# Ethernet frame check sequence
crc = zlib.crc32(raw_packet) & 0xFFFFFFFF
raw_packet += struct.pack("<I", crc)
return raw_packet
class MiniTestServer(object):
"""
Small server class for testing SocketConnection.
"""
def __init__(self, stay_silent=False, proto='tcp', host="0.0.0.0"):
self.server_socket = None
self.received = None
self.data_to_send = bytes("\xFE\xEB\xDA\xED")
self.active_port = None
self.stay_silent = stay_silent
self.proto = proto
self.host = host
self.timeout = 5 # Timeout while waiting for the unit test packets.
def bind(self):
"""
Bind server, and call listen if using TCP, meaning that the client test code can successfully connect.
"""
if self.proto == 'tcp':
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
elif self.proto == 'udp':
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
elif self.proto == 'raw':
self.server_socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0003))
else:
raise Exception("Invalid protocol type: '{0}'".format(self.proto))
self.server_socket.bind((self.host, 0)) # let OS choose a free port
if self.proto == 'tcp':
self.server_socket.listen(1)
self.active_port = self.server_socket.getsockname()[1]
def serve_once(self):
"""
Serve one connection and send a reply, unless stay_silent is set.
:return:
"""
self.server_socket.settimeout(self.timeout)
if self.proto == 'tcp':
(client_socket, address) = self.server_socket.accept()
self.received = client_socket.recv(10000)
if not self.stay_silent:
client_socket.send(self.data_to_send)
client_socket.close()
elif self.proto == 'udp':
data, addr = self.server_socket.recvfrom(1024)
self.received = data
if not self.stay_silent:
self.server_socket.sendto(self.data_to_send, addr)
elif self.proto == 'raw':
data, addr = self.server_socket.recvfrom(10000)
self.received = data
if not self.stay_silent:
self.server_socket.sendto(self.data_to_send, addr)
else:
raise Exception("Invalid protocol type: '{0}'".format(self.proto))
self.server_socket.close()
self.server_socket = None
self.active_port = None
def receive_until(self, expected):
"""Receive repeatedly until expected is received.
This is handy for a noisy socket (e.g., layer 2 or layer 3 sockets that
receive data from multiple applications).
Will send a reply to first connection, unless stay_silent is set.
Puts received data in self.received if and only if expected is
received.
@param expected: Expected value to look for.
"""
self.server_socket.settimeout(self.timeout)
if self.proto == 'raw':
# Keep receiving
elapsed_time = 0
start_time = time.time()
while elapsed_time < self.timeout:
self.server_socket.settimeout(self.timeout - elapsed_time)
try:
data, addr = self.server_socket.recvfrom(10000)
if data == expected:
self.received = data
if not self.stay_silent:
self.server_socket.sendto(self.data_to_send, addr)
break
except socket.timeout:
break
elapsed_time = time.time() - start_time
else:
raise Exception("Invalid protocol type: '{0}'".format(self.proto))
self.server_socket.close()
self.server_socket = None
self.active_port = None
class TestSocketConnection(unittest.TestCase):
"""
Tests only use loopback interface 'lo', since other interfaces would be
hardware or network dependent.
"""
def test_tcp_client(self):
"""
Given: A SocketConnection 'tcp' object and a TCP server.
When: Calling SocketConnection.open(), .send(), .recv(), and .close()
Then: send() returns length of payload.
and: Sent and received data is as expected.
"""
data_to_send = bytes('uuddlrlrba')
# Given
server = MiniTestServer()
server.bind()
t = threading.Thread(target=server.serve_once)
t.daemon = True
t.start()
uut = SocketConnection(host=socket.gethostname(), port=server.active_port, proto='tcp')
uut.logger = logging.getLogger("SulleyUTLogger")
# When
uut.open()
send_result = uut.send(data=data_to_send)
received = uut.recv(10000)
uut.close()
# Wait for the other thread to terminate
t.join(THREAD_WAIT_TIMEOUT)
self.assertFalse(t.isAlive())
# Then
self.assertEqual(send_result, len(data_to_send))
self.assertEqual(data_to_send, server.received)
self.assertEqual(received, server.data_to_send)
def test_tcp_client_timeout(self):
"""
Given: A SocketConnection 'tcp' object and a TCP server, set not to respond.
When: Calling SocketConnection.open(), .send(), .recv(), and .close()
Then: send() returns length of payload.
and: Sent works as expected, and recv() returns bytes('') after timing out.
"""
data_to_send = bytes('uuddlrlrba')
# Given
server = MiniTestServer(stay_silent=True)
server.bind()
t = threading.Thread(target=server.serve_once)
t.daemon = True
t.start()
uut = SocketConnection(host=socket.gethostname(), port=server.active_port, proto='tcp')
uut.logger = logging.getLogger("SulleyUTLogger")
# When
uut.open()
send_result = uut.send(data=data_to_send)
received = uut.recv(10000)
uut.close()
# Wait for the other thread to terminate
t.join(THREAD_WAIT_TIMEOUT)
self.assertFalse(t.isAlive())
# Then
self.assertEqual(send_result, len(data_to_send))
self.assertEqual(data_to_send, server.received)
self.assertEqual(received, bytes(''))
def test_udp_client(self):
"""
Given: A SocketConnection 'udp' object and a UDP server.
When: Calling SocketConnection.open(), .send(), .recv(), and .close()
Then: send() returns length of payload.
and: Sent and received data is as expected.
"""
data_to_send = bytes('"Rum idea this is, that tidiness is a timid, quiet sort of thing;'
' why, tidiness is a toil for giants."')
# Given
server = MiniTestServer(proto='udp')
server.bind()
t = threading.Thread(target=server.serve_once)
t.daemon = True
t.start()
uut = SocketConnection(host=socket.gethostname(), port=server.active_port, proto='udp',
bind=(socket.gethostname(), 0))
uut.logger = logging.getLogger("SulleyUTLogger")
# When
uut.open()
send_result = uut.send(data=data_to_send)
received = uut.recv(10000)
uut.close()
# Wait for the other thread to terminate
t.join(THREAD_WAIT_TIMEOUT)
self.assertFalse(t.isAlive())
# Then
self.assertEqual(send_result, len(data_to_send))
self.assertEqual(data_to_send, server.received)
self.assertEqual(received, server.data_to_send)
@pytest.mark.skipif(not any(True for _ in get_local_non_loopback_ipv4_addresses_info()),
reason=TEST_ERR_NO_NON_LOOPBACK_IPV4)
def test_udp_broadcast_client(self):
"""
Given: A SocketConnection 'udp' object with udp_broadcast set, and a UDP server.
When: Calling SocketConnection.open(), .send(), .recv(), and .close()
Then: send() returns length of payload.
and: Sent and received data is as expected.
"""
try:
broadcast_addr = get_local_non_loopback_ipv4_addresses_info().next()['broadcast']
except StopIteration:
assert False, TEST_ERR_NO_NON_LOOPBACK_IPV4
data_to_send = bytes('"Never drink because you need it, for this is rational drinking, and the way to death and'
' hell. But drink because you do not need it, for this is irrational drinking, and the'
' ancient health of the world."')
# Given
server = MiniTestServer(proto='udp', host='')
server.bind()
t = threading.Thread(target=server.serve_once)
t.daemon = True
t.start()
uut = SocketConnection(host=broadcast_addr, port=server.active_port, proto='udp',
bind=('', server.active_port + 1), udp_broadcast=True)
uut.logger = logging.getLogger("BoofuzzUTLogger")
# When
uut.open()
send_result = uut.send(data=data_to_send)
received = uut.recv(10000)
uut.close()
# Wait for the other thread to terminate
t.join(THREAD_WAIT_TIMEOUT)
self.assertFalse(t.isAlive())
# Then
self.assertEqual(send_result, len(data_to_send))
self.assertEqual(data_to_send, server.received)
self.assertEqual(received, server.data_to_send)
@pytest.mark.skipif(sys.platform == 'win32',
reason="Raw sockets not supported on Windows.")
def test_raw_l2(self):
"""
Test 'raw' protocol with the loopback interface 'lo'.
Given: A SocketConnection 'raw-l2' object.
and: A raw UDP/IP/Ethernet packet.
and: A server socket created with AF_PACKET, SOCK_RAW, configured to respond.
When: Calling SocketConnection.open(), .send() with the valid UDP packet, .recv(), and .close()
Then: send() returns length of payload.
and: The server receives the raw packet data from send().
and: SocketConnection.recv() returns bytes('').
"""
data_to_send = bytes('"Imagination does not breed insanity. Exactly what does breed insanity is reason.'
' Poets do not go mad; but chess-players do. Mathematicians go mad, and cashiers;'
' but creative artists very seldom. "')
# Given
server = MiniTestServer(proto='raw', host='lo')
server.data_to_send = "GKC"
server.bind()
uut = SocketConnection(host="lo", port=socket_connection.ETH_P_IP, proto='raw-l2')
uut.logger = logging.getLogger("SulleyUTLogger")
# Assemble packet...
raw_packet = ethernet_frame(
payload=ip_packet(
payload=udp_packet(
payload=data_to_send,
src_port=server.active_port + 1,
dst_port=server.active_port),
src_ip="\x7F\x00\x00\x01",
dst_ip="\x7F\x00\x00\x01"),
src_mac="\x00" * 6,
dst_mac="\xff" * 6)
expected_server_receive = raw_packet
t = threading.Thread(target=functools.partial(server.receive_until, expected_server_receive))
t.daemon = True
t.start()
# When
uut.open()
send_result = uut.send(data=raw_packet)
received = uut.recv(10000)
uut.close()
# Wait for the other thread to terminate
t.join(THREAD_WAIT_TIMEOUT)
self.assertFalse(t.isAlive())
# Then
self.assertEqual(send_result, len(expected_server_receive))
self.assertEqual(raw_packet, server.received)
self.assertEqual(received, bytes(''))
@pytest.mark.skipif(sys.platform == 'win32',
reason="Raw sockets not supported on Windows.")
def test_raw_l2_max_size(self):
"""
Test 'raw-l2' max packet size.
Given: A SocketConnection 'raw-l2' object.
and: A raw UDP/IP/Ethernet packet of RAW_L2_MAX_PAYLOAD bytes.
and: A server socket created with AF_PACKET, SOCK_RAW, configured to respond.
When: Calling SocketConnection.open(), .send() with the valid UDP packet, .recv(), and .close()
Then: send() returns RAW_L2_MAX_PAYLOAD.
and: The server receives the raw packet data from send().
and: SocketConnection.recv() returns bytes('').
"""
data_to_send = bytes('1' * RAW_L2_MAX_PAYLOAD)
# Given
server = MiniTestServer(proto='raw', host='lo')
server.data_to_send = "GKC"
server.bind()
uut = SocketConnection(host="lo", port=socket_connection.ETH_P_IP, proto='raw-l2')
uut.logger = logging.getLogger("SulleyUTLogger")
# Assemble packet...
raw_packet = data_to_send
expected_server_receive = raw_packet
t = threading.Thread(target=functools.partial(server.receive_until, expected_server_receive))
t.daemon = True
t.start()
# When
uut.open()
send_result = uut.send(data=raw_packet)
received = uut.recv(10000)
uut.close()
# Wait for the other thread to terminate
t.join(THREAD_WAIT_TIMEOUT)
self.assertFalse(t.isAlive())
# Then
self.assertEqual(send_result, RAW_L2_MAX_PAYLOAD)
self.assertEqual(expected_server_receive, server.received)
self.assertEqual(received, bytes(''))
@pytest.mark.skipif(sys.platform == 'win32',
reason="Raw sockets not supported on Windows.")
def test_raw_l2_oversized(self):
"""
Test 'raw-l2' oversized packet handling.
Given: A SocketConnection 'raw-l2' object.
and: A raw UDP/IP/Ethernet packet of RAW_L2_MAX_PAYLOAD + 1 bytes.
and: A server socket created with AF_PACKET, SOCK_RAW, configured to respond.
When: Calling SocketConnection.open(), .send() with the valid UDP packet, .recv(), and .close()
Then: send() returns RAW_L2_MAX_PAYLOAD.
and: The server receives the first RAW_L2_MAX_PAYLOAD bytes of raw packet data from send().
and: SocketConnection.recv() returns bytes('').
"""
data_to_send = bytes('F' * (RAW_L2_MAX_PAYLOAD + 1))
# Given
server = MiniTestServer(proto='raw', host='lo')
server.data_to_send = "GKC"
server.bind()
uut = SocketConnection(host="lo", port=socket_connection.ETH_P_IP, proto='raw-l2')
uut.logger = logging.getLogger("SulleyUTLogger")
# Assemble packet...
raw_packet = data_to_send
expected_server_receive = raw_packet[:RAW_L2_MAX_PAYLOAD]
t = threading.Thread(target=functools.partial(server.receive_until, expected_server_receive))
t.daemon = True
t.start()
# When
uut.open()
send_result = uut.send(data=raw_packet)
received = uut.recv(10000)
uut.close()
# Wait for the other thread to terminate
t.join(THREAD_WAIT_TIMEOUT)
self.assertFalse(t.isAlive())
# Then
self.assertEqual(send_result, RAW_L2_MAX_PAYLOAD)
self.assertEqual(expected_server_receive, server.received)
self.assertEqual(received, bytes(''))
@pytest.mark.skipif(sys.platform == 'win32',
reason="Raw sockets not supported on Windows.")
def test_raw_l3(self):
"""
Test 'raw' protocol with the loopback interface 'lo'.
Given: A SocketConnection 'raw-l3' object.
and: A raw UDP/IP packet.
and: A server socket created with AF_PACKET, SOCK_RAW, configured to respond.
When: Calling SocketConnection.open(), .send() with the valid UDP packet, .recv(), and .close()
Then: send() returns length of payload.
and: The server receives the raw packet data from send(), with an Ethernet header prepended.
and: SocketConnection.recv() returns bytes('').
"""
data_to_send = bytes('"Imprudent marriages!" roared Michael. "And pray where in earth or heaven are there any'
' prudent marriages?""')
# Given
server = MiniTestServer(proto='raw', host='lo')
server.data_to_send = "GKC"
server.bind()
uut = SocketConnection(host="lo", port=socket_connection.ETH_P_IP, proto='raw-l3')
uut.logger = logging.getLogger("SulleyUTLogger")
# Assemble packet...
raw_packet = ip_packet(
payload=udp_packet(
payload=data_to_send,
src_port=server.active_port + 1,
dst_port=server.active_port),
src_ip="\x7F\x00\x00\x01",
dst_ip="\x7F\x00\x00\x01")
expected_server_receive = '\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x08\x00' + raw_packet
t = threading.Thread(target=functools.partial(server.receive_until, expected_server_receive))
t.daemon = True
t.start()
# When
uut.open()
send_result = uut.send(data=raw_packet)
received = uut.recv(10000)
uut.close()
# Wait for the other thread to terminate
t.join(THREAD_WAIT_TIMEOUT)
self.assertFalse(t.isAlive())
# Then
self.assertEqual(send_result, len(raw_packet))
self.assertEqual(expected_server_receive, server.received)
self.assertEqual(received, bytes(''))
@pytest.mark.skipif(sys.platform == 'win32',
reason="Raw sockets not supported on Windows.")
def test_raw_l3_max_size(self):
"""
Test 'raw-l3' max packet size.
Given: A SocketConnection 'raw-l3' object.
and: A raw UDP/IP packet of RAW_L3_MAX_PAYLOAD bytes.
and: A server socket created with AF_PACKET, SOCK_RAW, configured to respond.
When: Calling SocketConnection.open(), .send() with the valid UDP packet, .recv(), and .close()
Then: send() returns RAW_L3_MAX_PAYLOAD bytes.
and: The server receives the raw packet data from send(), with an Ethernet header prepended.
and: SocketConnection.recv() returns bytes('').
"""
data_to_send = bytes('0' * RAW_L3_MAX_PAYLOAD)
# Given
server = MiniTestServer(proto='raw', host='lo')
server.data_to_send = "GKC"
server.bind()
uut = SocketConnection(host="lo", port=socket_connection.ETH_P_IP, proto='raw-l3')
uut.logger = logging.getLogger("SulleyUTLogger")
# Assemble packet...
raw_packet = data_to_send
expected_server_receive = '\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x08\x00' + raw_packet
t = threading.Thread(target=functools.partial(server.receive_until, expected_server_receive))
t.daemon = True
t.start()
# When
uut.open()
send_result = uut.send(data=raw_packet)
received = uut.recv(10000)
uut.close()
# Wait for the other thread to terminate
t.join(THREAD_WAIT_TIMEOUT)
self.assertFalse(t.isAlive())
# Then
self.assertEqual(send_result, RAW_L3_MAX_PAYLOAD)
self.assertEqual(expected_server_receive, server.received)
self.assertEqual(received, bytes(''))
@pytest.mark.skipif(sys.platform == 'win32',
reason="Raw sockets not supported on Windows.")
def test_raw_l3_oversized(self):
"""
Test 'raw-l3' oversized packet handling.
Given: A SocketConnection 'raw-l3' object.
and: A raw UDP/IP packet of RAW_L3_MAX_PAYLOAD + 1 bytes.
and: A server socket created with AF_PACKET, SOCK_RAW, configured to respond.
When: Calling SocketConnection.open(), .send() with the valid UDP packet, .recv(), and .close()
Then: send() returns RAW_L3_MAX_PAYLOAD.
and: The server receives the raw packet data from send(), with an Ethernet header prepended.
and: SocketConnection.recv() returns bytes('').
"""
data_to_send = bytes('D' * (RAW_L3_MAX_PAYLOAD + 1))
# Given
server = MiniTestServer(proto='raw', host='lo')
server.data_to_send = "GKC"
server.bind()
uut = SocketConnection(host="lo", port=socket_connection.ETH_P_IP, proto='raw-l3')
uut.logger = logging.getLogger("SulleyUTLogger")
# Assemble packet...
raw_packet = data_to_send
expected_server_receive = '\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x08\x00' + raw_packet[
:RAW_L3_MAX_PAYLOAD]
t = threading.Thread(target=functools.partial(server.receive_until, expected_server_receive))
t.daemon = True
t.start()
# When
uut.open()
send_result = uut.send(data=raw_packet)
received = uut.recv(10000)
uut.close()
# Wait for the other thread to terminate
t.join(THREAD_WAIT_TIMEOUT)
self.assertFalse(t.isAlive())
# Then
self.assertEqual(send_result, RAW_L3_MAX_PAYLOAD)
self.assertEqual(expected_server_receive,
server.received)
self.assertEqual(received, bytes(''))
def test_required_args_port(self):
"""
Given: No preconditions.
When: Constructing SocketConnections with:
protocol types in [default, 'udp', 'tcp', 'ssl'] and
no port argument.
Then: Constructor raises exception.
"""
with self.assertRaises(Exception):
SocketConnection(host='127.0.0.1')
with self.assertRaises(Exception):
SocketConnection(host='127.0.0.1', proto='tcp')
with self.assertRaises(Exception):
SocketConnection(host='127.0.0.1', proto='udp')
with self.assertRaises(Exception):
SocketConnection(host='127.0.0.1', proto='ssl')
def test_optional_args_port(self):
"""
Given: No preconditions.
When: Constructing SocketConnections with:
protocol types in ['raw-l2', 'raw-l3'] and
no port argument.
Then: Constructor raises no exception.
"""
SocketConnection(host='127.0.0.1', proto='raw-l2')
SocketConnection(host='127.0.0.1', proto='raw-l3')
def test_required_args_host(self):
"""
Given: No preconditions.
When: Constructing SocketConnections with:
protocol types in [default, 'udp', 'tcp', 'ssl', 'raw-l2', 'raw-l3'] and
no host argument.
Then: Constructor raises exception.
"""
# This method tests bad argument lists. Therefore we ignore
# PyArgumentList inspections.
with self.assertRaises(Exception):
# noinspection PyArgumentList
SocketConnection(port=5)
with self.assertRaises(Exception):
# noinspection PyArgumentList
SocketConnection(port=5, proto='tcp')
with self.assertRaises(Exception):
# noinspection PyArgumentList
SocketConnection(port=5, proto='udp')
with self.assertRaises(Exception):
# noinspection PyArgumentList
SocketConnection(port=5, proto='ssl')
with self.assertRaises(Exception):
# noinspection PyArgumentList
SocketConnection(port=5, proto='raw-l2')
with self.assertRaises(Exception):
# noinspection PyArgumentList
SocketConnection(port=5, proto='raw-l3')
if __name__ == '__main__':
unittest.main()
|
conftest.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from contextlib import closing
import os
import socket
import tempfile
import threading
from lxml.builder import ElementMaker, E as B
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.servers import FTPServer
from pyftpdlib.handlers import TLS_FTPHandler
import pytest
import yaml
from carbon.db import engine, metadata, persons, orcids, dlcs, aa_articles
@pytest.fixture(scope="session", autouse=True)
def app_init():
engine.configure("sqlite://")
metadata.bind = engine()
metadata.create_all()
@pytest.fixture(scope="session")
def records():
current_dir = os.path.dirname(os.path.realpath(__file__))
data = os.path.join(current_dir, "fixtures/data.yml")
with open(data) as fp:
r = list(yaml.safe_load_all(fp))
return r
@pytest.fixture(scope="session")
def aa_data():
current_dir = os.path.dirname(os.path.realpath(__file__))
data = os.path.join(current_dir, "fixtures/articles.yml")
with open(data) as fp:
r = list(yaml.safe_load_all(fp))
return r
@pytest.fixture(scope="session")
def _ftp_server():
"""Starts an FTPS server with an empty temp dir.
This fixture returns a tuple with the socketname and the path to the
serving directory. The socketname is a tuple with host and port.
Use the ``ftp_server`` wrapper fixture instead as it will clean the
directory before each test.
"""
s = socket.socket()
s.bind(("", 0))
fixtures = os.path.join(os.path.dirname(os.path.realpath(__file__)), "fixtures")
with tempfile.TemporaryDirectory() as d:
auth = DummyAuthorizer()
auth.add_user("user", "pass", d, perm="elradfmwMT")
handler = TLS_FTPHandler
handler.certfile = os.path.join(fixtures, "server.crt")
handler.keyfile = os.path.join(fixtures, "server.key")
handler.authorizer = auth
server = FTPServer(s, handler)
t = threading.Thread(target=server.serve_forever, daemon=1)
t.start()
yield s.getsockname(), d
@pytest.fixture
def ftp_server(_ftp_server):
"""Wrapper around ``_ftp_server`` to clean directory before each test."""
d = _ftp_server[1]
for f in os.listdir(d):
fpath = os.path.join(d, f)
if os.path.isfile(fpath):
os.unlink(fpath)
return _ftp_server
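# Illustrative usage sketch (assumption, not part of the original fixtures): a test can
# unpack the wrapper fixture above as
#   def test_upload(ftp_server):
#       (host, port), serving_dir = ftp_server
#       ...  # connect an FTPS client to (host, port) and inspect serving_dir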
@pytest.fixture
def load_data(records, aa_data):
with closing(engine().connect()) as conn:
conn.execute(persons.delete())
conn.execute(orcids.delete())
conn.execute(dlcs.delete())
conn.execute(aa_articles.delete())
for r in records:
conn.execute(persons.insert(), r["person"])
conn.execute(orcids.insert(), r["orcid"])
conn.execute(dlcs.insert(), r["dlc"])
conn.execute(aa_articles.insert(), aa_data)
yield
with closing(engine().connect()) as conn:
conn.execute(persons.delete())
conn.execute(orcids.delete())
conn.execute(dlcs.delete())
conn.execute(aa_articles.delete())
@pytest.fixture
def xml_records(E):
return [
E.record(
E.field("123456", {"name": "[Proprietary_ID]"}),
E.field("FOOBAR", {"name": "[Username]"}),
E.field("F B", {"name": "[Initials]"}),
E.field("Gaz", {"name": "[LastName]"}),
E.field("Foobar", {"name": "[FirstName]"}),
E.field("foobar@example.com", {"name": "[Email]"}),
E.field("MIT", {"name": "[AuthenticatingAuthority]"}),
E.field("1", {"name": "[IsAcademic]"}),
E.field("1", {"name": "[IsCurrent]"}),
E.field("1", {"name": "[LoginAllowed]"}),
E.field("Chemistry Faculty", {"name": "[PrimaryGroupDescriptor]"}),
E.field("2001-01-01", {"name": "[ArriveDate]"}),
E.field("2010-01-01", {"name": "[LeaveDate]"}),
E.field("http://example.com/1", {"name": "[Generic01]"}),
E.field("CFAT", {"name": "[Generic02]"}),
E.field("SCIENCE AREA", {"name": "[Generic03]"}),
E.field("Chemistry", {"name": "[Generic04]"}),
E.field(name="[Generic05]"),
),
E.record(
E.field("098754", name="[Proprietary_ID]"),
E.field("THOR", name="[Username]"),
E.field(u"Þ H", name="[Initials]"),
E.field("Hammerson", name="[LastName]"),
E.field(u"Þorgerðr", name="[FirstName]"),
E.field("thor@example.com", name="[Email]"),
E.field("MIT", {"name": "[AuthenticatingAuthority]"}),
E.field("1", {"name": "[IsAcademic]"}),
E.field("1", {"name": "[IsCurrent]"}),
E.field("1", {"name": "[LoginAllowed]"}),
E.field(
"Nuclear Science Non-faculty", {"name": "[PrimaryGroupDescriptor]"}
),
E.field("2015-01-01", {"name": "[ArriveDate]"}),
E.field("2999-12-31", {"name": "[LeaveDate]"}),
E.field("http://example.com/2", {"name": "[Generic01]"}),
E.field("COAC", {"name": "[Generic02]"}),
E.field("ENGINEERING AREA", {"name": "[Generic03]"}),
E.field("Nuclear Science", {"name": "[Generic04]"}),
E.field("Nuclear Science and Engineering", {"name": "[Generic05]"}),
),
]
@pytest.fixture
def xml_data(E, xml_records):
return E.records(*xml_records)
@pytest.fixture
def E():
return ElementMaker(
namespace="http://www.symplectic.co.uk/hrimporter",
nsmap={None: "http://www.symplectic.co.uk/hrimporter"},
)
@pytest.fixture
def articles_data(aa_data):
return B.ARTICLES(
B.ARTICLE(
B.AA_MATCH_SCORE("0.9"),
B.ARTICLE_ID("1234567"),
B.ARTICLE_TITLE(
"Interaction between hatsopoulos microfluids and "
"the Yawning Abyss of Chaos ☈."
),
B.ARTICLE_YEAR("1999"),
B.AUTHORS(u"McRandallson, Randall M.|Lord, Dark|☭"),
B.DOI("10.0000/1234LETTERS56"),
B.ISSN_ELECTRONIC("0987654"),
B.ISSN_PRINT("01234567"),
B.IS_CONFERENCE_PROCEEDING("0"),
B.JOURNAL_FIRST_PAGE("666"),
B.JOURNAL_LAST_PAGE("666"),
B.JOURNAL_ISSUE("10"),
B.JOURNAL_VOLUME("1"),
B.JOURNAL_NAME("Bunnies"),
B.MIT_ID("123456789"),
B.PUBLISHER("MIT Press"),
)
)
@pytest.fixture
def reader():
class Reader:
def __init__(self, fp):
self.fp = fp
self.data = b""
def __call__(self):
while 1:
data = self.fp.read(1024)
if not data:
break
self.data += data
return Reader
|
Servo.py
|
"""
Authors:
Brian Henson
"""
from Pwm_Interface import set_pwm
import threading
import Frame_Thread as ft
# prints a large volume of debug statements for the servo with the specified ID
# specifically only frame-thread debug statements
# there is no servo with id -1 so that will turn off debugging
DEBUG_SERVO_ID = -1
# basic clamp
def clamp(value, lower, upper):
return lower if value < lower else upper if value > upper else value
# clamp where you dont know the relative order of a and b
def bidirectional_clamp(val, a, b):
return clamp(val, a, b) if a < b else clamp(val, b, a)
# map a value along one range onto another range
def linear_map(x_in_val, x1, x2, y1, y2):
x_in_val, x1, x2, y1, y2 = float(x_in_val), float(x1), float(x2), float(y1), float(y2)
m = (y2 - y1) / (x2 - x1)
b = y2 - m * x2
return x_in_val * m + b
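# Worked example for linear_map above (values are illustrative only): mapping a 0-180
# degree range onto pulses 150-600 gives, for 90 degrees,
#   m = (600 - 150) / (180 - 0) = 2.5, b = 600 - 2.5 * 180 = 150,
#   linear_map(90, 0, 180, 150, 600) = 90 * 2.5 + 150 = 375.0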
class Servo(object):
"""
Servo stores the following values:
shield_id: the # of the hat the servo is connected on
channel_id: the # of the channel on the hat the servo is connected on
servo_id: same as "id" from the json, used to derive shield_id and channel_id. should be unique
min_pulse: The minimum pulse the Servo allows
max_pulse: The maximum pulse the Servo allows
min_angle: The minimum degree the Servo can rotate to
max_angle: The maximum degree the Servo can rotate to
default_angle: The rest (initial) position of the servo.
name: The name of the servo
disabled: True if the servo is disabled. Any attempts to move the servo will return without having any effect.
framethread: handle for the background thread to manage gradual movement
state_flag_running: Event object to indicate the gradual movement is occurring
state_flag_idle: Event object to indicate the gradual movement is not occurring
frame_queue: list object filled by the main thread when gradual movement begins, and consumed by the background thread
curr_angle: track the angle the servo was last instructed to move to
curr_pwm: track the pwm the servo was last instructed to set to
curr_on: track the on/off state of the servo, exactly equivalent to bool(curr_pwm) in all cases
curr_state_lock: Lock object to ensure only 1 thread at a time touches the current angle/pwm trackers
state_flag_lock: Lock object to ensure only 1 thread at a time touches the running/idle flags
frame_queue_lock: Lock object to ensure only 1 thread at a time touches the frame_queue
"""
def __init__(self,
servo_id,
min_pulse,
max_pulse,
min_angle,
max_angle,
default_angle,
name,
disabled=False
):
self.disabled = disabled
self.servo_id = servo_id
# servo IDs start from 0, and because there are only 16 channels on a hat, the actual channel is ID mod 16.
# also, the hat number is ID // 16 (round-down division).
# Example: ID=20, 20 % 16 = 4, 20 // 16 = 1. Channel 4, hat 1.
self.channel_id = int(servo_id % 16)
self.shield_id = int(servo_id // 16)
self.min_pulse = min_pulse
self.max_pulse = max_pulse
self.min_angle = min_angle
self.max_angle = max_angle
self.default_angle = default_angle
self.name = name
if self.disabled:
print("Warning: servo '%s' is disabled" % name)
else:
# sanity checking, in case of typos in the config json
assert min_angle != max_angle # linear_map breaks if the range is zero
assert min_angle < max_angle # angle-space should never be upside-down
assert min_pulse != max_pulse # linear_map breaks if the range is zero
assert 1 <= max_pulse <= 4096 # range of possible values for pwm hat
assert 1 <= min_pulse <= 4096 # range of possible values for pwm hat
assert -360 <= min_angle <= 360 # range of possible angle values
assert -360 <= max_angle <= 360 # range of possible angle values
assert 0 <= servo_id <= 95 # nobody will ever need more than 6 hats
assert min_angle <= default_angle <= max_angle # default angle within valid angle range
# running/idle flags: normal Events can only wait for a rising edge, if I want to wait for a falling edge, i need to
# set up a complementary system like this. also they're mostly being used as flags, not as "events", but whatever.
self.state_flag_running = threading.Event()
self.state_flag_idle = threading.Event()
self.state_flag_idle.set()
# i want setting one/clearing other to be an indivisible atomic operation so it should have a lock object just in case
self.state_flag_lock = threading.Lock()
# the list of frames that the leg thread is consuming as the leg object is adding onto
self.frame_queue = []
# locking object to ensure no collisions happen around the frame queue
self.frame_queue_lock = threading.Lock()
# these variables track the state of the servo at any given instant
self.curr_angle = 0.0
self.curr_pwm = 0
self.curr_on = False
# locking object to ensure no collisions happen around self.curr_angle/self.curr_pwm/self.curr_on
# might not be necessary but couldn't hurt, technically both the servo thread and servo object can write into them
self.curr_state_lock = threading.Lock()
# create and launch the thread for this servo
# note: this MUST be daemon type because the thread is designed to run forever...
# the only way to stop it is by stopping its parent, which means it must be a daemon!
# it will be able to access all of this servos's other member variables and functions
self.framethread_name = "framethread_" + self.name
self.framethread = threading.Thread(name=self.framethread_name,
target=ft.frame_thread_func, args=(self, DEBUG_SERVO_ID))
self.framethread.daemon = True
# start the thread, this should be the 2nd last operation of __init__
self.framethread.start()
# the servo begins as "off" until explicitly told to initialize it
self.off()
def __str__(self):
# self.name, self.id, self.channel_id, self.shield_id, self.min_pulse, self.max_pulse, self.min_angle, self.max_angle, self.default_angle, self.disabled
# curr_angle, curr_pwm, curr_on
s = "name={}, servo_id={}, channel_id={}, shield_id={}, min_pulse={}, max_pulse={}, min_angle={}, max_angle={}, default_angle={}, disabled={}\ncurr_angle={}, curr_pwm={}, curr_on={}"
return s.format(self.name, self.servo_id, self.channel_id, self.shield_id, self.min_pulse, self.max_pulse,
self.min_angle, self.max_angle, self.default_angle, self.disabled,
self.curr_angle, self.curr_pwm, self.curr_on)
def rotate(self, degree):
"""
Rotate to the specified degrees
non-threading method of controlling the servo
perform safety clamp before passing to do_set_angle
"""
if self.disabled:
return
degree_safe = self.degrees_clamp(degree)
# calling the non-threading control function should cancel any pending threading events
self.abort()
try:
self.do_set_angle(degree_safe)
except ValueError as exception:
print(exception)
print("Could not rotate {} to {} degree").format(self.name, degree)
def rotate_thread(self, degree, durr):
"""
Rotate to the specified degrees gradually over the specified duration
threading method of controlling the servo
perform safety clamp, interpolate, append frames to frame queue, and sets running flag
the thread will jump in with "do_set_servo_angle" when it is the correct time
"""
if self.disabled:
return
# safety clamp
dest = self.degrees_clamp(degree)
# if there is a queued interpolation frame, interpolate from the final frame in the queue to the desired pose.
# otherwise, interpolate from current position.
curr = None
with self.frame_queue_lock:
if len(self.frame_queue) > 0:
curr = self.frame_queue[-1][0]
if curr is None: # "else" but outside of the lock block
# floats are always copied, not referenced
curr = self.curr_angle
if self.servo_id == DEBUG_SERVO_ID:
print("servo_%s: interp from deg %d to %d over %f" % (self.name, curr, dest, durr))
# run interpolation
interp_list = ft.interpolate(dest, curr, durr)
# add new frames onto the END of the frame queue (with lock)
with self.frame_queue_lock:
# concatenate two lists with +
self.frame_queue = self.frame_queue + interp_list
if self.servo_id == DEBUG_SERVO_ID:
print("servo_%s: add %d frames, new length %d" % (self.name, len(interp_list), len(self.frame_queue)))
with self.state_flag_lock:
# clear "sleeping" event, does not trigger anything (note: clear before set)
self.state_flag_idle.clear()
# set the "running" event, this will trigger the thread to begin consuming frames
# note: do this unconditionally! no harm in setting an already set flag
self.state_flag_running.set()
def do_set_angle(self, degree):
"""
take angle after clamp, already known to be safe value
convert to pwm, set actual pwm, also set "self.curr_x" values
called by both threading and non-threading approaches
"""
pulse = self.degrees_to_pulse(degree)
with self.curr_state_lock:
self.curr_on = True
self.curr_angle = degree
self.curr_pwm = pulse
set_pwm(self.shield_id, self.channel_id, pulse)
def initialize(self):
""" Move servo to defult position """
print("init name = {}, default_angle = {}".format(self.name, self.default_angle))
self.rotate(self.default_angle)
def off(self):
""" setting PWM to 0 cuts power to the servo and makes it malleable """
if self.disabled:
return
# abort so it doesn't wake up after turning off until i explicitly tell it to wake up
self.abort()
try:
with self.curr_state_lock:
self.curr_on = False
#self.curr_angle = None
self.curr_pwm = 0
set_pwm(self.shield_id, self.channel_id, 0)
except ValueError as exception:
print(exception)
print("Could not turn off", self.name)
# clear the frame queue to stop any currently-pending movements.
def abort(self):
with self.frame_queue_lock:
self.frame_queue = []
def degrees_clamp(self, degree):
# clamp for safety
degree_safe = float(bidirectional_clamp(degree, self.min_angle, self.max_angle))
# warn if clamping actually occurred
if degree != degree_safe:
print("Degree {} is out of range, clamping to safe value {}".format(degree, degree_safe))
return degree_safe
def degrees_to_pulse(self, degree):
""" Map degree input value to a pulse length output value """
# perform actual mapping
pulse = int(linear_map(degree, self.min_angle, self.max_angle, self.min_pulse, self.max_pulse))
return pulse
# removed setters & getters cuz parameters are not redefined after initialization, therefore they are useless
|
util.py
|
import logging
import pickle
from time import time
import math
import faiss
import numpy as np
import torch
from collections import defaultdict
folder_pickles = '../data/'
best_metric=0
# Read and write data to pickle files
# Load data
def restoreVariableFromDisk(name):
#logging.info('Recovering variable...')
#t0 = time()
val = None
with open(folder_pickles + name + '.pickle', 'rb') as handle:
val = pickle.load(handle)
#t1 = time()
#logging.info('Variable recovered. Time: {}m'.format((t1-t0)/60))
return val
# Save data
def saveVariableOnDisk(f,name):
#logging.info('Saving variable on disk...')
#t0 = time()
with open(folder_pickles + name + '.pickle', 'wb') as handle:
pickle.dump(f, handle, protocol=pickle.HIGHEST_PROTOCOL)
#t1 = time()
#logging.info('Variable saved. Time: {}m'.format((t1-t0)/60))
return
'''
li=[73741,81609]
saveVariableOnDisk(li,'/taobao_data/taobao_feature')
m1,m2=restoreVariableFromDisk('/taobao_data/taobao_feature')
print(m1)
'''
#list=restoreVariableFromDisk('/taobao_data/test_target_taobao')
#print(list[1])
# read data
def prepare_data(src, target):
nick_id, item_id = src
hist_item, hist_mask = target
return nick_id, item_id, hist_item, hist_mask
## load items and their corresponding categories
def load_item_cate(source):
item_cate = {}
with open(source, 'r') as f:
for line in f:
conts = line.strip().split(',')
item_id = int(conts[0])
cate_id = int(conts[1])
item_cate[item_id] = cate_id
return item_cate
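# Example usage of load_item_cate (the path and contents below are hypothetical, shown only
# to document the assumed "item_id,cate_id" one-pair-per-line format parsed above):
'''
# ../data/item_cate.txt:
# 1001,5
# 1002,7
# 1003,5
item_cate = load_item_cate('../data/item_cate.txt')
print(item_cate[1001])  # -> 5
'''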
# extract the interests from MIMN and use them as input to NHP
def load_interst(w_list,time_list):
    # load interests
w_list = [i[0] for i in w_list]
w_list_tensor = torch.stack(w_list)
w_list_max = torch.max(w_list_tensor, dim=-1)[-1]
    #w_to_interst = w_list_max.reshape(256, 200) ## this would extract in a zigzag order
w_to_interst= [w_list_max[:, i] for i in range(256)]
    # put the interests into time_list
for i in range(256):
for j in range(len(time_list[i])):
time_list[i][j]['type_event'] = int(w_to_interst[i][j])
# item_ = [time_list[i][-8:] for i in range(len(time_list))]
    # return item_ ## the full list is large; a smaller slice makes testing faster
return time_list
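# Note on torch.max(..., dim=-1)[-1] above: with a dim argument torch.max returns a
# (values, indices) pair, so indexing [-1] keeps the argmax indices. Tiny self-contained check
# (values are made up):
'''
w = torch.tensor([[0.1, 0.7, 0.2]])
print(torch.max(w, dim=-1)[-1])  # tensor([1]) -- index of the largest slot
'''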
def get_lamda(all_lambda_sample):
    # read the temporal intensity values
    ### all_lambda_sample holds the probability of each event type at every timestep; num_group=1 adds an extra dimension here, so squeeze it out
lambda_x = torch.squeeze(all_lambda_sample, dim=1)
    ### take the per-type probabilities at the last timestep
lambda_y = torch.squeeze(lambda_x[:, -1:, :], dim=1).cpu()
_,tmp=torch.sort(lambda_y,dim=-1)
_,rank=torch.sort(tmp)
lamda_rank=rank+1
lambda_ = lambda_y.detach().numpy().tolist()
return lambda_,lamda_rank
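# The double torch.sort() above is the usual argsort-of-argsort trick for turning scores
# into ranks (1 = smallest). A minimal illustration with made-up values:
'''
y = torch.tensor([[0.2, 0.9, 0.5]])
_, tmp = torch.sort(y, dim=-1)   # indices that would sort y ascending
_, rank = torch.sort(tmp)        # position of each element in that ordering
print(rank + 1)                  # tensor([[1, 3, 2]])
'''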
# evaluation metrics
def get_item(model,topN,EMBEDDING_DIM):
item_embs = model.output_item_em()
'''
res = faiss.StandardGpuResources()
flat_config = faiss.GpuIndexFlatConfig()
flat_config.device = 0
try:
gpu_index = faiss.GpuIndexFlatIP(res, EMBEDDING_DIM, flat_config)
gpu_index.add(item_embs)
except Exception as e:
return {}
'''
index = faiss.IndexFlatIP(EMBEDDING_DIM)
index.add(item_embs)
user_embs = model.output_user()
# if model_type=='GRU':
# interest_num=len(user_embs)
# D_,I_=[],[]
# for num in range(interest_num):
    #         d, i = index.search(np.ascontiguousarray(user_embs[num]), topN) ## a bit faster this way
# D_.append(d)
# I_.append(i)
    #     ## couldn't find a clean way to do this; not generalizable
# multi_D = list(map(list, zip(D_[0],D_[1], D_[2], D_[3])))
# multi_I = list(map(list, zip(I_[0], I_[1], I_[2], I_[3])))
# D = sum(multi_D, [])
# I = sum(multi_I, [])
#
# else:
    D, I = index.search(np.ascontiguousarray(user_embs), topN) ## a bit faster this way
#D, I = index.search(user_embs, topN)
return I,D
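# Minimal faiss inner-product search sketch, for reference next to get_item() above. The
# dimensions and random data are made up; faiss expects contiguous float32 arrays.
'''
import numpy as np
d = 8
item_embs_demo = np.random.rand(100, d).astype('float32')
user_embs_demo = np.random.rand(4, d).astype('float32')
demo_index = faiss.IndexFlatIP(d)      # exact inner-product (MIPS) index
demo_index.add(item_embs_demo)         # add all item embeddings
D_demo, I_demo = demo_index.search(np.ascontiguousarray(user_embs_demo), 10)  # top-10 per user
'''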
# diversity computation
def compute_diversity(item_list, item_cate_map):
n = len(item_list)
diversity = 0.0
for i in range(n):
for j in range(i+1, n):
diversity += item_cate_map[item_list[i]] != item_cate_map[item_list[j]]
diversity /= ((n-1) * n / 2)
return diversity
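# Toy check of compute_diversity(): with categories {1: 'A', 2: 'A', 3: 'B'}, the pairs
# (1,2), (1,3), (2,3) contribute 0 + 1 + 1 differing-category pairs out of 3 -> 2/3.
'''
print(compute_diversity([1, 2, 3], {1: 'A', 2: 'A', 3: 'B'}))  # 0.666...
'''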
def evaluate(target_item,dpp_item,item_cate_map,total_recall,total_ndcg,total_hitrate,total_diversity,save=True):
for i, iid_list in enumerate(target_item):
recall = 0
dcg = 0.0
for no, iid in enumerate(iid_list):
if iid in dpp_item[i]:
recall += 1
dcg += 1.0 / math.log(no + 2, 2)
idcg = 0.0
for no in range(recall):
idcg += 1.0 / math.log(no + 2, 2)
total_recall += recall * 1.0 / len(iid_list)
if recall > 0:
total_ndcg += dcg / idcg
total_hitrate += 1
if save:
total_diversity += compute_diversity(dpp_item[i], item_cate_map)
return total_recall,total_ndcg,total_hitrate,total_diversity
'''
def evaluate_full(test_data, model, topN,EMBEDDING_DIM,memory_size,item_cate_map, save=True, coef=None):
    #item_embs = model.output_item(sess) # all item embeddings
item_embs = model.output_item_em()
# res = faiss.StandardGpuResources()
# flat_config = faiss.GpuIndexFlatConfig()
# flat_config.device = 0
#
# try:
# gpu_index = faiss.GpuIndexFlatIP(res, EMBEDDING_DIM, flat_config)
# gpu_index.add(item_embs)
# except Exception as e:
# return {}
index = faiss.IndexFlatIP(EMBEDDING_DIM)
index.add(item_embs)
total = 0
total_recall = 0.0
total_ndcg = 0.0
total_hitrate = 0
total_map = 0.0
total_diversity = 0.0
for src, tgt in test_data:
        nick_id, item_id, hist_item, hist_mask = prepare_data(src, tgt) # nick_id is not needed
#user_embs = model.output_user(sess, [hist_item, hist_mask])
user_embs = model.output_user()
D, I = index.search(user_embs, topN)
ni = memory_size
for i, iid_list in enumerate(item_id):
recall = 0
dcg = 0.0
item_list_set = set()
if coef is None:
item_list = list(
zip(np.reshape(I[i * ni:(i + 1) * ni], -1), np.reshape(D[i * ni:(i + 1) * ni], -1)))
item_list.sort(key=lambda x: x[1], reverse=True)
for j in range(len(item_list)):
if item_list[j][0] not in item_list_set and item_list[j][0] != 0:
item_list_set.add(item_list[j][0])
if len(item_list_set) >= topN:
break
else:
origin_item_list = list(
zip(np.reshape(I[i * ni:(i + 1) * ni], -1), np.reshape(D[i * ni:(i + 1) * ni], -1)))
origin_item_list.sort(key=lambda x: x[1], reverse=True)
item_list = []
tmp_item_set = set()
for (x, y) in origin_item_list:
if x not in tmp_item_set and x in item_cate_map:
item_list.append((x, y, item_cate_map[x]))
tmp_item_set.add(x)
cate_dict = defaultdict(int)
for j in range(topN):
max_index = 0
max_score = item_list[0][1] - coef * cate_dict[item_list[0][2]]
for k in range(1, len(item_list)):
if item_list[k][1] - coef * cate_dict[item_list[k][2]] > max_score:
max_index = k
max_score = item_list[k][1] - coef * cate_dict[item_list[k][2]]
elif item_list[k][1] < max_score:
break
item_list_set.add(item_list[max_index][0])
cate_dict[item_list[max_index][2]] += 1
item_list.pop(max_index)
for no, iid in enumerate(iid_list):
if iid in item_list_set:
recall += 1
dcg += 1.0 / math.log(no + 2, 2)
idcg = 0.0
for no in range(recall):
idcg += 1.0 / math.log(no + 2, 2)
total_recall += recall * 1.0 / len(iid_list)
if recall > 0:
total_ndcg += dcg / idcg
total_hitrate += 1
if not save:
total_diversity += compute_diversity(list(item_list_set), item_cate_map)
total += len(item_id)
recall = total_recall / total
ndcg = total_ndcg / total
hitrate = total_hitrate * 1.0 / total
diversity = total_diversity * 1.0 / total
if save:
return {'recall': recall, 'ndcg': ndcg, 'hitrate': hitrate}
return {'recall': recall, 'ndcg': ndcg, 'hitrate': hitrate, 'diversity': diversity}
def recall_N(y_true, y_pred, N=10):
return len(set(y_pred[:N]) & set(y_true)) * 1.0 / len(y_true)
def sampledsoftmaxloss(y_true, y_pred):
return K.mean(y_pred)
def get_item_embedding(item_embedding, item_input_layer):
    embedding = nn.Embedding(10, 2) # 10 words, 2 dimensions each
return embedding
y_pred=[1,2,3]
y_true=[3,4,5]
m=recall_N(y_pred,y_true) #0.3333
print("good")
def tf_embeddinglook(n_dim,embedding_dim):
embedding = tf.constant(
[[0.21,0.41,0.51,0.11],
[0.22,0.42,0.52,0.12],
[0.23,0.43,0.53,0.13],
[0.24,0.44,0.54,0.14]],dtype=tf.float32)
    # the specified indices, used to look up rows in the embedding table
feature_batch = tf.constant([2, 3, 1, 0])
feature_batch2=tf.constant([2,3])
    # in embedding_lookup, the first argument acts as a 2-D lookup table; the rows indexed by the second argument are looked up and returned
get_embedding1 = tf.nn.embedding_lookup(embedding, feature_batch)
get_embedding2 = tf.nn.embedding_lookup(embedding, feature_batch2)
def embeddinglook(n_dim,embedding_dim,idex):
    # example
embeds = t.nn.Embedding(2, 5)
    # get the initial word vector of the word 'hello' from the word embedding
idex_lis = [0, 1]
idex=embeds(t.LongTensor(idex_lis))
    # global encoding
embeds = t.nn.Embedding(n_dim, embedding_dim)
    # get the initial embedding vectors for idex
idex_embedding=embeds(t.LongTensor(idex)) #tensor
    idex_embedding=idex_embedding.detach().numpy() # convert to numpy
return idex_embedding
#embeddinglook(4,4,[3,2,2,2,2])
# pickles written under Python 2 need to be read this way
def restoreVariableFromDisk_py2(name):
logging.info('Recovering variable...')
t0 = time()
val = None
with open(folder_pickles + name + '.pickle', 'rb') as handle:
val = pickle.load(handle,encoding='iso-8859-1')
t1 = time()
logging.info('Variable recovered. Time: {}m'.format((t1-t0)/60))
return val
import copy
## here we want to split the values into a 4:3:2:1 ratio without changing their original order ---- consider optimizing
Gamma_sort=copy.deepcopy(Gamma)
for list in Gamma_sort:
list_new=sorted(list)
for num,li in enumerate(list):
value=list_new.index(li)+1
list[num]=value
'''
'''
def generator_queue(generator, max_q_size=20,
wait_time=0.1, nb_worker=1):
generator_threads = []
q = multiprocessing.Queue(maxsize=max_q_size)
_stop = multiprocessing.Event()
try:
def data_generator_task():
while not _stop.is_set():
try:
if q.qsize() < max_q_size:
# start_time = time.time()
                        # generator_output = next(generator) # calls the next function of DataIterator once
generator_output = generator.next()
# end_time = time.time()
# print end_time - start_time
q.put(generator_output)
else:
# time.sleep(wait_time)
continue
except Exception:
_stop.set()
print("over1")
# raise
for i in range(nb_worker):
thread = multiprocessing.Process(target=data_generator_task)
generator_threads.append(thread)
thread.daemon = True
thread.start()
except Exception:
_stop.set()
for p in generator_threads:
if p.is_alive():
p.terminate()
q.close()
print("over")
return q, _stop, generator_threads
'''
|
fifo_queue_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.FIFOQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
class FIFOQueueTest(xla_test.XLATestCase):
def testEnqueue(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueWithShape(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=(3, 2))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
self.assertEqual(1, q.size().eval())
def testMultipleDequeues(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q.enqueue([1]))
self.evaluate(q.enqueue([2]))
self.evaluate(q.enqueue([3]))
a, b, c = self.evaluate([q.dequeue(), q.dequeue(), q.dequeue()])
self.assertAllEqual(set([1, 2, 3]), set([a, b, c]))
def testQueuesDontShare(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q.enqueue(1))
q2 = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q2.enqueue(2))
self.assertAllEqual(self.evaluate(q2.dequeue()), 2)
self.assertAllEqual(self.evaluate(q.dequeue()), 1)
def testEnqueueDictWithoutNames(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
with self.assertRaisesRegex(ValueError, "must have names"):
q.enqueue({"a": 12.0})
def testParallelEnqueue(self):
with self.session() as sess, self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
sess.run(enqueue_op)
threads = [
self.checkedThread(target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.session() as sess, self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i]], vals)
def testEnqueueAndBlockingDequeue(self):
with self.session() as sess, self.test_scope():
q = data_flow_ops.FIFOQueue(3, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
sess.run(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(sess.run(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
for elem, result in zip(elems, results):
self.assertEqual([elem], result)
def testMultiEnqueueAndDequeue(self):
with self.session() as sess, self.test_scope():
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
x_val, y_val = sess.run(dequeued_t)
x, y = elems[i]
self.assertEqual([x], x_val)
self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
self.assertEqual([0], q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual(1, self.evaluate(size))
dequeued_t.op.run()
self.assertEqual(0, self.evaluate(size))
if __name__ == "__main__":
test.main()
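# A minimal, non-XLA sketch of the same enqueue/dequeue pattern exercised above, using the
# TF1-style graph API (illustrative only; the tests above remain the authoritative usage):
'''
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
q = tf.FIFOQueue(10, tf.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
with tf.Session() as sess:
  sess.run(enqueue_op)
  print(sess.run(dequeued_t))  # 10.0
'''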
|
rsa.py
|
#!/usr/bin/python
'''*****************************************************************************
Purpose: To analyze the sentiment of Reddit posts and comments
This program uses Vader SentimentIntensityAnalyzer to calculate the ticker compound value.
You can change multiple parameters to suit your needs. See below under "set program parameters."
Implementation:
    I am using sets for 'x in s' membership tests; for a set, "x in s" is O(1) on average, compared to O(n) for a list.
Limitations:
It depends mainly on the defined parameters for current implementation:
    It completely ignores heavily downvoted comments, so there may be times when
    the most mentioned ticker is heavily downvoted; you can change that via the upvotes variable.
Author: github:asad70
****************************************************************************'''
#imports by asad70
from operator import ne
from unittest import expectedFailure
import praw
from pymysql import NULL
from data import *
import time
import pandas as pd
import matplotlib.pyplot as plt
import squarify
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from nltk.stem import WordNetLemmatizer
import emoji # removes emojis
import re # removes links
import en_core_web_sm
import string
#imports (to build flask app with csv/mysql database, api data collection, schedules, threading, multiprocessing) by me
import nltk
nltk.download('wordnet', quiet=True) # downloads the WordNet corpus, required by WordNetLemmatizer used below
nltk.download('vader_lexicon', quiet=True)
from prawcore.exceptions import Forbidden
from multiprocessing import Process
from threading import Thread
import datetime
import pymysql
import os, sys, csv, requests, schedule, pathlib, pprint, urllib.request, ast
'''*****************************************************************************
# program options & environment variables
*****************************************************************************'''
isPrint_logs = True
use_sentiment_analysis_and_visualization = False
storagetype = "mysql"
write_empty_newoutputfile = False #default: False
max_output_amount = 25
if max_output_amount < 1: raise ValueError('max output amount cannot be <1')
IEX_TOKEN = os.environ.get('IEX_TOKEN')
IEX_TOKEN = F'?token={IEX_TOKEN}'
IEX_TOKEN_SANDBOX = os.environ.get('IEX_TOKEN_SANDBOX')
IEX_TOKEN_SANDBOX = F'?token={IEX_TOKEN_SANDBOX}'
'''*****************************************************************************
# csv (for data storage):
# variables of csv file paths
*****************************************************************************'''
path_repo = str(pathlib.Path(os.path.dirname(os.path.realpath(__file__)) + '/..'))
path_csvfiles = '/csvfiles'
path_repo_and_csvfiles = str(pathlib.Path(path_repo + path_csvfiles))
'''*****************************************************************************
# mysql (for data storage):
# variables + establish connection to mysql database (can't do variable initialization idk why)
# program options
*****************************************************************************'''
def connect_to_mysql():
connection = pymysql.connect(
host=os.environ.get('MYSQL_HOST_RDS'),
user=os.environ.get('MYSQL_USER_RDS'),
password=os.environ.get('MYSQL_PASSWORD_RDS'),
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
cursor = connection.cursor()
return connection, cursor
if storagetype == "mysql":
connection, cursor = connect_to_mysql()
# db_name1 = 'rsa_db_onetableversion'
db_name1 = 'rsa_db'
'''*****************************************************************************
# Parameters for main function
*****************************************************************************'''
input_api_nasdaq = 'api.nasdaq.com'
output_filename0 = 'result_test_'
output_filename1 = 'result_all_'
output_filename1_RDS = 'result_all_rds_'
output_filename2 = 'result_200b_'
output_filename3 = 'result_15b_'
output_filename4 = 'result_4b_'
output_filename4_RDS = 'result_4b_rds_'
output_filename5 = 'result_4m_'
subs_specificlist1 = ['wallstreetbets']
subs_specificlist2 = ['Stocks', 'Bitcoin', 'Wallstreetbetsnew', 'PennyStocks', 'algotrading', 'Economics', 'investing', 'Pennystocks', 'StockMarket', 'stocks', 'Investing', 'pennystocks', 'Options', 'AlgoTrading', 'wallstreetbets', 'Cryptocurrency', 'WallStreetBets']
subs_specificlist3 = ['Bitcoin', 'Cryptocurrency', 'DayTrading']
subs_membercount_min1 = 0
subs_membercount_min2 = 600000
subs_membercount_min3 = 1000000
marketcap_min0 = 0
marketcap_min1 = 1000
marketcap_min2 = 1000000000
marketcap_max1 = 35000000000000 #all
marketcap_max2 = 200000000000
marketcap_max3 = 15000000000
marketcap_max4 = 4000000000
marketcap_max5 = 4000000
#testing line 306 #limit amount of symbols/picks printed
# if top_picks.index(i) >= picks: #testing
# break
'''*****************************************************************************
# "worker" functions
*****************************************************************************'''
def ftn_rsa1():
print('ftn_rsa1() on rsa.py used')
def warning_maxoutputexceeded(list_existingoutputfiles1, max_output_amount):
if len(list_existingoutputfiles1) > max_output_amount:
for r in range(3): #input() doesn't work in multithreading mode
print(f"Note: output file count: {len(list_existingoutputfiles1)} > max_output_amount: {max_output_amount}")
a = input("Max # of allowed output files is LOWER than existing output files. Proceeding will limit existing output files by deleting the oldest, excessive output files. Do you want to continue? (Y/N) ")
if a.lower() == "y" or a.lower() == "yes":
break
elif a.lower() == "n" or a.lower() == "no" or r >= 2:
print("User chose to not continue.. stopping the program now. Review the 'max output amount' variable.")
sys.exit()
                #os._exit() # would exit the whole process immediately, without cleanup
else:
continue
# JUST ADDED
# for prepare_variables1, deleteandrename_existing.. functions
def check_exists_3tables(outputname_userinput):
'''*****************************************************************************
### 0 - check if database, parent table, child table exist or doesn't exist yet
*****************************************************************************'''
exists_database = None
exists_parenttable = None
exists_childtable = None
### 0
#check if database exists
cursor.execute(f"SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = '{db_name1}';")
result = cursor.fetchall()
if result == () or result == None: exists_database = False
else: exists_database = True
#check if parent table exists
cursor.execute(f"SELECT table_name FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '{db_name1}' AND TABLE_NAME = '{outputname_userinput}parent';")
result = cursor.fetchall()
if result == () or result == None: exists_parenttable = False
else: exists_parenttable = True
#check if child table exists
cursor.execute(f"SELECT table_name FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '{db_name1}' AND TABLE_NAME = '{outputname_userinput}child';")
result = cursor.fetchall()
if result == () or result == None: exists_childtable = False
else: exists_childtable = True
return exists_database, exists_parenttable, exists_childtable
# OLD
def prepare_variables1_csv_and_sql(storagetype, outputname_userinput, max_output_amount):
'''*****************************************************************************
# Preparing latest outputname_userinput filename
# Parameter: outputname_userinput, max_output_amount
#1 get a list of existing saved output file/table that contains given outputname_userinput = ok
#1.5 warn the user about max_output_amount deleting the oldest, excessive output files = ok
#2 get len = ok
#3 get new ref number (10 if 10 files there already, 10 if 9 there already, 9 if 8 files there already, 1 if 0, 2 if 1) = ok
#4 get potential outputname_userinput filename.. to be created if program finishes) = ok
*****************************************************************************'''
if storagetype != "mysql" and storagetype != "csv":
print("warning: check your storagetype entry. Could be simply mispelled.")
#1
if storagetype == "mysql":
# 0 - if database doesn't exist yet, create one
cursor.execute(f"SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = '{db_name1}';")
result = cursor.fetchall()
if result == () or result == None:
print(f"No such database exists. Creating database {db_name1}...")
cursor.execute(f"CREATE DATABASE {db_name1}")
print(f"Successfully created {db_name1}")
# 1 - get a list of existing saved tables that contains given outputname_userinput
list_existingoutputfiles1 = []
cursor.execute(f"SELECT table_name FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '{db_name1}' AND table_name like '{outputname_userinput}%';")
result = cursor.fetchall()
list_existingoutputfiles1 = [list(a.values())[0] for a in result]
#print('list_existingoutputfiles1 (prepare_variables1_csv_and_sql)'.ljust(55), list_existingoutputfiles1) #log
if storagetype == "csv":
# 1
list_existingoutputfiles1 = []
for a in os.listdir(path_repo_and_csvfiles):
#print('checking', a, 'with', outputname_userinput) #log
# if a.startswith(outputname_userinput + '0') or a.startswith(outputname_userinput + '1'): not needed
if a.startswith(outputname_userinput):
list_existingoutputfiles1.append(a)
#print('list_existingoutputfiles1 (prepare_variables1_csv) 1', list_existingoutputfiles1) #log
# 1.5 - don't use on AWS b/c it prompts user input
# warning_maxoutputexceeded(list_existingoutputfiles1, max_output_amount)
# 2,3
if len(list_existingoutputfiles1) >= max_output_amount:
new_ref_number = max_output_amount
else:
new_ref_number = len(list_existingoutputfiles1) + 1
#print('new_ref_number: ', new_ref_number) #log
# 4
if storagetype == "mysql":
if new_ref_number < 10:
outputname_generated = outputname_userinput + '00' + str(new_ref_number)
elif new_ref_number >= 10 and new_ref_number < 100:
outputname_generated = outputname_userinput + '0' + str(new_ref_number)
elif new_ref_number >= 100 and new_ref_number < 1000:
outputname_generated = outputname_userinput + str(new_ref_number)
#print('outputname_generated:', outputname_generated) #log
if storagetype == "csv":
if new_ref_number < 10:
outputname_generated = path_repo_and_csvfiles + "/" + outputname_userinput + '00' + str(new_ref_number) + '.csv'
elif new_ref_number >= 10 and new_ref_number < 100:
outputname_generated = path_repo_and_csvfiles + "/" + outputname_userinput + '0' + str(new_ref_number) + '.csv'
elif new_ref_number >= 100 and new_ref_number < 1000:
outputname_generated = path_repo_and_csvfiles + "/" + outputname_userinput + str(new_ref_number) + '.csv'
#print('outputname_generated', outputname_generated) #log
return outputname_generated, list_existingoutputfiles1, new_ref_number
# JUST ADDED
def prepare_variables1_sql_parentandchildtables(outputname_userinput, max_output_amount):
'''*****************************************************************************
### 0 - check if database, parent table, child table exist or doesn't exist yet
### 1 - get new ref number (set as 1 if tables dont exist OR get latest/highest parenttable_id from child table if exist)
### 2 - adjust the new ref number (parenttable_id + 1) -- (using max_output = 10: 10 if parenttable_id = 10 there already, 10 if 9 there already, 9 if 8 files there already, 1 if 0, 2 if 1)
### parameters: outputname_userinput, max_output_amount
### return variables: new_ref_number
*****************************************************************************'''
exists_database, exists_parenttable, exists_childtable = check_exists_3tables(outputname_userinput)
print("------------------------------------------------------")
print('prepare_variables1_sql_parentandchildtables()')
print('outputname_userinput:', outputname_userinput)
print(f"exists_database={exists_database} | exists_parenttable={exists_parenttable} | exists_childtable={exists_childtable}")
### 1
new_ref_number = 0
#select parenttable_id from result_all_child order by parenttable_id desc limit 1;
#xxx 1
if exists_database == False and exists_parenttable == False and exists_childtable == False:
#preparevariables1 = ok
#step 0: set ref number as 0 (+ 1)
new_ref_number = 0
#0xx 2
if exists_database == True and exists_parenttable == False and exists_childtable == False:
#preparevariables1 = ok
#step 0: set ref number as 0 (+ 1)
new_ref_number = 0
#0x0 2
if exists_database == True and exists_parenttable == False and exists_childtable == True:
#preparevariables1 = ok
#step 0: get latest/highest parenttable_id from child table for ref number
sql = f"select parenttable_id from {db_name1}.{outputname_userinput}child order by parenttable_id ASC;"
cursor.execute(sql)
result = cursor.fetchall()
# pprint.pprint(result)
#turn into list
list_existingoutputfiles1 = [list(a.values())[0] for a in result]
# remove duplicates
list_existingoutputfiles1 = list(dict.fromkeys(list_existingoutputfiles1))
print(list_existingoutputfiles1)
if list_existingoutputfiles1 == []:
new_ref_number = 0
else:
# new_ref_number = list_existingoutputfiles1[-1]
new_ref_number = len(list_existingoutputfiles1)
#00x 3
if exists_database == True and exists_parenttable == True and exists_childtable == False:
#preparevariables1 = ok
#step 0: set ref number as 0 (+ 1)
new_ref_number = 0
#000 2
if exists_database == True and exists_parenttable == True and exists_childtable == True:
#preparevariables1 = ok
#step 0: get latest/highest parenttable_id from child table for ref number
sql = f"select parenttable_id from {db_name1}.{outputname_userinput}child order by parenttable_id ASC;"
cursor.execute(sql)
result = cursor.fetchall()
# pprint.pprint(result)
#turn into list
list_existingoutputfiles1 = [list(a.values())[0] for a in result]
# remove duplicates
list_existingoutputfiles1 = list(dict.fromkeys(list_existingoutputfiles1))
print(list_existingoutputfiles1)
if list_existingoutputfiles1 == []:
new_ref_number = 0
else:
# new_ref_number = list_existingoutputfiles1[-1]
new_ref_number = len(list_existingoutputfiles1)
### 2
if new_ref_number >= max_output_amount:
print(f'limiting new_ref_number from {new_ref_number} to {max_output_amount}')
new_ref_number = max_output_amount
else:
new_ref_number += 1
print("new_ref_number set as", new_ref_number)
return new_ref_number
# OK
def prepare_variables2_additional_info(subs, marketcap_max):
dt_string = datetime.datetime.now().strftime("%m/%d/%Y %H:%M")
info_subcount = 'Sub count: ' + str(len(subs))
if marketcap_max > 2000000000000:
info_marketCap_limit = 'Market Cap min: >2 trillions'
else:
info_marketCap_limit = 'Market Cap min: ' + str(marketcap_max/1000000000) + ' billion(s)'
subreddit_count = len(subs)
return dt_string, info_subcount, info_marketCap_limit, subreddit_count
# OK
def print_logs1(dt_string, outputname_generated, info_subcount, info_marketCap_limit, us):
if isPrint_logs == True:
print("------------------------------------------------------")
print("Date and Time: " + dt_string + " (Beg main)")
print('Path outputname_userinput: ' + outputname_generated)
print(info_subcount)
print(info_marketCap_limit)
print('Number of tickers found (from input): ' + str(len(us)))
# OK
def data_extractor(reddit, subs, us):
##def data_extractor(reddit):
'''extracts all the data from reddit
    Parameter: reddit: reddit obj
Return: posts, c_analyzed, tickers, titles, a_comments, picks, subs, picks_ayz
posts: int: # of posts analyzed
c_analyzed: int: # of comments analyzed
tickers: dict: all the tickers found
titles: list: list of the title of posts analyzed
a_comments: dict: all the comments to analyze
picks: int: top picks to analyze
subs: int: # of subreddits analyzed
picks_ayz: int: top picks to analyze
'''
'''############################################################################'''
#default
#subs =
post_flairs = {'Daily Discussion', 'Weekend Discussion', 'Discussion'} # posts flairs to search || None flair is automatically considered
goodAuth = {'AutoModerator'} # authors whom comments are allowed more than once
uniqueCmt = True # allow one comment per author per symbol
ignoreAuthP = {'example'} # authors to ignore for posts
ignoreAuthC = {'example'} # authors to ignore for comment
upvoteRatio = 0.5 # upvote ratio for post to be considered, 0.70 = 70%
ups = 1 # define # of upvotes, post is considered if upvotes exceed this # #20
limit = 1 # define the limit, comments 'replace more' limit
    upvotes = 1 # define # of upvotes; a comment is considered if its upvotes exceed this #20
picks = 50 # define # of picks here, prints as "Top ## picks are:" 10
picks_ayz = 25 # define # of picks for sentiment analysis 5
info_parameters = "upvoteRatio: " + str(upvoteRatio) + " | ups: " + str(ups) + " | limit: " + str(limit) + " | upvotes: " + str(upvotes) + " | picks: " + str(picks) + " | picks_ayz: " + str(picks_ayz) #logprint
if isPrint_logs == True:
print(info_parameters)
print()
'''############################################################################'''
posts, count, c_analyzed, tickers, titles, a_comments = 0, 0, 0, {}, [], {}
cmt_auth = {}
for sub in subs:
try:
subreddit = reddit.subreddit(sub)
hot_python = subreddit.hot() # sorting posts by hot
# Extracting comments, symbols from subreddit
for submission in hot_python:
flair = submission.link_flair_text
####
####
#author = submission.author.name
#OR
try:
author = submission.author.name
except (AttributeError):
#if author == None: ###########possible fix
#print(str(submission.author.name), ' --> AttributeErrorignored')
continue
####
####
# checking: post upvote ratio # of upvotes, post flair, and author
if submission.upvote_ratio >= upvoteRatio and submission.ups > ups and (flair in post_flairs or flair is None) and author not in ignoreAuthP:
submission.comment_sort = 'new'
comments = submission.comments
titles.append(submission.title)
posts += 1
try:
submission.comments.replace_more(limit=limit)
for comment in comments:
# try except for deleted account?
try: auth = comment.author.name
except: pass
c_analyzed += 1
# checking: comment upvotes and author
if comment.score > upvotes and auth not in ignoreAuthC:
split = comment.body.split(" ")
for word in split:
word = word.replace("$", "")
# upper = ticker, length of ticker <= 5, excluded words,
if word.isupper() and len(word) <= 5 and word not in blacklist and word in us:
# unique comments, try/except for key errors
if uniqueCmt and auth not in goodAuth:
try:
if auth in cmt_auth[word]: break
except: pass
# counting tickers
if word in tickers:
tickers[word] += 1
a_comments[word].append(comment.body)
cmt_auth[word].append(auth)
count += 1
else:
tickers[word] = 1
cmt_auth[word] = [auth]
a_comments[word] = [comment.body]
count += 1
except Exception as e: print(e)
except Forbidden:
continue #SKIP SUBreddit that gives off 403 error>..?
return posts, c_analyzed, tickers, titles, a_comments, picks, subs, picks_ayz, info_parameters, upvoteRatio, ups, limit, upvotes
# OK
def print_helper(tickers, picks, c_analyzed, posts, subs, titles, time, start_time):
'''prints out top tickers, and most mentioned tickers
Parameter: tickers: dict: all the tickers found
picks: int: top picks to analyze
c_analyzed: int: # of comments analyzed
posts: int: # of posts analyzed
subs: int: # of subreddits analyzed
titles: list: list of the title of posts analyzed
time: time obj: top picks to analyze
start_time: time obj: prog start time
Return: symbols: dict: dict of sorted tickers based on mentions
times: list: include # of time top tickers is mentioned
top: list: list of top tickers
'''
#global top_picks #only needed for printing
# sorts the dictionary
symbols = dict(sorted(tickers.items(), key=lambda item: item[1], reverse = True))
top_picks = list(symbols.keys())[0:picks]
seconds_took = (time.time() - start_time) # used to time, before renaming to seconds_took
info_ittookxseconds = "It took {t:.2f} seconds to analyze {c} comments in {p} posts in {s} subreddits.".format(t=seconds_took, c=c_analyzed, p=posts, s=len(subs)) #log print
if isPrint_logs == True:
# print top picks
#print("It took {t:.2f} seconds to analyze {c} comments in {p} posts in {s} subreddits.".format(t=time, c=c_analyzed, p=posts, s=len(subs)))
#OR
#info_ittookxseconds
print(info_ittookxseconds)
#print("Posts analyzed saved in titles\n")
#for i in titles: print(i) # prints the title of the posts analyzed
print("{} most mentioned tickers: ".format(picks))
times = []
top = []
for i in top_picks:
#testing
# if top_picks.index(i) >= picks:
# break
#limit amount of symbols/picks printed
if isPrint_logs == True:
if top_picks.index(i) < 5: #only print up to 5
print("{}: {}".format(i,symbols[i]))
times.append(symbols[i])
top.append("{}: {}".format(i,symbols[i]))
return symbols, times, top, info_ittookxseconds, seconds_took
# OK
def sentiment_analysis(picks_ayz, a_comments, symbols, us):
##def sentiment_analysis(picks_ayz, a_comments, symbols)
    '''runs sentiment analysis on the top tickers
Parameter: picks_ayz: int: top picks to analyze
a_comments: dict: all the comments to analyze
symbols: dict: dict of sorted tickers based on mentions
Return: scores: dictionary: dictionary of all the sentiment analysis
'''
scores = {}
vader = SentimentIntensityAnalyzer()
vader.lexicon.update(new_words) # adding custom words from data.py
picks_sentiment = list(symbols.keys())[0:picks_ayz]
for symbol in picks_sentiment:
stock_comments = a_comments[symbol]
for cmnt in stock_comments:
emojiless = emoji.get_emoji_regexp().sub(u'', cmnt) # remove emojis
# remove punctuation
text_punc = "".join([char for char in emojiless if char not in string.punctuation])
text_punc = re.sub('[0-9]+', '', text_punc)
            # tokenizing and cleaning
            tokenizer = RegexpTokenizer(r'\w+|\$[\d\.]+|http\S+')
tokenized_string = tokenizer.tokenize(text_punc)
lower_tokenized = [word.lower() for word in tokenized_string] # convert to lower case
# remove stop words
nlp = en_core_web_sm.load()
stopwords = nlp.Defaults.stop_words
sw_removed = [word for word in lower_tokenized if not word in stopwords]
            # normalize the words using lemmatization
lemmatizer = WordNetLemmatizer()
lemmatized_tokens = ([lemmatizer.lemmatize(w) for w in sw_removed])
            # calculating the sentiment of every word in the comment and combining them
score_cmnt = {'neg': 0.0, 'neu': 0.0, 'pos': 0.0, 'compound': 0.0}
word_count = 0
for word in lemmatized_tokens:
if word.upper() not in us:
score = vader.polarity_scores(word)
word_count += 1
for key, _ in score.items():
score_cmnt[key] += score[key]
else:
score_cmnt['pos'] = 2.0
# calculating avg.
try: # handles: ZeroDivisionError: float division by zero
for key in score_cmnt:
score_cmnt[key] = score_cmnt[key] / word_count
except: pass
            # adding the score to the specific symbol
if symbol in scores:
for key, _ in score_cmnt.items():
scores[symbol][key] += score_cmnt[key]
else:
scores[symbol] = score_cmnt
# calculating avg.
for key in score_cmnt:
scores[symbol][key] = scores[symbol][key] / symbols[symbol]
scores[symbol][key] = "{pol:.3f}".format(pol=scores[symbol][key])
return scores
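# Standalone sketch of the per-word VADER scoring aggregated above (the sentence and the
# custom lexicon weight are made-up examples; the real custom words come from data.py):
'''
demo_vader = SentimentIntensityAnalyzer()
demo_vader.lexicon.update({'moon': 3.0})                # illustrative weight only
print(demo_vader.polarity_scores('TSLA to the moon'))   # keys: neg, neu, pos, compound
'''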
# OK
def visualization(picks_ayz, scores, picks, times, top):
'''prints sentiment analysis
makes a most mentioned picks chart
makes a chart of sentiment analysis of top picks
Parameter: picks_ayz: int: top picks to analyze
scores: dictionary: dictionary of all the sentiment analysis
picks: int: most mentioned picks
times: list: include # of time top tickers is mentioned
top: list: list of top tickers
Return: None
'''
# printing sentiment analysis
if isPrint_logs == True:
print("\nSentiment analysis of top {} picks:".format(picks_ayz))
df = pd.DataFrame(scores)
df.index = ['Bearish', 'Neutral', 'Bullish', 'Total/Compound']
df = df.T
print('df: ')
print(df)
#print(df.head(6).max())
    # Data Visualization
# most mentioned picks
squarify.plot(sizes=times, label=top, alpha=.7 )
plt.axis('off')
#plt.title(f"{picks} most mentioned picks")
plt.title("{} most mentioned picks".format(picks))
#plt.show()
# Sentiment analysis
df = df.astype(float)
colors = ['red', 'springgreen', 'forestgreen', 'coral']
#df.plot(kind = 'bar', color=colors, title=f"Sentiment analysis of top {picks_ayz} picks:")
#df.plot(kind = 'bar', color=colors, title="Sentiment analysis of top {} picks:".format(picks_ayz))
#plt.show()
    uselessvariable1 = 'placeholder variable so the commented-out plt.show() calls above collapse together with this function in the editor'
# OK
def print_logs2(symbols, scores):
'''*****************************************************************************
# Info logs for console program - additional info, optional
*****************************************************************************'''
if isPrint_logs == True:
print("print1.1: ", symbols, "n\\") #aka tickers, mention count, dict pair of tickers and mentions
print("print2: ", scores)
endingvar = None
# JUST ADDED
def create_missingtables_and_clearparenttable(outputname_userinput):
'''*****************************************************************************
### 0 - check if db/tables exist
### 1 - create db/tables (ones that are missing)
*****************************************************************************'''
exists_database, exists_parenttable, exists_childtable = check_exists_3tables(outputname_userinput)
print('\ncreate_missingtables_and_clearparenttable()')
print(f"exists_database={exists_database} | exists_parenttable={exists_parenttable} | exists_childtable={exists_childtable}")
query_db = f"CREATE DATABASE {db_name1}"
query_parent = f"CREATE TABLE {db_name1}.{outputname_userinput}parent (parenttable_id INT UNIQUE, subreddit_count INT, upvote_ratio DECIMAL(16, 1), ups INT, limit_reddit INT, upvotes INT, picks INT, picks_ayz INT, seconds_took DECIMAL(16, 1), comments_analyzed INT, datetime DATETIME, tickers_found INT, tickers_rsa INT, min_market_cap DECIMAL(16, 2), max_market_cap DECIMAL(16, 2));"
query_child = f"CREATE TABLE {db_name1}.{outputname_userinput}child (ticker_id INT, symbol TEXT, mentions INT, market_cap DECIMAL(16,2), latest_price DECIMAL(16,2), change_percent DECIMAL(16,2), pe_ratio DECIMAL(16,2), company_name TEXT, datetime DATETIME, parenttable_id INT);"
#xxx 1 = tested/ok
if exists_database == False and exists_parenttable == False and exists_childtable == False:
#step 1: create the database, parent, and child table (3 things) = ok
cursor.execute(query_db)
cursor.execute(query_parent)
cursor.execute(query_child)
print('xxx -> 000')
#0xx 2 = tested/ok
if exists_database == True and exists_parenttable == False and exists_childtable == False:
#step 1: create parent and child table (2 things) = ok
cursor.execute(query_parent)
cursor.execute(query_child)
print('0xx -> 000')
#0x0 2 = tested/ok
if exists_database == True and exists_parenttable == False and exists_childtable == True:
#step 1: create parent table (1 thing) = ok
cursor.execute(query_parent)
print('0x0 -> 000')
#00x 3 = tested/ok, should replace clear parent table part with mirror_outputs()
if exists_database == True and exists_parenttable == True and exists_childtable == False:
#step 1: clear parent table, create child table (2 things) = ok
query_clearparenttable = f"DELETE FROM {db_name1}.{outputname_userinput}parent;"
cursor.execute(query_clearparenttable)
cursor.execute(query_child)
print('00x -> 000')
#000 3 = tested/ok
if exists_database == True and exists_parenttable == True and exists_childtable == True:
print('000 -> 000')
# JUST ADDED
def setup_foreign_key_and_after_delete_trigger(outputname_userinput):
'''*****************************************************************************
### 0 - check if db, parent and child tables exist
### 1 - create fk/triggers
*****************************************************************************'''
exists_database, exists_parenttable, exists_childtable = check_exists_3tables(outputname_userinput)
print('\nsetup_foreign_key_and_after_delete_trigger()')
print(f"exists_database={exists_database} | exists_parenttable={exists_parenttable} | exists_childtable={exists_childtable}")
#000 = testing
if exists_database == True and exists_parenttable == True and exists_childtable == True:
# add foreign key
sql = f'ALTER TABLE {db_name1}.{outputname_userinput}child ADD CONSTRAINT fk_{outputname_userinput} FOREIGN KEY (parenttable_id) REFERENCES {db_name1}.{outputname_userinput}parent (parenttable_id) ON DELETE CASCADE;'
try:
cursor.execute(sql)
print("added foreign key fk_a1")
except Exception as e:
print(e)
# add trigger (after delete)
sql = '''
            CREATE TRIGGER {0}.{1}
AFTER DELETE ON {3}
FOR EACH ROW
begin
DELETE FROM {2} p
WHERE p.parenttable_id = OLD.parenttable_id
AND
( SELECT COUNT(CASE WHEN {3}.parenttable_id = OLD.parenttable_id THEN 1 END) FROM {3} ) = 0;
end;
'''.format(f"{db_name1}", f"trigger_{outputname_userinput}", f"{db_name1}.{outputname_userinput}parent", f"{db_name1}.{outputname_userinput}child")
# print(sql)
try:
cursor.execute(sql)
print("added trigger (after delete)")
except Exception as e:
print(e)
# OLD
def deleteandrename_existingoutputfiles_csv_and_sql(storagetype, list_existingoutputfiles1, max_output_amount, outputname_userinput):
'''*****************************************************************************
# Manage result files for proper numbering and up-to-date content
#1 Delete first excessive result files (if result files exceed maximum allowed) = ok
#2 Adjust other result files' numbers (ex: 2-10 to 1-9.. up to max_output_amount) = ok
*****************************************************************************'''
# log
if storagetype == "mysql":
cursor.execute(f"SELECT table_name FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '{db_name1}' AND table_name like '%{outputname_userinput}%';")
myresult = cursor.fetchall()
previewlist_existingoutputfiles1 = [list(a.values())[0] for a in myresult]
print('list_existingoutputfiles1 (...)'.ljust(55), previewlist_existingoutputfiles1) #log
if storagetype == "csv":
previewlist_existingoutputfiles1 = []
for a in os.listdir(path_repo_and_csvfiles):
if a.startswith(outputname_userinput):
previewlist_existingoutputfiles1.append(a)
print('list_existingoutputfiles1 (...)'.ljust(55), previewlist_existingoutputfiles1) #log
#1
if storagetype == "mysql":
while True:
if len(list_existingoutputfiles1) >= max_output_amount:
#delete first table - sql
cursor.execute(f"DROP TABLE {db_name1}.{list_existingoutputfiles1[0]};")
#reinitialize list of tables - sql
list_existingoutputfiles1 = []
cursor.execute(f"SELECT table_name FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '{db_name1}' AND table_name like '%{outputname_userinput}%';")
myresult = cursor.fetchall()
list_existingoutputfiles1 = [list(a.values())[0] for a in myresult]
else:
break
if storagetype == "csv":
while True:
if len(list_existingoutputfiles1) >= max_output_amount:
#delete first table - csv
delete_file = path_repo_and_csvfiles + "/" + list_existingoutputfiles1[0]
os.remove(delete_file)
#reinitialize list of tables - csv
list_existingoutputfiles1 = []
for a in os.listdir(path_repo_and_csvfiles):
#print('checking', a, 'with', outputname_userinput) #log
if a.startswith(outputname_userinput):
list_existingoutputfiles1.append(a)
else:
break
#log
if storagetype == "mysql":
cursor.execute(f"SELECT table_name FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '{db_name1}' AND table_name like '%{outputname_userinput}%';")
myresult = cursor.fetchall()
previewlist_existingoutputfiles1 = [list(a.values())[0] for a in myresult]
print('list_existingoutputfiles1 (after del excess) '.ljust(55), previewlist_existingoutputfiles1) #log
if storagetype == "csv":
previewlist_existingoutputfiles1 = []
for a in os.listdir(path_repo_and_csvfiles):
if a.startswith(outputname_userinput):
previewlist_existingoutputfiles1.append(a)
print('list_existingoutputfiles1 (after del excess) '.ljust(55), previewlist_existingoutputfiles1) #log
#2
if storagetype == "mysql":
for a in list_existingoutputfiles1:
try:
num_file = list_existingoutputfiles1.index(a) + 1 #adjust from 0 to 1
old_filename = f"{db_name1}.{a}"
if num_file < 10:
new_filename = f"{db_name1}.{outputname_userinput}00{num_file}"
elif num_file >= 10 and num_file < 100:
new_filename = f"{db_name1}.{outputname_userinput}0{num_file}"
elif num_file >= 100 and num_file < 1000:
new_filename = f"{db_name1}.{outputname_userinput}{num_file}"
cursor.execute(f"RENAME TABLE {old_filename} TO {new_filename};")
except:
continue #skip FileNotFoundError (csv) or error about filename already existing (sql)
if storagetype == "csv":
for a in list_existingoutputfiles1:
try:
num_file = list_existingoutputfiles1.index(a) + 1 #adjust from 0 to 1
old_filename = pathlib.Path(path_repo_and_csvfiles + "/" + a)
if num_file < 10:
new_filename = pathlib.Path(path_repo_and_csvfiles + "/" + outputname_userinput + '00'+str(num_file)+'.csv')
elif num_file >= 10 and num_file < 100:
new_filename = pathlib.Path(path_repo_and_csvfiles + "/" + outputname_userinput + '0'+str(num_file)+'.csv')
elif num_file >= 100 and num_file < 1000:
new_filename = pathlib.Path(path_repo_and_csvfiles + "/" + outputname_userinput +str(num_file)+'.csv')
os.rename(old_filename, new_filename)
except:
continue #skip FileNotFoundError (csv) or error about filename already existing (sql)
#log
if storagetype == "mysql":
cursor.execute(f"SELECT table_name FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '{db_name1}' AND table_name like '%{outputname_userinput}%';")
myresult = cursor.fetchall()
previewlist_existingoutputfiles1 = [list(a.values())[0] for a in myresult]
print('list_existingoutputfiles1 (after num correction)'.ljust(55), previewlist_existingoutputfiles1) #log
if storagetype == "csv":
previewlist_existingoutputfiles1 = []
for a in os.listdir(path_repo_and_csvfiles):
if a.startswith(outputname_userinput):
previewlist_existingoutputfiles1.append(a)
print('list_existingoutputfiles1 (after num correction)'.ljust(55), previewlist_existingoutputfiles1) #log
# JUST ADDED
def deleteandrename_existingoutputs_sql_parenttable(max_output_amount, outputname_userinput):
print('\ndeleteandrename_existingoutputs_sql_parenttable()')
#get list of parenttable ids
sql = f"select parenttable_id from {db_name1}.{outputname_userinput}parent order by parenttable_id ASC;"
cursor.execute(sql)
result = cursor.fetchall()
# pprint.pprint(result)
# turn into list
list_existingoutputfiles1 = [list(a.values())[0] for a in result]
# remove duplicates
list_existingoutputfiles1 = list(dict.fromkeys(list_existingoutputfiles1))
print('start', list_existingoutputfiles1)
#delete (also deletes rows from child table thru FK)
while True:
if len(list_existingoutputfiles1) >= max_output_amount:
#delete first rows - sql
cursor.execute(f"DELETE FROM {db_name1}.{outputname_userinput}parent where parenttable_id = {list_existingoutputfiles1[0]};")
#reinitialize list of parenttable ids
list_existingoutputfiles1 = []
sql = f"select parenttable_id from {db_name1}.{outputname_userinput}parent order by parenttable_id ASC;"
cursor.execute(sql)
result = cursor.fetchall()
# pprint.pprint(result)
#turn into list
list_existingoutputfiles1 = [list(a.values())[0] for a in result]
# remove duplicates
list_existingoutputfiles1 = list(dict.fromkeys(list_existingoutputfiles1))
else:
break
print('trimmed', list_existingoutputfiles1)
#rename
cursor.execute('SET FOREIGN_KEY_CHECKS=0;') #disable (only when updating parent table's rows (not needed when deleting))
for a in list_existingoutputfiles1:
new_parenttable_id = list_existingoutputfiles1.index(a) + 1 #adjust from 0 to 1
#old_parenttable_id = a
sql = f"UPDATE {db_name1}.{outputname_userinput}parent SET parenttable_id = {new_parenttable_id} where parenttable_id = {a};"
cursor.execute(sql)
cursor.execute('SET FOREIGN_KEY_CHECKS=1;') #re-enable (only when updating parent table's rows (not needed when deleting))
#reinitialize list of parenttable ids
list_existingoutputfiles1 = []
sql = f"select parenttable_id from {db_name1}.{outputname_userinput}parent order by parenttable_id ASC;"
cursor.execute(sql)
result = cursor.fetchall()
# pprint.pprint(result)
# turn into list
list_existingoutputfiles1 = [list(a.values())[0] for a in result]
# remove duplicates
list_existingoutputfiles1 = list(dict.fromkeys(list_existingoutputfiles1))
print('renamed', list_existingoutputfiles1)
# JUST ADDED
def deleteandrename_existingoutputs_sql_childtable(max_output_amount, outputname_userinput):
print('\ndeleteandrename_existingoutputs_sql_childtable()')
#get list of parenttable ids
sql = f"select parenttable_id from {db_name1}.{outputname_userinput}child order by parenttable_id ASC;"
cursor.execute(sql)
result = cursor.fetchall()
# pprint.pprint(result)
# turn into list
list_existingoutputfiles1 = [list(a.values())[0] for a in result]
# remove duplicates
list_existingoutputfiles1 = list(dict.fromkeys(list_existingoutputfiles1))
print('start', list_existingoutputfiles1)
#delete, (already deleted by parent table thru FK)
while True:
if len(list_existingoutputfiles1) >= max_output_amount:
#delete first rows - sql
cursor.execute(f"DELETE FROM {db_name1}.{outputname_userinput}child where parenttable_id = {list_existingoutputfiles1[0]};")
#reinitialize list of parenttable ids
list_existingoutputfiles1 = []
sql = f"select parenttable_id from {db_name1}.{outputname_userinput}child order by parenttable_id ASC;"
cursor.execute(sql)
result = cursor.fetchall()
# pprint.pprint(result)
# turn into list
list_existingoutputfiles1 = [list(a.values())[0] for a in result]
# remove duplicates
list_existingoutputfiles1 = list(dict.fromkeys(list_existingoutputfiles1))
else:
break
print('trimmed', list_existingoutputfiles1, '(already trimmed using FK constraint, no change)')
#rename
for a in list_existingoutputfiles1:
new_parenttable_id = list_existingoutputfiles1.index(a) + 1 #adjust from 0 to 1
#old_parenttable_id = a
sql = f"UPDATE {db_name1}.{outputname_userinput}child SET parenttable_id = {new_parenttable_id} where parenttable_id = {a};"
cursor.execute(sql)
#reinitialize list of parenttable ids
list_existingoutputfiles1 = []
sql = f"select parenttable_id from {db_name1}.{outputname_userinput}child order by parenttable_id ASC;"
cursor.execute(sql)
result = cursor.fetchall()
# pprint.pprint(result)
# turn into list
list_existingoutputfiles1 = [list(a.values())[0] for a in result]
# remove duplicates
list_existingoutputfiles1 = list(dict.fromkeys(list_existingoutputfiles1))
print('renamed', list_existingoutputfiles1)
# OLD
def reformatandaddinfoto_symbolsdict(symbols):
#add function that adds details for list of found symbols
#reformat symbols dict
for k,v in symbols.items():
symbols[k] = {"mentions": v}
    #update symbols dict (add info like marketCap, latestPrice)
for k,v in symbols.items():
time.sleep(0.4)
#url = 'https://cloud.iexapis.com/stable/stock/' + str(k) + '/quote' + IEX_TOKEN
url = 'https://sandbox.iexapis.com/stable/stock/' + str(k) + '/quote' + IEX_TOKEN_SANDBOX
r = requests.get(url)
# print(r)
try:
j = r.json()
# print(j)
try:
j_val = j["marketCap"]
j_val = "$%.2f" % (j_val/1000000000) + "B" #{symbol: $20.00B}
symbols[k].update({"marketCap": j_val})
except:
# dict_symbolmc[str(k)] = 'None/possible crypto'
symbols[k].update({"marketCap": "NA/crypto"})
try:
j_val = j["latestPrice"]
j_val = "$%.2f" % (j_val) #{symbol: $20.00}
symbols[k].update({"latestPrice": j_val})
except:
symbols[k].update({"latestPrice": "NA/crypto"})
try:
j_val = j["changePercent"]
j_val = "%.2f" % (j_val*100) + "%" #{symbol: 0.02%}
symbols[k].update({"changePercent": j_val})
except:
symbols[k].update({"changePercent": "NA/crypto"})
try:
j_val = j["companyName"]
symbols[k].update({"companyName": j_val})
except:
symbols[k].update({"companyName": "NA/crypto"})
try:
j_val = j["peRatio"]
if j_val == "" or j_val == None: j_val = "NA"
symbols[k].update({"peRatio": j_val})
except:
symbols[k].update({"peRatio": "NA/crypto"})
except Exception as e:
print(e, '--', k, r.reason)
continue #try to bypass json.decoder error
# pprint.pprint(symbols)
# return symbols
endingvar = None
#gives raw int, instead of string number with $ or %
def reformatandaddinfoto_symbolsdict2(symbols, marketcap_min, marketcap_max):
#shorten
# #delete symbols based on marketcap (probably better after RSA because the symbol list is now 300-ish, instead of 11,000)
print("\nreformatandaddinfoto_symbolsdict2()")
list_removesymbols = []
for k in symbols.keys():
time.sleep(0.4)
url = 'https://sandbox.iexapis.com/stable/stock/' + str(k) + '/quote' + IEX_TOKEN_SANDBOX
r = requests.get(url)
try:
j = r.json()
j_val = j["marketCap"]
if j_val < marketcap_min or j_val > marketcap_max:
if j_val != 0:
list_removesymbols.append(k)
except Exception as e:
print(e, k, j_val)
continue #try avoid json error
for i in list_removesymbols:
del symbols[i]
print("removed symbols w/ >" + str(marketcap_max))
print("list_removesymbols", list_removesymbols)
print("symbols:", symbols)
#reformat symbols dict
for k,v in symbols.items():
symbols[k] = {"mentions": v}
#update symbols dict (add info like marketCap, latestPrice) or (delete symbols based on marketCap)
count_Null = 0
for k,v in symbols.items():
time.sleep(0.4)
#url = 'https://cloud.iexapis.com/stable/stock/' + str(k) + '/quote' + IEX_TOKEN
url = 'https://sandbox.iexapis.com/stable/stock/' + str(k) + '/quote' + IEX_TOKEN_SANDBOX
r = requests.get(url)
# print(r)
try:
j = r.json()
# print(j)
for a in ["marketCap", "latestPrice", "changePercent", "companyName", "peRatio"]:
j_val = j[a]
# if j_val == None or j_val == 0:
if j_val == None:
# print('note: j_val == ""/None: ', j_val, type(j_val))
symbols[k].update({a: NULL})
count_Null += 1
continue
if a == "changePercent":
j_val *= 100
# if a == "marketCap" and j_val > 1000000000 and j_val != 0:
# print("deleted ", k, symbols[k], j_val)
# del symbols[k]
# break
symbols[k].update({a: j_val})
# print('note: j_val == : ', j_val, type(j_val))
except Exception as e:
print(e, '--', k, r.reason)
for a in ["marketCap", "latestPrice", "changePercent", "companyName", "peRatio"]:
symbols[k].update({a: NULL})
continue #try to bypass json.decoder error
print("NULL count: " + str(count_Null))
# pprint.pprint(symbols)
# return symbols
endingvar = None
# OLD
def add_newoutputfile_csv_and_sql_empty(storagetype, outputname_generated, dt_string):
'''*****************************************************************************
#1 Create new output file, using outputname_generated
#2 Insert result and additional info
*****************************************************************************'''
if storagetype == "mysql":
# #1
cursor.execute(f"CREATE TABLE {db_name1}.{outputname_generated} (tickerId INT, symbol TEXT, mentions INT, marketCap DECIMAL(16,2), latestPrice DECIMAL(16,2), changePercent DECIMAL(16,2), peRatio DECIMAL(16,2), companyName TEXT, tableId INT, PRIMARY KEY (tickerId));")
if storagetype == "csv":
#1
if write_empty_newoutputfile == True:
with open(outputname_generated, 'w', newline='') as f:
writer = csv.writer(f)
writer.writerow(['Date and time: ' + dt_string])
writer.writerow(['Empty file'])
# OLD
def add_newoutputfile_csv_and_sql(storagetype, outputname_generated, dt_string, info_subcount, info_marketCap_limit, info_parameters, info_ittookxseconds, symbols):
'''*****************************************************************************
#1 Create new output file, using outputname_generated
#2 Insert result and additional info
*****************************************************************************'''
if storagetype == "mysql":
# #1
# cursor.execute(f"CREATE TABLE {db_name1}.{outputname_generated} (Number INT, Symbols TEXT, Mentions INT, marketCap TEXT, latestPric TEXT, changePerc TEXT, peRatio TEXT, companyNam TEXT, PRIMARY KEY (Number));")
# #1 - improved
cursor.execute(f"CREATE TABLE {db_name1}.{outputname_generated} (Analysis_Id INT, Symbols TEXT, Mentions INT, marketCap DECIMAL(16,2), latestPrice DECIMAL(16,2), changePerc DECIMAL(16,2), peRatio DECIMAL(16,2), companyNam TEXT, Table_Id INT, PRIMARY KEY (Analysis_Id));")
#2
info_tickernumber = 1
for k,v in symbols.items():
coldata_00 = '%-10s' % info_tickernumber
coldata_01 = "%-10s" % k
coldata_02 = "%10s" % v.get('mentions')
# coldata_03 = "%10s" % senti.get('neg')
# coldata_04 = "%10s" % senti.get('neu')
# coldata_05 = "%10s" % senti.get('pos')
# coldata_06 = "%10s" % senti.get('compound')
coldata_07 = "%10s" % v.get('marketCap')
coldata_08 = "%10s" % v.get('latestPrice')
coldata_09 = "%10s" % v.get('changePercent')
coldata_10 = "%10s" % v.get('peRatio')
coldata_11 = "%10s" % v.get('companyName')
cursor.execute(f"INSERT INTO {db_name1}.{outputname_generated} (Number, Symbols, Mentions, marketCap, latestPric, changePerc, peRatio, companyNam) VALUES ('{coldata_00}', '{coldata_01}', '{coldata_02}', '{coldata_07}', '{coldata_08}', '{coldata_09}', '{coldata_10}', '{coldata_11}');" )
info_tickernumber += 1
connection.commit()
if storagetype == "csv":
#1 and 2 (should try separating into 1 and 2)
with open(outputname_generated, 'w', newline='') as f:
writer = csv.writer(f)
writer.writerow(['Date and time: ' + dt_string])
writer.writerow([info_subcount])
writer.writerow([info_marketCap_limit])
writer.writerow([info_parameters])
writer.writerow([info_ittookxseconds])
writer.writerow(['number of tickers: ' + str(len(symbols))])
writer.writerow([])
maxlength_string = 10
col_00 = '%-10s' % 'Number'[:maxlength_string]
col_01 = "%-10s" % 'Symbols'[:maxlength_string]
col_02 = "%10s" % 'Mentions'[:maxlength_string]
# col_03 = "%10s" % 'Bearish'[:maxlength_string]
# col_04 = "%10s" % 'Neutral'[:maxlength_string]
# col_05 = "%10s" % 'Bullish'[:maxlength_string]
# col_06 = "%10s" % 'Total/Comp'[:maxlength_string]
col_07 = "%10s" % 'marketCap'[:maxlength_string]
col_08 = "%10s" % 'latestPrice'[:maxlength_string]
col_09 = "%10s" % 'changePercent'[:maxlength_string]
col_10 = "%10s" % 'peRatio'[:maxlength_string]
col_11 = "%10s" % 'companyName'[:maxlength_string]
#writer.writerow([col_00,col_01,col_02,col_03,col_04,col_05,col_06,col_07,col_08,col_09,col_10,col_11])
writer.writerow([col_00,col_01,col_02,
col_07,col_08,col_09,col_10,col_11])
info_tickernumber = 1
for k,v in symbols.items():
try:
coldata_00 = '%-10s' % info_tickernumber
coldata_01 = "%-10s" % k
coldata_02 = "%10s" % v.get('mentions')
# coldata_03 = "%10s" % senti.get('neg')
# coldata_04 = "%10s" % senti.get('neu')
# coldata_05 = "%10s" % senti.get('pos')
# coldata_06 = "%10s" % senti.get('compound')
coldata_07 = "%10s" % v.get('marketCap')
coldata_08 = "%10s" % v.get('latestPrice')
coldata_09 = "%10s" % v.get('changePercent')
coldata_10 = "%10s" % v.get('peRatio')
coldata_11 = "%10s" % v.get('companyName')
writer.writerow([coldata_00, coldata_01, coldata_02,
coldata_07, coldata_08, coldata_09, coldata_10, coldata_11])
info_tickernumber += 1
except AttributeError:
#colx_00 = '%-10s' % info_tickernumber
#k_ = "%-10s" % k
#v_ = "%10s" % v
#neg_ = "%10s" % 'X'
#neu_ = "%10s" % 'X'
#pos_ = "%10s" % 'X'
#compound_ = "%10s" % 'X'
#writer.writerow([colx_00, k_, v_, neg_, neu_, pos_, compound_,mc_, price_, pctchange_, name_])
#writer.writerow([colx_00, k_, v_,mc_, price_, pctchange_, name_])
continue
# OLD
def add_newoutputfile_csv_and_sql2(new_ref_number, storagetype, outputname_generated, dt_string, info_subcount, info_marketCap_limit, info_parameters, info_ittookxseconds, symbols):
'''*****************************************************************************
#1 Create new output file, using outputname_generated
#2 Insert result and additional info
*****************************************************************************'''
if storagetype == "mysql":
# #1
cursor.execute(f"CREATE TABLE {db_name1}.{outputname_generated} (tickerId INT, symbol TEXT, mentions INT, marketCap DECIMAL(16,2), latestPrice DECIMAL(16,2), changePercent DECIMAL(16,2), peRatio DECIMAL(16,2), companyName TEXT, tableId INT, PRIMARY KEY (tickerId));")
#2
info_tickernumber = 1
for k,v in symbols.items():
coldata_00 = info_tickernumber
coldata_01 = "'%s'" % k
if coldata_01 == "'NULL'": coldata_01 = "NULL"
coldata_02 = v.get('mentions')
# coldata_03 = senti.get('neg')
# coldata_04 = senti.get('neu')
# coldata_05 = senti.get('pos')
# coldata_06 = senti.get('compound')
coldata_07 = v.get('marketCap')
coldata_08 = v.get('latestPrice')
coldata_09 = v.get('changePercent')
coldata_10 = v.get('peRatio')
coldata_11 = "'%s'" % v.get('companyName')
if coldata_11 == "'NULL'": coldata_11 = "NULL"
coldata_12 = new_ref_number
# use %-formatting instead of an f-string so the bare string NULL is inserted unquoted (i.e. as SQL NULL)
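# e.g. query1 % (table, 1, "NULL", ...) renders the bare token NULL into the VALUES clause,
# so MySQL stores a real NULL rather than the literal string 'NULL'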
query1="INSERT INTO %s (tickerId, symbol, mentions, marketCap, latestPrice, changePercent, peRatio, companyName, tableId) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)"
query1 = query1 % (f"{db_name1}.{outputname_generated}", coldata_00, coldata_01, coldata_02,
coldata_07, coldata_08, coldata_09, coldata_10, coldata_11, coldata_12)
try: cursor.execute(query1)
except: print("error:",query1)
info_tickernumber += 1
connection.commit()
if storagetype == "csv":
#1 and 2 (should try separating into 1 and 2)
with open(outputname_generated, 'w', newline='') as f:
writer = csv.writer(f)
writer.writerow(['Date and time: ' + dt_string])
writer.writerow([info_subcount])
writer.writerow([info_marketCap_limit])
writer.writerow([info_parameters])
writer.writerow([info_ittookxseconds])
writer.writerow(['number of tickers: ' + str(len(symbols))])
writer.writerow([])
maxlength_string = 10
col_00 = '%-10s' % 'Number'[:maxlength_string]
col_01 = "%-10s" % 'Symbols'[:maxlength_string]
col_02 = "%10s" % 'Mentions'[:maxlength_string]
# col_03 = "%10s" % 'Bearish'[:maxlength_string]
# col_04 = "%10s" % 'Neutral'[:maxlength_string]
# col_05 = "%10s" % 'Bullish'[:maxlength_string]
# col_06 = "%10s" % 'Total/Comp'[:maxlength_string]
col_07 = "%10s" % 'marketCap'[:maxlength_string]
col_08 = "%10s" % 'latestPrice'[:maxlength_string]
col_09 = "%10s" % 'changePercent'[:maxlength_string]
col_10 = "%10s" % 'peRatio'[:maxlength_string]
col_11 = "%10s" % 'companyName'[:maxlength_string]
#writer.writerow([col_00,col_01,col_02,col_03,col_04,col_05,col_06,col_07,col_08,col_09,col_10,col_11])
writer.writerow([col_00,col_01,col_02,
col_07,col_08,col_09,col_10,col_11])
info_tickernumber = 1
for k,v in symbols.items():
try:
coldata_00 = '%-10s' % info_tickernumber
coldata_01 = "%-10s" % k
coldata_02 = "%10s" % v.get('mentions')
# coldata_03 = "%10s" % senti.get('neg')
# coldata_04 = "%10s" % senti.get('neu')
# coldata_05 = "%10s" % senti.get('pos')
# coldata_06 = "%10s" % senti.get('compound')
coldata_07 = "%10s" % v.get('marketCap')
coldata_08 = "%10s" % v.get('latestPrice')
coldata_09 = "%10s" % v.get('changePercent')
coldata_10 = "%10s" % v.get('peRatio')
coldata_11 = "%10s" % v.get('companyName')
writer.writerow([coldata_00, coldata_01, coldata_02,
coldata_07, coldata_08, coldata_09, coldata_10, coldata_11])
info_tickernumber += 1
except AttributeError:
#colx_00 = '%-10s' % info_tickernumber
#k_ = "%-10s" % k
#v_ = "%10s" % v
#neg_ = "%10s" % 'X'
#neu_ = "%10s" % 'X'
#pos_ = "%10s" % 'X'
#compound_ = "%10s" % 'X'
#writer.writerow([colx_00, k_, v_, neg_, neu_, pos_, compound_,mc_, price_, pctchange_, name_])
#writer.writerow([colx_00, k_, v_,mc_, price_, pctchange_, name_])
continue
def add_newoutputfile_parenttable_empty(new_ref_number, outputname_userinput, time1_rsafinished):
print("\nadd_newoutputfile_parenttable()")
print(db_name1, new_ref_number, outputname_userinput)
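# NOTE: the column values below are hardcoded placeholders used when recording an "empty" run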
sql = f"INSERT INTO {db_name1}.{outputname_userinput}parent (parenttable_id, subreddit_count, upvote_ratio, ups, limit_reddit, upvotes, picks, picks_ayz, seconds_took, comments_analyzed, datetime, tickers_found, tickers_rsa, min_market_cap, max_market_cap) VALUES ({new_ref_number}, 64, 0.5, 20, 1, 2, 100, 100, 800.91, 480, '{time1_rsafinished}', 11796, 350, 1000, 4000000000);"
cursor.execute(sql)
connection.commit()
#preview list of parenttable ids
list_existingoutputfiles1 = []
sql = f"select parenttable_id from {db_name1}.{outputname_userinput}parent order by parenttable_id ASC;"
cursor.execute(sql)
result = cursor.fetchall()
# pprint.pprint(result)
# turn into list
list_existingoutputfiles1 = [list(a.values())[0] for a in result]
# remove duplicates
list_existingoutputfiles1 = list(dict.fromkeys(list_existingoutputfiles1))
print('end', list_existingoutputfiles1)
# JUST ADDED
def add_newoutputfile_childtable_empty(new_ref_number, outputname_userinput, time1_rsafinished):
print("\add_newoutputfile_childtable_empty()")
query1="INSERT INTO %schild (ticker_id, symbol, mentions, market_cap, latest_price, change_percent, pe_ratio, company_name, datetime, parenttable_id) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, '%s', %s)"
query1 = query1 % (f"{db_name1}.{outputname_userinput}", 1, "NULL", "NULL", "NULL", "NULL", "NULL", "NULL", "NULL", time1_rsafinished, new_ref_number)
cursor.execute(query1)
connection.commit()
#preview list of parenttable ids
list_existingoutputfiles1 = []
sql = f"select parenttable_id from {db_name1}.{outputname_userinput}child order by parenttable_id ASC;"
cursor.execute(sql)
result = cursor.fetchall()
# pprint.pprint(result)
# turn into list
list_existingoutputfiles1 = [list(a.values())[0] for a in result]
# remove duplicates
list_existingoutputfiles1 = list(dict.fromkeys(list_existingoutputfiles1))
print('end', list_existingoutputfiles1)
# JUST ADDED
def add_newoutputfile_parenttable(outputname_userinput, new_ref_number, subreddit_count, upvoteRatio, ups, limit, upvotes, picks, picks_ayz, seconds_took, c_analyzed, time1_rsafinished, us, symbols, marketcap_min, marketcap_max):
print("\nadd_newoutputfile_parenttable()")
sql = f"INSERT INTO {db_name1}.{outputname_userinput}parent (parenttable_id, subreddit_count, upvote_ratio, ups, limit_reddit, upvotes, picks, picks_ayz, seconds_took, comments_analyzed, datetime, tickers_found, tickers_rsa, min_market_cap, max_market_cap) VALUES ({new_ref_number}, {subreddit_count}, {upvoteRatio}, {ups}, {limit}, {upvotes}, {picks}, {picks_ayz}, {seconds_took}, {c_analyzed}, '{time1_rsafinished}', {len(us)}, {len(symbols)}, {marketcap_min}, {marketcap_max});"
cursor.execute(sql)
connection.commit()
#preview list of parenttable ids
list_existingoutputfiles1 = []
sql = f"select parenttable_id from {db_name1}.{outputname_userinput}parent order by parenttable_id ASC;"
cursor.execute(sql)
result = cursor.fetchall()
# pprint.pprint(result)
# turn into list
list_existingoutputfiles1 = [list(a.values())[0] for a in result]
# remove duplicates
list_existingoutputfiles1 = list(dict.fromkeys(list_existingoutputfiles1))
print('end', list_existingoutputfiles1)
# JUST ADDED
def add_newoutputfile_childtable(new_ref_number, outputname_userinput, symbols, time1_rsafinished):
print("\nadd_newoutputfile_childtable()")
info_tickernumber = 1
for k,v in symbols.items():
coldata_00 = info_tickernumber
coldata_01 = "'%s'" % k
if coldata_01 == "'NULL'": coldata_01 = "NULL"
coldata_02 = v.get('mentions')
# coldata_03 = senti.get('neg')
# coldata_04 = senti.get('neu')
# coldata_05 = senti.get('pos')
# coldata_06 = senti.get('compound')
coldata_07 = v.get('marketCap')
coldata_08 = v.get('latestPrice')
coldata_09 = v.get('changePercent')
coldata_10 = v.get('peRatio')
coldata_11 = "'%s'" % v.get('companyName')
if coldata_11 == "'NULL'": coldata_11 = "NULL"
# time1_rsafinished = "'%s'" % time1_rsafinished
coldata_12 = new_ref_number
# use %-formatting instead of an f-string so the bare string NULL is inserted unquoted (i.e. as SQL NULL)
query1="INSERT INTO %schild (ticker_id, symbol, mentions, market_cap, latest_price, change_percent, pe_ratio, company_name, datetime, parenttable_id) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, '%s', %s)"
query1 = query1 % (f"{db_name1}.{outputname_userinput}", coldata_00, coldata_01, coldata_02,
coldata_07, coldata_08, coldata_09, coldata_10, coldata_11, time1_rsafinished, coldata_12)
try: cursor.execute(query1)
except Exception as e: print(e, "error:",query1)
info_tickernumber += 1
connection.commit()
#preview list of parenttable ids
list_existingoutputfiles1 = []
sql = f"select parenttable_id from {db_name1}.{outputname_userinput}child order by parenttable_id ASC;"
cursor.execute(sql)
result = cursor.fetchall()
# pprint.pprint(result)
# turn into list
list_existingoutputfiles1 = [list(a.values())[0] for a in result]
# remove duplicates
list_existingoutputfiles1 = list(dict.fromkeys(list_existingoutputfiles1))
print('end', list_existingoutputfiles1)
def print_logs3(outputname_userinput, outputname_generated):
print()
if storagetype == "mysql":
cursor.execute(f"SELECT table_name FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '{db_name1}' AND table_name like '%{outputname_userinput}%';")
myresult = cursor.fetchall()
previewlist_existingoutputfiles1 = [list(a.values())[0] for a in myresult]
print('existing sql tables (parent/child)', previewlist_existingoutputfiles1) #log
if storagetype == "csv":
previewlist_existingoutputfiles1 = []
for a in os.listdir(path_repo_and_csvfiles):
if a.startswith(outputname_userinput):
previewlist_existingoutputfiles1.append(a)
print('existing csv tables', previewlist_existingoutputfiles1) #log
dt_string = datetime.datetime.now().strftime("%m/%d/%Y %H:%M")
print("Date and Time: " + dt_string + " (End main)")
print('Created and wrote ' + outputname_generated)
print()
def main(input, outputname_userinput, parameter_subs, marketcap_min, marketcap_max):
'''*****************************************************************************
# refresh/reestablish connection to mysql database = ?
# prepare variables - (for deleting/renaming existing output files/adding new output file) - preview only
# close connection
*****************************************************************************'''
if storagetype == "mysql":
connection, cursor = connect_to_mysql()
#one table version
new_ref_number = prepare_variables1_sql_parentandchildtables(outputname_userinput, max_output_amount)
outputname_generated = outputname_userinput
print("PREVIEW ONLY")
cursor.close()
connection.close()
#if storagetype == "csv"
#traditional
# outputname_generated, list_existingoutputfiles1, new_ref_number = prepare_variables1_csv_and_sql(storagetype, outputname_userinput, max_output_amount)
'''*****************************************************************************
#1 get list of subreddits (from csv file) - (for Reddit Sentiment Analysis)
*****************************************************************************'''
subs = getlist_subreddits(parameter_subs)
'''*****************************************************************************
#2 get list of tickers and detailed info from an api - (for Reddit Sentiment Analysis)
*****************************************************************************'''
# us = getlist_nasdaq_csvfile(input)
# #PROBLEM_2: CAUSES MEMORY ISSUE ON AWS..
# us, dict_symbolmc, dict_symbolprice, dict_symbolpctchange, dict_name = getlist_nasdaq_api(marketcap_min, marketcap_max)
# #TEMPORARY SOLUTION 1
#us, dict_symbolmc, dict_symbolprice, dict_symbolpctchange, dict_name = getlist_from_textfile()
us = getlist_from_textfile()
# print("us", us)
# #TEMPORARY SOLUTION 2
#us, dict_symbolmc, dict_symbolprice, dict_symbolpctchange, dict_name = getlist_nasdaq_api_chunk(marketcap_min, marketcap_max)
# #TEMPORARY SOLUTION 3 (abandoned)
#url = "https://api.nasdaq.com/api/screener/stocks?tableonly=true&limit=10"
#download_file_separate(url)
'''*****************************************************************************
# prepare additional-info variables - (put additional-info into new output file)
# print logs
*****************************************************************************'''
dt_string, info_subcount, info_marketCap_limit, subreddit_count = prepare_variables2_additional_info(subs, marketcap_max)
print_logs1(dt_string, outputname_generated, info_subcount, info_marketCap_limit, us) #only for csv files
# sys.exit("Forced exit!")
'''*****************************************************************************
# Reddit Sentiment Analysis
*****************************************************************************'''
if write_empty_newoutputfile == False:
start_time = time.time()
#if write_empty_newoutputfile == False:
# open reddit client
reddit = praw.Reddit(
user_agent=os.environ.get('reddit_user_agent'),
client_id=os.environ.get('reddit_client_id'),
client_secret=os.environ.get('reddit_client_secret'),
username=os.environ.get('reddit_username'),
password=os.environ.get('reddit_password')
)
#not working..
# reddit = praw.Reddit(
# user_agent,
# client_id,
# client_secret,
# username,
# password
# )
# posts, c_analyzed, tickers, titles, a_comments, picks, subs, picks_ayz, info_parameters = data_extractor(reddit, subs, us)
posts, c_analyzed, tickers, titles, a_comments, picks, subs, picks_ayz, info_parameters, upvoteRatio, ups, limit, upvotes = data_extractor(reddit, subs, us)
print('data_extractor finished')
# symbols, times, top, info_ittookxseconds = print_helper(tickers, picks, c_analyzed, posts, subs, titles, time, start_time)
symbols, times, top, info_ittookxseconds, seconds_took = print_helper(tickers, picks, c_analyzed, posts, subs, titles, time, start_time)
print('print_helper finished')
#PROBLEM_3: Seems to not work on AWS's due to excessive memory usage...
if use_sentiment_analysis_and_visualization == True:
scores = sentiment_analysis(picks_ayz, a_comments, symbols, us)
print('sentiment_analysis finished')
visualization(picks_ayz, scores, picks, times, top)
print('visualization finished')
print_logs2(symbols, scores)
time1_rsafinished = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
# print(time1_rsafinished, type(time1_rsafinished))
'''*****************************************************************************
# refresh/reestablish connection to mysql database = ?
*****************************************************************************'''
if storagetype == "mysql":
connection, cursor = connect_to_mysql()
'''*****************************************************************************
# create missing tables if needed
# setup FK and trigger (after-delete)
*****************************************************************************'''
create_missingtables_and_clearparenttable(outputname_userinput)
setup_foreign_key_and_after_delete_trigger(outputname_userinput)
'''*****************************************************************************
# update output file
*****************************************************************************'''
# might be causing MEMORYERROR - probably not
# deleteandrename_existingoutputfiles_csv_and_sql(storagetype, list_existingoutputfiles1, max_output_amount, outputname_userinput)
if storagetype == "mysql":
#one table version
new_ref_number = prepare_variables1_sql_parentandchildtables(outputname_userinput, max_output_amount)
outputname_generated = outputname_userinput
print("ACTUAL REF NUMBER")
deleteandrename_existingoutputs_sql_parenttable(max_output_amount, outputname_userinput)
deleteandrename_existingoutputs_sql_childtable(max_output_amount, outputname_userinput)
'''*****************************************************************************
# fix/update symbol dictionary with more info
*****************************************************************************'''
if write_empty_newoutputfile == False:
#if write_empty_newoutputfile == False:
# might be causing MEMORYERROR - ?
# dict_symbolmc = {'AAPL': '$3035.xB', 'MSFT': '$2514.xB', 'GOOG': '$1974.xB', 'GOOGL': '$1967.xB', 'AMZN': '$1786.xB'}
# dict_symbolmc = {}
# dict_symbolprice = {'AAPL': '$175.x', 'MSFT': '$334.x', 'GOOG': '$2974.x', 'GOOGL': '$2963.x', 'AMZN': '$3523.x'}
# dict_symbolpctchange = {'AAPL': '2.x%', 'MSFT': '0.x%', 'GOOG': '0.x%', 'GOOGL': '0.x%', 'AMZN': '-0.x%'}
# dict_name = {'AAPL': ' Apple Inc. Common Stock', 'MSFT': ' Microsoft Corporation Common Stock', 'GOOG': ' Alphabet Inc. Class C Capital Stock', 'GOOGL': ' Alphabet Inc. Class A Common Stock', 'AMZN': ' Amazon.com, Inc. Common Stock'}
# add_newoutputfile_csv_old(outputname_generated, dt_string, info_subcount, info_marketCap_limit, info_parameters, info_ittookxseconds, symbols, dict_symbolmc, dict_symbolprice, dict_symbolpctchange, dict_name)
# #OR
# might be causing MEMORYERROR - testing, probbably not
reformatandaddinfoto_symbolsdict2(symbols, marketcap_min, marketcap_max)
'''*****************************************************************************
# add new output file
*****************************************************************************'''
# if write_empty_newoutputfile == False:
# add_newoutputfile_csv_and_sql2(new_ref_number, storagetype, outputname_generated, dt_string, info_subcount, info_marketCap_limit, info_parameters, info_ittookxseconds, symbols)
# if write_empty_newoutputfile == True:
# add_newoutputfile_csv_and_sql_empty(storagetype, outputname_generated, dt_string)
if storagetype == "mysql":
if write_empty_newoutputfile == True:
add_newoutputfile_parenttable_empty(new_ref_number, outputname_userinput, time1_rsafinished)
add_newoutputfile_childtable_empty(new_ref_number, outputname_userinput, time1_rsafinished)
if write_empty_newoutputfile == False:
add_newoutputfile_parenttable(outputname_userinput, new_ref_number, subreddit_count, upvoteRatio, ups, limit, upvotes, picks, picks_ayz, seconds_took, c_analyzed, time1_rsafinished, us, symbols, marketcap_min, marketcap_max)
if symbols != {}:
add_newoutputfile_childtable(new_ref_number, outputname_userinput, symbols, time1_rsafinished)
elif symbols == {}:
add_newoutputfile_childtable_empty(new_ref_number, outputname_userinput, time1_rsafinished)
'''*****************************************************************************
# print logs
# close connection
*****************************************************************************'''
print_logs3(outputname_userinput, outputname_generated)
if storagetype == "mysql":
cursor.close()
connection.close()
def run_batch_of_processes_1():
start_time = time.time()
'''*****************************************************************************
# create separate process for each function
# should reinitialize these Process objects if starting them multiple times (in a while loop or via the schedule module)
*****************************************************************************'''
# way 1 - local machine
# process_1 = Process(target=main, args=(input_api_nasdaq, output_filename1, subs_membercount_min1, marketcap_min0, marketcap_max1))
# process_2 = Process(target=main, args=(input_api_nasdaq, output_filename3, subs_membercount_min1, marketcap_min0, marketcap_max3))
# process_3 = Process(target=main, args=(input_api_nasdaq, output_filename4, subs_membercount_min1, marketcap_min0, marketcap_max4))
# way 2 - test
process_1 = Process(target=main, args=(input_api_nasdaq, 'result_test1_', subs_specificlist1, marketcap_min0, marketcap_max1))
process_2 = Process(target=main, args=(input_api_nasdaq, 'result_test2_', subs_specificlist1, marketcap_min0, marketcap_max3))
process_3 = Process(target=main, args=(input_api_nasdaq, 'result_test3_', subs_specificlist1, marketcap_min0, marketcap_max4))
'''*****************************************************************************
# starts the processes
*****************************************************************************'''
process_1.start(); process_2.start(); process_3.start()
'''*****************************************************************************
# wait till they all finish and close them
*****************************************************************************'''
process_1.join(); process_2.join(); process_3.join()
print("--- %s seconds ---" % (time.time() - start_time));print()
if __name__ == '__main__':
'''*****************************************************************************
# WAY 4 - run program by multiprocessing once (for aws with cron jobs i guess)
# testing
*****************************************************************************'''
#run_batch_of_processes_1() ##immediate, test
'''*****************************************************************************
# WAY 0 - run program normally
# Parameter: program_number
*****************************************************************************'''
#print("WAY 0 rsa.py used")
# main(input_api_nasdaq, output_filename1, subs_membercount_min1, marketcap_min0, marketcap_max1) ##stable RDS
# main(input_api_nasdaq, output_filename1, subs_membercount_min1, marketcap_min0, marketcap_max1) ##stable
#main(input_api_nasdaq, output_filename1_RDS, subs_membercount_min1, marketcap_min0, marketcap_max1) ##linux/window test large
main(input_api_nasdaq, output_filename4_RDS, subs_specificlist1, marketcap_min0, marketcap_max4) ##linux/window test small
#main(input_api_nasdaq, output_filename0, subs_membercount_min2, marketcap_min0, marketcap_max4) ##linux test - testing getlist_subreddits - WORKING, needs TESTING
#main(input_api_nasdaq, output_filename0, subs_specificlist1, marketcap_min0, marketcap_max4)
'''*****************************************************************************
# WAY 1 - run program by schedule (old)
# Parameter: program_number
*****************************************************************************'''
#####schedule.every().day.at("01:00")
#####schedule.every().minute.at(":08").do(main, csvfile6)
###idea = main(input, outputname_userinput, marketcap, 0, subreddit members, etc.)
###idea = main(input_csvfile, savedtickers4b, subs_membercount_min1, 4,000,000,000, member counts)
###idea = main(input_csvfile, savedtickers200b, subs_membercount_min1, 200,000,000,000, 200,000)
# program_number = 1
# schedule.every().day.at("23:55").do(nltk.download, 'wordnet')
# if program_number == 1:
# #program one
# main(input_api_nasdaq, output_filename1, subs_membercount_min1, marketcap_min0, marketcap_max1) ##
# schedule.every().day.at("00:00").do(main, input_api_nasdaq, output_filename1, subs_membercount_min1, marketcap_min0, marketcap_max1)
# schedule.every().day.at("03:00").do(main, input_api_nasdaq, output_filename1, subs_membercount_min1, marketcap_min0, marketcap_max1)
# schedule.every().day.at("06:00").do(main, input_api_nasdaq, output_filename1, subs_membercount_min1, marketcap_min0, marketcap_max1)
# schedule.every().day.at("09:00").do(main, input_api_nasdaq, output_filename1, subs_membercount_min1, marketcap_min0, marketcap_max1)
# schedule.every().day.at("12:00").do(main, input_api_nasdaq, output_filename1, subs_membercount_min1, marketcap_min0, marketcap_max1)
# schedule.every().day.at("15:00").do(main, input_api_nasdaq, output_filename1, subs_membercount_min1, marketcap_min0, marketcap_max1)
# schedule.every().day.at("18:00").do(main, input_api_nasdaq, output_filename1, subs_membercount_min1, marketcap_min0, marketcap_max1)
# schedule.every().day.at("21:00").do(main, input_api_nasdaq, output_filename1, subs_membercount_min1, marketcap_min0, marketcap_max1)
# if program_number == 2:
# #program two
# #main(input_api_nasdaq, output_filename3, subs_membercount_min1, marketcap_min0, marketcap_max3)
# schedule.every().day.at("00:00").do(main, input_api_nasdaq, output_filename3, subs_membercount_min1, marketcap_min0, marketcap_max3)
# #schedule.every().day.at("03:00").do(main, input_api_nasdaq, output_filename3, subs_membercount_min1, marketcap_min0, marketcap_max3)
# schedule.every().day.at("06:00").do(main, input_api_nasdaq, output_filename3, subs_membercount_min1, marketcap_min0, marketcap_max3)
# schedule.every().day.at("09:00").do(main, input_api_nasdaq, output_filename3, subs_membercount_min1, marketcap_min0, marketcap_max3)
# schedule.every().day.at("12:00").do(main, input_api_nasdaq, output_filename3, subs_membercount_min1, marketcap_min0, marketcap_max3)
# #schedule.every().day.at("15:00").do(main, input_api_nasdaq, output_filename3, subs_membercount_min1, marketcap_min0, marketcap_max3)
# schedule.every().day.at("18:00").do(main, input_api_nasdaq, output_filename3, subs_membercount_min1, marketcap_min0, marketcap_max3)
# #schedule.every().day.at("21:00").do(main, input_api_nasdaq, output_filename3, subs_membercount_min1, marketcap_min0, marketcap_max3)
# if program_number == 3:
# #program three
# main(input_api_nasdaq, output_filename4, subs_specificlist1, marketcap_min0, marketcap_max4)
# schedule.every().day.at("00:00").do(main, input_api_nasdaq, output_filename4, subs_specificlist1, marketcap_min0, marketcap_max4)
# #schedule.every().day.at("03:00").do(main, input_api_nasdaq, output_filename4, subs_specificlist1, marketcap_min0, marketcap_max4)
# schedule.every().day.at("06:00").do(main, input_api_nasdaq, output_filename4, subs_specificlist1, marketcap_min0, marketcap_max4)
# schedule.every().day.at("09:00").do(main, input_api_nasdaq, output_filename4, subs_specificlist1, marketcap_min0, marketcap_max4)
# schedule.every().day.at("12:00").do(main, input_api_nasdaq, output_filename4, subs_specificlist1, marketcap_min0, marketcap_max4)
# #schedule.every().day.at("15:00").do(main, input_api_nasdaq, output_filename4, subs_specificlist1, marketcap_min0, marketcap_max4)
# schedule.every().day.at("18:00").do(main, input_api_nasdaq, output_filename4, subs_specificlist1, marketcap_min0, marketcap_max4)
# #schedule.every().day.at("21:00").do(main, input_api_nasdaq, output_filename4, subs_specificlist1, marketcap_min0, marketcap_max4)
# while True:
# schedule.run_pending()
'''*****************************************************************************
# WAY 1.1 - run program by cmd lines (for aws with cron jobs ig) (old?)
# Parameter: program_number
*****************************************************************************'''
#print(sys.argv[0], sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], 'akak3')
#cmd lines 1
####python rsa.py api.nasdaq.com result_test_ ['wallstreetbets'] 800000000 && rsa.py api.nasdaq.com result_test_ ['wallstreetbets'] 500000000 &
####python rsa.py api.nasdaq.com result_test_ ['wallstreetbets'] 800000000 & rsa.py api.nasdaq.com result_test_ ['wallstreetbets'] 500000000 &
####python rsa.py api.nasdaq.com result_test_ ['wallstreetbets'] 800000000
#main(str(sys.argv[1]), str(sys.argv[2]), list(sys.argv[3]), int(sys.argv[4]))
#cmd lines 2
####python rsa.py api.nasdaq.com result_test_ 0 15000000000 && python rsa.py api.nasdaq.com result_test_ 0 9000000000000 &
####python rsa.py api.nasdaq.com result_test_ 0 15000000000 & python rsa.py api.nasdaq.com result_test_ 0 9000000000000 &
####python rsa.py api.nasdaq.com result_test_ 0 120000000 && python rsa.py api.nasdaq.com result_test_ 0 450000000 &
####python rsa.py api.nasdaq.com result_test_ 0 120000000 & python rsa.py api.nasdaq.com result_test_ 0 450000000 &
####python rsa.py api.nasdaq.com result_test_ 0 120000000
#main(str(sys.argv[1]), str(sys.argv[2]), int(sys.argv[3]), int(sys.argv[4]))
'''*****************************************************************************
# WAY 2 - run program for n times with delay
*****************************************************************************'''
# print("using way 2 - run program by fixed intervals (old)")
# for n in range(200):
# #main(input_api_nasdaq, output_filename1, subs_membercount_min1, marketcap_min2, marketcap_max1) ##linux/window test small (1 sub)
# main(input_api_nasdaq, output_filename1_RDS, subs_membercount_min1, marketcap_min0, marketcap_max1) ##linux/window test large (64 subs)
# time.sleep(15)
# subs_membercount_min1,subs_specificlist1
# input("Press any key to continue . . . (1) ")
# input("Press any key to continue . . . (2) ")
# input("Press any key to continue . . . (3) ")
# input("Press any key to continue . . . (4) ")
'''*****************************************************************************
# WAY 3 - run program by schedule & multiprocessing (good for local machine.. just one click to run)
# testing
*****************************************************************************'''
# schedule.every().day.at("00:00").do(run_batch_of_processes_1)
# #schedule.every().day.at("03:00").do(run_batch_of_processes_1)
# schedule.every().day.at("06:00").do(run_batch_of_processes_1)
# schedule.every().day.at("09:00").do(run_batch_of_processes_1)
# schedule.every().day.at("12:00").do(run_batch_of_processes_1)
# #schedule.every().day.at("15:00").do(run_batch_of_processes_1)
# schedule.every().day.at("18:00").do(run_batch_of_processes_1)
# schedule.every().day.at("21:00").do(run_batch_of_processes_1)
# # run_batch_of_processes_1() ##immediate, test
# while True:
# schedule.run_pending()
endingvar = None
|
scheduler.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
#
# Contributor: qiulimao<qiulimao@getqiu.com>
# http://www.getqiu.com
#
# Created on 2014-02-07 17:05:11
# Modified on 2016-10-26 20:46:20
import itertools
import json
import logging
import os
import time
from collections import deque
from six import iteritems, itervalues
from six.moves import queue as Queue
from weblocust.libs import counter, utils
from .task_queue import TaskQueue
logger = logging.getLogger('scheduler')
class Scheduler(object):
UPDATE_PROJECT_INTERVAL = 5 * 60
default_schedule = {
'priority': 0,
'retries': 3,
'exetime': 0,
'age': 3*60,  # every web page changes within 3 minutes
'itag': None,
}
LOOP_LIMIT = 1000  # max tasks handled per call of _check_request / _check_select
LOOP_INTERVAL = 0.1  # seconds slept between run_once passes
ACTIVE_TASKS = 100  # per-project deque length for recently active tasks
INQUEUE_LIMIT = 0  # max queued tasks per project before new requests are dropped (0 = no limit)
EXCEPTION_LIMIT = 3  # consecutive exceptions tolerated in run() before exiting
DELETE_TIME = 10 * 60  # seconds a STOPped project must stay unchanged before it is deleted
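# retry back-off: maps the number of completed retries to the delay (seconds) before the next attempt; '' is the fallback for higher counts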
DEFAULT_RETRY_DELAY = {
0: 30,
1: 1*60*60,
2: 6*60*60,
3: 12*60*60,
'': 24*60*60
}
def __init__(self, taskdb, projectdb, newtask_queue, status_queue,
out_queue, data_path='./data', resultdb=None):
self.taskdb = taskdb
self.projectdb = projectdb
self.resultdb = resultdb
self.newtask_queue = newtask_queue
self.status_queue = status_queue
self.out_queue = out_queue
self.data_path = data_path
self._send_buffer = deque()
self._quit = False
self._exceptions = 0
self.projects = dict()
self._force_update_project = False
self._last_update_project = 0
self.task_queue = dict()
self._last_tick = int(time.time())
self._sent_finished_event = dict()
self._cnt = {
"5m_time": counter.CounterManager(
lambda: counter.TimebaseAverageEventCounter(30, 10)),
"5m": counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(30, 10)),
"1h": counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(60, 60)),
"1d": counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(10 * 60, 24 * 6)),
"all": counter.CounterManager(
lambda: counter.TotalCounter()),
}
self._cnt['1h'].load(os.path.join(self.data_path, 'scheduler.1h'))
self._cnt['1d'].load(os.path.join(self.data_path, 'scheduler.1d'))
self._cnt['all'].load(os.path.join(self.data_path, 'scheduler.all'))
self._last_dump_cnt = 0
def _update_projects(self):
'''Check project update'''
now = time.time()
if (
not self._force_update_project
and self._last_update_project + self.UPDATE_PROJECT_INTERVAL > now
# i.e. skip when no forced update is pending and the update interval has not elapsed yet
):
return
for project in self.projectdb.check_update(self._last_update_project):
self._update_project(project)
logger.debug("project: %s updated.", project['name'])
self._force_update_project = False
self._last_update_project = now
def _update_project(self, project):
'''update one project'''
if project['name'] not in self.projects:
self.projects[project['name']] = {}
self.projects[project['name']].update(project)
self.projects[project['name']]['md5sum'] = utils.md5string(project['script'])
if not self.projects[project['name']].get('active_tasks', None):
self.projects[project['name']]['active_tasks'] = deque(maxlen=self.ACTIVE_TASKS)
# load task queue when project is running and delete task_queue when project is stopped
if project['status'] in ('RUNNING', 'DEBUG'):
if project['name'] not in self.task_queue:
self._load_tasks(project['name'])
self.task_queue[project['name']].rate = project['rate']
self.task_queue[project['name']].burst = project['burst']
# update project runtime info from processor by sending a _on_get_info
# request, result is in status_page.track.save
self.on_select_task({
'taskid': '_on_get_info',
'project': project['name'],
'url': 'data:,_on_get_info',
'status': self.taskdb.SUCCESS,
'fetch': {
'save': ['min_tick', 'retry_delay'],
},
'process': {
'callback': '_on_get_info',
},
})
else:
if project['name'] in self.task_queue:
self.task_queue[project['name']].rate = 0
self.task_queue[project['name']].burst = 0
del self.task_queue[project['name']]
if project['name'] not in self._cnt['all']:
self._update_project_cnt(project['name'])
scheduler_task_fields = ['taskid', 'project', 'schedule', ]
def _load_tasks(self, project):
'''load tasks from database'''
self.task_queue[project] = TaskQueue(rate=0, burst=0)
for task in self.taskdb.load_tasks(
self.taskdb.ACTIVE, project, self.scheduler_task_fields
):
taskid = task['taskid']
_schedule = task.get('schedule', self.default_schedule)
priority = _schedule.get('priority', self.default_schedule['priority'])
exetime = _schedule.get('exetime', self.default_schedule['exetime'])
self.task_queue[project].put(taskid, priority, exetime)
logger.debug('project: %s loaded %d tasks.', project, len(self.task_queue[project]))
if self.projects[project]['status'] in ('RUNNING', 'DEBUG'):
self.task_queue[project].rate = self.projects[project]['rate']
self.task_queue[project].burst = self.projects[project]['burst']
else:
self.task_queue[project].rate = 0
self.task_queue[project].burst = 0
if project not in self._cnt['all']:
self._update_project_cnt(project)
self._cnt['all'].value((project, 'pending'), len(self.task_queue[project]))
def _update_project_cnt(self, project):
status_count = self.taskdb.status_count(project)
self._cnt['all'].value(
(project, 'success'),
status_count.get(self.taskdb.SUCCESS, 0)
)
self._cnt['all'].value(
(project, 'failed'),
status_count.get(self.taskdb.FAILED, 0) + status_count.get(self.taskdb.BAD, 0)
)
self._cnt['all'].value(
(project, 'pending'),
status_count.get(self.taskdb.ACTIVE, 0)
)
def task_verify(self, task):
'''
return False if any of 'taskid', 'project', 'url' is not in task dict
or the project is not in task_queue
'''
for each in ('taskid', 'project', 'url', ):
if each not in task or not task[each]:
logger.error('%s not in task: %.200r', each, task)
return False
if task['project'] not in self.task_queue:
if task['project'] in self.projects:
logger.error('project %s not started, please set status to RUNNING or DEBUG',
task['project'])
else:
logger.error('unknown project: %s', task['project'])
return False
return True
def insert_task(self, task):
'''insert task into database'''
return self.taskdb.insert(task['project'], task['taskid'], task)
def update_task(self, task):
'''update task in database'''
return self.taskdb.update(task['project'], task['taskid'], task)
def put_task(self, task):
'''put task to task queue'''
_schedule = task.get('schedule', self.default_schedule)
self.task_queue[task['project']].put(
task['taskid'],
priority=_schedule.get('priority', self.default_schedule['priority']),
exetime=_schedule.get('exetime', self.default_schedule['exetime'])
)
def send_task(self, task, force=True):
'''
dispatch task to fetcher
the out queue may have a size limit to prevent blocking; a send_buffer is used as overflow
'''
try:
self.out_queue.put_nowait(task)
except Queue.Full:
if force:
self._send_buffer.appendleft(task)
else:
raise
def _check_task_done(self):
'''Check status queue'''
cnt = 0
try:
while True:
task = self.status_queue.get_nowait()
# check _on_get_info result here
if task.get('taskid') == '_on_get_info' and 'project' in task and 'track' in task:
if task['project'] not in self.projects:
continue
self.projects[task['project']].update(task['track'].get('save') or {})
logger.info(
'%s on_get_info %r', task['project'], task['track'].get('save', {})
)
continue
elif not self.task_verify(task):
continue
self.on_task_status(task)
cnt += 1
except Queue.Empty:
pass
return cnt
merge_task_fields = ['taskid', 'project', 'url', 'status', 'schedule', 'lastcrawltime']
def _check_request(self):
'''Check new task queue'''
tasks = {}
while len(tasks) < self.LOOP_LIMIT:
try:
task = self.newtask_queue.get_nowait()
except Queue.Empty:
break
if isinstance(task, list):
_tasks = task
else:
_tasks = (task, )
for task in _tasks:
if not self.task_verify(task):
continue
if task['taskid'] in self.task_queue[task['project']]:
if not task.get('schedule', {}).get('force_update', False):
logger.debug('ignore newtask %(project)s:%(taskid)s %(url)s', task)
continue
if task['taskid'] in tasks:
if not task.get('schedule', {}).get('force_update', False):
continue
tasks[task['taskid']] = task
for task in itervalues(tasks):
self.on_request(task)
return len(tasks)
def _check_cronjob(self):
"""Check projects cronjob tick, return True when a new tick is sended"""
now = time.time()
self._last_tick = int(self._last_tick)
if now - self._last_tick < 1:
return False
self._last_tick += 1
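# advance one tick per elapsed second; fire _on_cronjob for every running project whose min_tick divides the current tick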
for project in itervalues(self.projects):
if project['status'] not in ('DEBUG', 'RUNNING'):
continue
if project.get('min_tick', 0) == 0:
continue
if self._last_tick % int(project['min_tick']) != 0:
continue
self.on_select_task({
'taskid': '_on_cronjob',
'project': project['name'],
'url': 'data:,_on_cronjob',
'status': self.taskdb.SUCCESS,
'fetch': {
'save': {
'tick': self._last_tick,
},
},
'process': {
'callback': '_on_cronjob',
},
})
return True
request_task_fields = [
'taskid',
'project',
'url',
'status',
'schedule',
'fetch',
'process',
'track',
'lastcrawltime'
]
def _check_select(self):
'''Select task to fetch & process'''
while self._send_buffer:
_task = self._send_buffer.pop()
try:
# use force=False here to prevent automatic send_buffer append and get exception
self.send_task(_task, False)
except Queue.Full:
self._send_buffer.append(_task)
break
if self.out_queue.full():
return {}
taskids = []
cnt = 0
cnt_dict = dict()
limit = self.LOOP_LIMIT
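# select at most LOOP_LIMIT tasks overall and at most LOOP_LIMIT / 10 from any single project per call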
for project, task_queue in iteritems(self.task_queue):
if cnt >= limit:
break
# task queue
self.task_queue[project].check_update()
project_cnt = 0
# check send_buffer here; when it is not empty, out_queue may be blocked, so do not send more tasks
while cnt < limit and project_cnt < limit / 10:
taskid = task_queue.get()
if not taskid:
break
taskids.append((project, taskid))
project_cnt += 1
cnt += 1
cnt_dict[project] = project_cnt
if project_cnt:
self._sent_finished_event[project] = 'need'
# check and send finished event to project
elif len(task_queue) == 0 and self._sent_finished_event.get(project) == 'need':
self._sent_finished_event[project] = 'sent'
self.on_select_task({
'taskid': 'on_finished',
'project': project,
'url': 'data:,on_finished',
'status': self.taskdb.SUCCESS,
'process': {
'callback': 'on_finished',
},
})
for project, taskid in taskids:
self._load_put_task(project, taskid)
return cnt_dict
def _load_put_task(self, project, taskid):
try:
task = self.taskdb.get_task(project, taskid, fields=self.request_task_fields)
except ValueError:
logger.error('bad task pack %s:%s', project, taskid)
return
if not task:
return
task = self.on_select_task(task)
def _print_counter_log(self):
# print top 5 active counters
keywords = ('pending', 'success', 'retry', 'failed')
total_cnt = {}
project_actives = []
project_fails = []
for key in keywords:
total_cnt[key] = 0
for project, subcounter in iteritems(self._cnt['5m']):
actives = 0
for key in keywords:
cnt = subcounter.get(key, None)
if cnt:
cnt = cnt.sum
total_cnt[key] += cnt
actives += cnt
project_actives.append((actives, project))
fails = subcounter.get('failed', None)
if fails:
project_fails.append((fails.sum, project))
top_2_fails = sorted(project_fails, reverse=True)[:2]
top_3_actives = sorted([x for x in project_actives if x[1] not in top_2_fails],
reverse=True)[:5 - len(top_2_fails)]
log_str = ("in 5m: new:%(pending)d,success:%(success)d,"
"retry:%(retry)d,failed:%(failed)d" % total_cnt)
for _, project in itertools.chain(top_3_actives, top_2_fails):
subcounter = self._cnt['5m'][project].to_dict(get_value='sum')
log_str += " %s:%d,%d,%d,%d" % (project,
subcounter.get('pending', 0),
subcounter.get('success', 0),
subcounter.get('retry', 0),
subcounter.get('failed', 0))
logger.info(log_str)
def _dump_cnt(self):
'''Dump counters to file'''
self._cnt['1h'].dump(os.path.join(self.data_path, 'scheduler.1h'))
self._cnt['1d'].dump(os.path.join(self.data_path, 'scheduler.1d'))
self._cnt['all'].dump(os.path.join(self.data_path, 'scheduler.all'))
def _try_dump_cnt(self):
'''Dump counters every 60 seconds'''
now = time.time()
if now - self._last_dump_cnt > 60:
self._last_dump_cnt = now
self._dump_cnt()
self._print_counter_log()
def _check_delete(self):
'''Check project delete'''
now = time.time()
for project in list(itervalues(self.projects)):
if project['status'] != 'STOP':
continue
if now - project['updatetime'] < self.DELETE_TIME:
continue
if 'delete' not in self.projectdb.split_group(project['group']):
continue
logger.warning("deleting project: %s!", project['name'])
if project['name'] in self.task_queue:
self.task_queue[project['name']].rate = 0
self.task_queue[project['name']].burst = 0
del self.task_queue[project['name']]
del self.projects[project['name']]
self.taskdb.drop(project['name'])
self.projectdb.drop(project['name'])
if self.resultdb:
self.resultdb.drop(project['name'])
for each in self._cnt.values():
del each[project['name']]
def __len__(self):
return sum(len(x) for x in itervalues(self.task_queue))
def quit(self):
'''Set quit signal'''
self._quit = True
# stop xmlrpc server
if hasattr(self, 'xmlrpc_server'):
self.xmlrpc_ioloop.add_callback(self.xmlrpc_server.stop)
self.xmlrpc_ioloop.add_callback(self.xmlrpc_ioloop.stop)
def run_once(self):
'''consume queues and feed tasks to fetcher, once'''
self._update_projects()
self._check_task_done()
self._check_request()
while self._check_cronjob():
pass
self._check_select()
self._check_delete()
self._try_dump_cnt()
def run(self):
'''Start scheduler loop'''
logger.info("loading projects")
while not self._quit:
try:
time.sleep(self.LOOP_INTERVAL)
self.run_once()
self._exceptions = 0
except KeyboardInterrupt:
break
except Exception as e:
logger.exception(e)
self._exceptions += 1
if self._exceptions > self.EXCEPTION_LIMIT:
break
continue
logger.info("scheduler exiting...")
self._dump_cnt()
def trigger_on_start(self, project):
'''trigger an on_start callback of project'''
self.newtask_queue.put({
"project": project,
"taskid": "on_start",
"url": "data:,on_start",
"process": {
"callback": "on_start",
},
})
def xmlrpc_run(self, port=23333, bind='127.0.0.1', logRequests=False):
'''Start xmlrpc interface'''
from weblocust.libs.wsgi_xmlrpc import WSGIXMLRPCApplication
application = WSGIXMLRPCApplication()
application.register_function(self.quit, '_quit')
application.register_function(self.__len__, 'size')
def dump_counter(_time, _type):
try:
return self._cnt[_time].to_dict(_type)
except:
logger.exception('')
application.register_function(dump_counter, 'counter')
def new_task(task):
if self.task_verify(task):
self.newtask_queue.put(task)
return True
return False
application.register_function(new_task, 'newtask')
def send_task(task):
'''dispatch task to fetcher'''
self.send_task(task)
return True
application.register_function(send_task, 'send_task')
def update_project():
self._force_update_project = True
application.register_function(update_project, 'update_project')
def get_active_tasks(project=None, limit=100):
allowed_keys = set((
'taskid',
'project',
'status',
'url',
'lastcrawltime',
'updatetime',
'track',
))
track_allowed_keys = set((
'ok',
'time',
'follows',
'status_code',
))
iters = [iter(x['active_tasks']) for k, x in iteritems(self.projects)
if x and (k == project if project else True)]
tasks = [next(x, None) for x in iters]
result = []
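# repeatedly take the newest task (largest updatetime) across the per-project iterators until 'limit' tasks are collected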
while len(result) < limit and tasks and not all(x is None for x in tasks):
updatetime, task = t = max(t for t in tasks if t)
i = tasks.index(t)
tasks[i] = next(iters[i], None)
for key in list(task):
if key == 'track':
for k in list(task[key].get('fetch', [])):
if k not in track_allowed_keys:
del task[key]['fetch'][k]
for k in list(task[key].get('process', [])):
if k not in track_allowed_keys:
del task[key]['process'][k]
if key in allowed_keys:
continue
del task[key]
result.append(t)
# fix for "<type 'exceptions.TypeError'>:dictionary key must be string"
# have no idea why
return json.loads(json.dumps(result))
application.register_function(get_active_tasks, 'get_active_tasks')
import tornado.wsgi
import tornado.ioloop
import tornado.httpserver
container = tornado.wsgi.WSGIContainer(application)
self.xmlrpc_ioloop = tornado.ioloop.IOLoop()
self.xmlrpc_server = tornado.httpserver.HTTPServer(container, io_loop=self.xmlrpc_ioloop)
self.xmlrpc_server.listen(port=port, address=bind)
self.xmlrpc_ioloop.start()
def on_request(self, task):
if self.INQUEUE_LIMIT and len(self.task_queue[task['project']]) >= self.INQUEUE_LIMIT:
logger.debug('overflow task %(project)s:%(taskid)s %(url)s', task)
return
oldtask = self.taskdb.get_task(task['project'], task['taskid'],
fields=self.merge_task_fields)
if oldtask:
return self.on_old_request(task, oldtask)
else:
return self.on_new_request(task)
def on_new_request(self, task):
'''Called when a new request is arrived'''
task['status'] = self.taskdb.ACTIVE
self.insert_task(task)
self.put_task(task)
project = task['project']
self._cnt['5m'].event((project, 'pending'), +1)
self._cnt['1h'].event((project, 'pending'), +1)
self._cnt['1d'].event((project, 'pending'), +1)
self._cnt['all'].event((project, 'pending'), +1)
logger.info('new task %(project)s:%(taskid)s %(url)s', task)
return task
def on_old_request(self, task, old_task):
'''Called when a crawled task is arrived'''
now = time.time()
_schedule = task.get('schedule', self.default_schedule)
old_schedule = old_task.get('schedule', {})
restart = False
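# restart when the itag changed, the last crawl is older than the configured age, or force_update is set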
schedule_age = _schedule.get('age', self.default_schedule['age'])
if _schedule.get('itag') and _schedule['itag'] != old_schedule.get('itag'):
restart = True
elif schedule_age >= 0 and schedule_age + (old_task.get('lastcrawltime', 0) or 0) < now:
restart = True
elif _schedule.get('force_update'):
restart = True
if not restart:
logger.debug('ignore newtask %(project)s:%(taskid)s %(url)s', task)
return
task['status'] = self.taskdb.ACTIVE
self.update_task(task)
self.put_task(task)
project = task['project']
if old_task['status'] != self.taskdb.ACTIVE:
self._cnt['5m'].event((project, 'pending'), +1)
self._cnt['1h'].event((project, 'pending'), +1)
self._cnt['1d'].event((project, 'pending'), +1)
if old_task['status'] == self.taskdb.SUCCESS:
self._cnt['all'].event((project, 'success'), -1).event((project, 'pending'), +1)
elif old_task['status'] == self.taskdb.FAILED:
self._cnt['all'].event((project, 'failed'), -1).event((project, 'pending'), +1)
logger.info('restart task %(project)s:%(taskid)s %(url)s', task)
return task
def on_task_status(self, task):
'''Called when a status pack is arrived'''
try:
procesok = task['track']['process']['ok']
if not self.task_queue[task['project']].done(task['taskid']):
logging.error('not processing pack: %(project)s:%(taskid)s %(url)s', task)
return None
except KeyError as e:
logger.error("Bad status pack: %s", e)
return None
if procesok:
ret = self.on_task_done(task)
else:
ret = self.on_task_failed(task)
if task['track']['fetch'].get('time'):
self._cnt['5m_time'].event((task['project'], 'fetch_time'),
task['track']['fetch']['time'])
if task['track']['process'].get('time'):
self._cnt['5m_time'].event((task['project'], 'process_time'),
task['track']['process'].get('time'))
self.projects[task['project']]['active_tasks'].appendleft((time.time(), task))
return ret
def on_task_done(self, task):
'''Called when a task is done and success, called by `on_task_status`'''
task['status'] = self.taskdb.SUCCESS
task['lastcrawltime'] = time.time()
if 'schedule' in task:
if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']:
task['status'] = self.taskdb.ACTIVE
next_exetime = task['schedule'].get('age')
task['schedule']['exetime'] = time.time() + next_exetime
self.put_task(task)
else:
del task['schedule']
self.update_task(task)
project = task['project']
self._cnt['5m'].event((project, 'success'), +1)
self._cnt['1h'].event((project, 'success'), +1)
self._cnt['1d'].event((project, 'success'), +1)
self._cnt['all'].event((project, 'success'), +1).event((project, 'pending'), -1)
logger.info('task done %(project)s:%(taskid)s %(url)s', task)
return task
def on_task_failed(self, task):
'''Called when a task is failed, called by `on_task_status`'''
if 'schedule' not in task:
old_task = self.taskdb.get_task(task['project'], task['taskid'], fields=['schedule'])
if old_task is None:
logging.error('unknown status pack: %s' % task)
return
task['schedule'] = old_task.get('schedule', {})
retries = task['schedule'].get('retries', self.default_schedule['retries'])
retried = task['schedule'].get('retried', 0)
project_info = self.projects.get(task['project'], {})
retry_delay = project_info.get('retry_delay', None) or self.DEFAULT_RETRY_DELAY
next_exetime = retry_delay.get(retried, retry_delay.get('', self.DEFAULT_RETRY_DELAY['']))
if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']:
next_exetime = min(next_exetime, task['schedule'].get('age'))
else:
if retried >= retries:
next_exetime = -1
elif 'age' in task['schedule'] and next_exetime > task['schedule'].get('age'):
next_exetime = task['schedule'].get('age')
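# a negative next_exetime means the retry budget is exhausted: mark the task FAILED instead of rescheduling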
if next_exetime < 0:
task['status'] = self.taskdb.FAILED
task['lastcrawltime'] = time.time()
self.update_task(task)
project = task['project']
self._cnt['5m'].event((project, 'failed'), +1)
self._cnt['1h'].event((project, 'failed'), +1)
self._cnt['1d'].event((project, 'failed'), +1)
self._cnt['all'].event((project, 'failed'), +1).event((project, 'pending'), -1)
logger.info('task failed %(project)s:%(taskid)s %(url)s' % task)
return task
else:
task['schedule']['retried'] = retried + 1
task['schedule']['exetime'] = time.time() + next_exetime
task['lastcrawltime'] = time.time()
self.update_task(task)
self.put_task(task)
project = task['project']
self._cnt['5m'].event((project, 'retry'), +1)
self._cnt['1h'].event((project, 'retry'), +1)
self._cnt['1d'].event((project, 'retry'), +1)
# self._cnt['all'].event((project, 'retry'), +1)
logger.info('task retry %d/%d %%(project)s:%%(taskid)s %%(url)s' % (
retried, retries), task)
return task
def on_select_task(self, task):
'''Called when a task is selected to fetch & process'''
# inject information about the project
logger.info('select %(project)s:%(taskid)s %(url)s', task)
project_info = self.projects.get(task['project'])
assert project_info, 'no such project'
task['group'] = project_info.get('group')
task['project_md5sum'] = project_info.get('md5sum')
task['project_updatetime'] = project_info.get('updatetime', 0)
project_info['active_tasks'].appendleft((time.time(), task))
self.send_task(task)
return task
from tornado import gen
class OneScheduler(Scheduler):
"""
Scheduler Mixin class for one mode
overrides the send_task method
call processor.on_task(fetcher.fetch(task)) instead of consuming queue
"""
def _check_select(self):
"""
interactive mode of select tasks
"""
if not self.interactive:
return super(OneScheduler, self)._check_select()
# waiting for running tasks
if self.running_task > 0:
return
is_crawled = []
def run(project=None):
return crawl('on_start', project=project)
def crawl(url, project=None, **kwargs):
"""
Crawl the given url, same parameters as BaseHandler.crawl
url - url or taskid; stored parameters are used if the task exists in taskdb
project - may be omitted if only one project exists.
"""
# looking up the project instance
if project is None:
if len(self.projects) == 1:
project = list(self.projects.keys())[0]
else:
raise LookupError('You need to specify the project: %r'
% list(self.projects.keys()))
project_data = self.processor.project_manager.get(project)
if not project_data:
raise LookupError('no such project: %s' % project)
# get task package
instance = project_data['instance']
instance._reset()
task = instance.crawl(url, **kwargs)
if isinstance(task, list):
raise Exception('url list is not allowed in interactive mode')
# check task in taskdb
if not kwargs:
dbtask = self.taskdb.get_task(task['project'], task['taskid'],
fields=self.request_task_fields)
if not dbtask:
dbtask = self.taskdb.get_task(task['project'], task['url'],
fields=self.request_task_fields)
if dbtask:
task = dbtask
# select the task
self.on_select_task(task)
is_crawled.append(True)
shell.ask_exit()
def quit_interactive():
'''Quit interactive mode'''
is_crawled.append(True)
self.interactive = False
shell.ask_exit()
def quit_weblocust():
'''Close weblocust'''
is_crawled[:] = []
shell.ask_exit()
shell = utils.get_python_console()
shell.interact(
'weblocust shell - Select task\n'
'crawl(url, project=None, **kwargs) - same parameters as BaseHandler.crawl\n'
'quit_interactive() - Quit interactive mode\n'
'quit_weblocust() - Close weblocust'
)
if not is_crawled:
self.ioloop.add_callback(self.ioloop.stop)
def __getattr__(self, name):
"""patch for crawl(url, callback=self.index_page) API"""
if self.interactive:
return name
raise AttributeError(name)
def on_task_status(self, task):
"""Ignore not processing error in interactive mode"""
if not self.interactive:
super(OneScheduler, self).on_task_status(task)
try:
procesok = task['track']['process']['ok']
except KeyError as e:
logger.error("Bad status pack: %s", e)
return None
if procesok:
ret = self.on_task_done(task)
else:
ret = self.on_task_failed(task)
if task['track']['fetch'].get('time'):
self._cnt['5m_time'].event((task['project'], 'fetch_time'),
task['track']['fetch']['time'])
if task['track']['process'].get('time'):
self._cnt['5m_time'].event((task['project'], 'process_time'),
task['track']['process'].get('time'))
self.projects[task['project']]['active_tasks'].appendleft((time.time(), task))
return ret
def init_one(self, ioloop, fetcher, processor,
result_worker=None, interactive=False):
self.ioloop = ioloop
self.fetcher = fetcher
self.processor = processor
self.result_worker = result_worker
self.interactive = interactive
self.running_task = 0
@gen.coroutine
def do_task(self, task):
self.running_task += 1
result = yield gen.Task(self.fetcher.fetch, task)
type, task, response = result.args
self.processor.on_task(task, response)
# do with message
while not self.processor.inqueue.empty():
_task, _response = self.processor.inqueue.get()
self.processor.on_task(_task, _response)
# do with results
while not self.processor.result_queue.empty():
_task, _result = self.processor.result_queue.get()
if self.result_worker:
self.result_worker.on_result(_task, _result)
self.running_task -= 1
def send_task(self, task, force=True):
if self.fetcher.http_client.free_size() <= 0:
if force:
self._send_buffer.appendleft(task)
else:
raise self.outqueue.Full
self.ioloop.add_future(self.do_task(task), lambda x: x.result())
def run(self):
import tornado.ioloop
tornado.ioloop.PeriodicCallback(self.run_once, 100,
io_loop=self.ioloop).start()
self.ioloop.start()
def quit(self):
self.ioloop.stop()
logger.info("scheduler exiting...")
import random
import threading
from weblocust.database.sqlite.sqlitebase import SQLiteMixin
class ThreadBaseScheduler(Scheduler):
def __init__(self, threads=4, *args, **kwargs):
#self.threads = threads
self.local = threading.local()
super(ThreadBaseScheduler, self).__init__(*args, **kwargs)
# copied from binux (pyspider): use a single thread with SQLite to avoid "database is locked" errors.
if isinstance(self.taskdb, SQLiteMixin):
self.threads = 1
else:
self.threads = threads
self._taskdb = self.taskdb
self._projectdb = self.projectdb
self._resultdb = self.resultdb
self.thread_objs = []
self.thread_queues = []
self._start_threads()
assert len(self.thread_queues) > 0
@property
def taskdb(self):
if not hasattr(self.local, 'taskdb'):
self.taskdb = self._taskdb.copy()
return self.local.taskdb
@taskdb.setter
def taskdb(self, taskdb):
self.local.taskdb = taskdb
@property
def projectdb(self):
if not hasattr(self.local, 'projectdb'):
self.projectdb = self._projectdb.copy()
return self.local.projectdb
@projectdb.setter
def projectdb(self, projectdb):
self.local.projectdb = projectdb
@property
def resultdb(self):
if not hasattr(self.local, 'resultdb'):
self.resultdb = self._resultdb.copy()
return self.local.resultdb
@resultdb.setter
def resultdb(self, resultdb):
self.local.resultdb = resultdb
def _start_threads(self):
for i in range(self.threads):
queue = Queue.Queue()
thread = threading.Thread(target=self._thread_worker, args=(queue, ))
thread.daemon = True
thread.start()
self.thread_objs.append(thread)
self.thread_queues.append(queue)
def _thread_worker(self, queue):
while True:
method, args, kwargs = queue.get()
try:
method(*args, **kwargs)
except Exception as e:
logger.exception(e)
def _run_in_thread(self, method, *args, **kwargs):
i = kwargs.pop('_i', None)
block = kwargs.pop('_block', False)
if i is None:
while True:
for queue in self.thread_queues:
if queue.empty():
break
else:
if block:
time.sleep(0.1)
continue
else:
queue = self.thread_queues[random.randint(0, len(self.thread_queues)-1)]
break
else:
queue = self.thread_queues[i % len(self.thread_queues)]
queue.put((method, args, kwargs))
if block:
self._wait_thread()
def _wait_thread(self):
while True:
if all(queue.empty() for queue in self.thread_queues):
break
time.sleep(0.1)
def _update_project(self, project):
self._run_in_thread(Scheduler._update_project, self, project)
def on_task_status(self, task):
i = hash(task['taskid'])
self._run_in_thread(Scheduler.on_task_status, self, task, _i=i)
def on_request(self, task):
i = hash(task['taskid'])
self._run_in_thread(Scheduler.on_request, self, task, _i=i)
def _load_put_task(self, project, taskid):
i = hash(taskid)
self._run_in_thread(Scheduler._load_put_task, self, project, taskid, _i=i)
def run_once(self):
super(ThreadBaseScheduler, self).run_once()
self._wait_thread()
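# Illustrative note (an addition, not from the original source): ThreadBaseScheduler
# dispatches work by hashing the taskid, so status updates for the same task always
# land on the same worker queue and are processed in order, e.g.:
#   i = hash('task-abc')
#   queue = self.thread_queues[i % len(self.thread_queues)]
# Database handles are kept per-thread via threading.local() (see the taskdb/projectdb/
# resultdb properties above), so each worker thread copies its own connection.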
|
dynconf_light.py
|
#
# DYNCONF LIGHT: Dynamic Configuration
# Config generator, administrator, and retriever based on Jinja2 templates,
# CSV data, and Netmiko SSH/Telnet Sessions for Cisco IOS and Junos
#
# 2018 Dyntek Services Inc.
# Kenneth J. Grace <kenneth.grace@dyntek.com>
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import print_function
import sys, os
from jinja2 import Environment, BaseLoader
from netmiko import ConnectHandler, ssh_exception
import paramiko
import threading, copy, datetime, math
import csv, json
from optparse import OptionParser
VERSION = '1.6.10'
"""
VERSION NOTES:
1.5.1: Stable, dumps to super log, does not save json data. No plugin operation. No object orientation. Utilizes multiprocessing.
1.6: Dev, dumps to super and device log, dumps json data. Can plugin to expand operation. Object Orientation. Utilizes threading for reduced complexity.
1.6.1: Bug for options.mode fixed, and template_filename now not part of implied schema.
1.6.2: username and password now not part of implied schema.
1.6.3: Value Error for handling dropped config administrations.
1.6.4: Enable password and Line-Password handling
1.6.5: Security and inclusion of summary files.
1.6.6: Attempted code cleanup; aborted.
1.6.7: Monkey patch for SSH to Telnet failovers
1.6.8: Monkey patch for super-logging and command except failures
1.6.9: KeyboardInterrupt Handling, SEND_FAILED retries, Retry Logging Append
1.6.10: Aggressively retry commands over SSH when the device rejects them
"""
def patch_crypto_be_discovery():
"""
Monkey patches cryptography's backend detection.
Objective: support pyinstaller freezing.
"""
from cryptography.hazmat import backends
try:
from cryptography.hazmat.backends.commoncrypto.backend import backend as be_cc
except ImportError:
be_cc = None
try:
from cryptography.hazmat.backends.openssl.backend import backend as be_ossl
except ImportError:
be_ossl = None
backends._available_backends_list = [
be for be in (be_cc, be_ossl) if be is not None
]
patch_crypto_be_discovery()
class Session:
datum_schema = ["host","device_type"]
maxThreads = 3
def __init__(self, data, template, default_username='admin', default_password='Password1', default_secret='Secret1', directory=None, mode='RENDER', **kwargs):
self.id = 'session'
if 'id' in kwargs.keys():
self.id = kwargs['id']
self.directory = directory
self.mode = mode
self.devices = []
#Perform Data Validation
host_list = []
id_list = []
for datum in data:
if 'id' in datum:
if datum['id'] not in id_list:
id_list.append(datum['id'])
else:
raise SessionError('At least two devices have the same id variable. This is not allowed.')
if 'host' in datum:
if datum['host'] not in host_list:
host_list.append(datum['host'])
else:
raise SessionError('At least two devices have the same host variable. This is not allowed.')
if 'password' not in datum:
datum['password'] = default_password
if 'username' not in datum:
datum['username'] = default_username
if 'secret' not in datum:
datum['secret'] = default_secret
#Load Device Objects
for datum in data:
device_template = template
if all(prop in list(datum.keys()) for prop in self.datum_schema):
#If username is not defined or is empty, set it to default_username
try:
if datum['username'] == '':
datum['username'] = default_username
except KeyError:
datum['username'] = default_username
#If password is not defined or is empty, set it to default_password
try:
if datum['password'] == '':
datum['password'] = default_password
except KeyError:
datum['password'] = default_password
#If secret is not defined or is empty, set it to default_secret
try:
if datum['secret'] == '':
datum['secret'] = default_secret
except KeyError:
datum['secret'] = default_secret
try:
if 'template_filename' in datum:
if datum['template_filename'] != '':
if os.path.exists(datum['template_filename']):
with open(datum['template_filename'], 'r') as f:
device_template = f.read()
else:
raise SessionError('Template filename does not exist for {}'.format(datum['host']))
except TypeError:
pass
device = Device(**datum)
tpl = Environment(loader=BaseLoader).from_string(device_template)
device.assign(tpl.render(datum))
self.devices.append(device)
else:
raise SessionError('At least one device does not meet the Dynconf data schema')
@classmethod
def initFromFiles(cls, data_filename, template_filename, *args, **kwargs):
data = []
template = ""
with open(data_filename, 'r') as f:
reader = csv.DictReader(f)
for row in reader:
data.append(row)
with open(template_filename, 'r') as f:
template = f.read()
return cls(data, template, *args, **kwargs)
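# Illustrative usage sketch (an addition; the filenames and the 'hostname' column are
# hypothetical): given a CSV whose header includes at least the datum_schema columns
# ("host", "device_type") plus any template variables, e.g.
#   host,device_type,hostname
#   10.0.0.1,cisco_ios,core-sw-01
# and a Jinja2 template such as "hostname {{ hostname }}", a session could be built with
#   session = Session.initFromFiles('devices.csv', 'template.j2',
#                                   directory='out', mode='RENDER')
#   session.render()   # writes one rendered .conf per device into 'out' (which must exist)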
def administer(self, devices=None, ignore_ids=[]):
if not devices:
devices = self.devices
#Create Threads
if self.mode != 'RENDER':
threads = []
length = math.ceil(len(devices)/self.maxThreads)
batch = []
for device in devices:
if device.id not in ignore_ids:
batch.append(device)
if len(batch) >= length:
threads.append(threading.Thread(target=Session.__administerBatch, args=(self, batch)))
batch = []
if len(batch) > 0:
threads.append(threading.Thread(target=Session.__administerBatch, args=(self, batch)))
#Start Threads
try:
for thread in threads:
thread.start()
for thread in threads:
thread.join()
except KeyboardInterrupt:
print("Waiting for Active Threads to Finish...")
else:
raise SessionError('A Render Mode Session Can Not Administer')
def render(self):
for device in self.devices:
device.saveInput(self.directory)
def __administerBatch(self, batch):
for device in batch:
device.connect(self.mode, self.directory)
def recure(self):
self.active = True
def loop(self):
r_cnt = 0
ignore=[]
v = len(self.devices)
while (len(ignore) < v) and self.active:
print('RECURSION {0} [{1}/{2}]'.format(r_cnt, v-len(ignore), v))
r_cnt+=1
self.administer(ignore_ids=ignore)
for device in self.devices:
if (device.log['flag'] == 'PASS') and device.id not in ignore:
ignore.append(device.id)
t = threading.Thread(target=loop, args=(self,))
t.start()
while self.active:
i = input('> ').lower()
if i == 'stop':
self.active = False
def writeSessionLog(self):
with open('{0}/{1}.log'.format(self.directory, self.id), 'w') as f:
for device in self.devices:
f.write('\n'.join(device.formatLog()))
def saveSessionLog(self):
with open('{0}/{1}.json'.format(self.directory, self.id), 'w') as f:
sessionLog = []
for device in self.devices:
sessionLog.append(device.log)
json.dump(sessionLog, f, indent=2)
def writeSessionSummary(self):
with open('{0}/{1}.summary.log'.format(self.directory, self.id), 'w') as f:
f.write('\nDevices Listed:\n')
row = ['HOST_ID', 'IP_ADDRESS', 'DEVICE_FLAG', 'DEVICE_DESCRIPTION']
f.write(''.join(col.ljust(16) for col in row))
f.write('\n')
for device in self.devices:
data = device.log
row = [data['id'], data['host'], data['flag'], data['description']]
f.write(''.join(col.ljust(16) for col in row))
f.write('\n')
class Device:
def __init__(self, host, device_type, username, password, port='22', secret='', **kwargs):
self.id = host
if 'id' in kwargs.keys():
self.id = kwargs['id']
if 'input' in kwargs.keys():
self.assign(kwargs['input'])
if ('telnet' in device_type) and (port=='22'):
port = '23'
self.connectionData = {'host': host, 'device_type': device_type, 'username': username, 'password': password, 'port': port, 'secret': secret}
self.log = {'id': self.id, 'host':host, 'username':username, 'password':password, 'port':port, 'flag': 'INIT', 'description': 'INITIALIZED'}
self.attempts = 0
def assign(self, input):
self.input = input
def connect(self, mode='CONFIGURE', directory=None, super_log=[]):
self.attempts += 1
if self.input:
try:
device = ConnectHandler(**self.connectionData)
except ssh_exception.NetMikoAuthenticationException:
self.log['flag'], self.log['description'] = 'ERROR', 'BAD_AUTH'
except ssh_exception.NetMikoTimeoutException:
self.log['flag'], self.log['description'] = 'ERROR', 'TIMEOUT'
except ValueError:
self.log['flag'], self.log['description'] = 'ERROR', 'VALUE'
except ConnectionRefusedError:
self.log['flag'], self.log['description'] = 'ERROR', 'REFUSED'
except paramiko.ssh_exception.SSHException:
self.log['flag'], self.log['description'] = 'ERROR', 'SSH'
else:
if device:
try:
if mode == 'CONFIGURE':
self.log['output'] = [{'in':self.input , 'out': device.send_config_set(self.input)}]
elif mode == 'SHOW':
device.enable()
t_outs = []
cmds = self.input.splitlines()
for cmd in cmds:
while True:
try:
t_out = {'in':cmd, 'out':device.send_command_expect(cmd)}
except IOError:
print('{0} - Trying Again - \"{1}\"'.format(self.id, cmd))
else:
break
t_outs.append(t_out)
self.log['output'] = t_outs
self.log['flag'], self.log['description'] = 'PASS', 'ADMINISTERED'
except ValueError:
self.log['flag'], self.log['description'] = 'ERROR', 'MANUAL_REQUIRED'
finally:
device.disconnect()
finally:
print('{2} @ {3} - {0}:{1}'.format(self.log['flag'], self.log['description'], self.id, self.connectionData['host']))
# If the connection failed and it wasn't a timeout, try connecting again over the other protocol (SSH <-> Telnet)
if self.log['flag'] == 'ERROR':
if self.log['description'] != 'SEND_FAILED':
if self.log['description'] != 'TIMEOUT' and self.attempts < 2:
old_description = self.log['description']
if self.connectionData['device_type'] == 'cisco_ios_telnet':
self.connectionData['device_type'] = 'cisco_ios'
self.connectionData['port'] = '22'
print('\t{} -> Error Occurred on Telnet. Trying SSH.'.format(self.id))
self.log = self.connect(mode=mode, directory=directory, super_log=super_log)
self.log['description'] += '&'+old_description
elif self.connectionData['device_type'] == 'cisco_ios':
self.connectionData['device_type'] = 'cisco_ios_telnet'
self.connectionData['port'] = '23'
print('\t{} -> Error Occurred on SSH. Trying Telnet.'.format(self.id))
self.log = self.connect(mode=mode, directory=directory, super_log=super_log)
self.log['description'] += '&'+old_description
else:
#If the send failed, keep retrying until it succeeds
print('\t{} -> Send Failed. Trying Again.'.format(self.id))
self.log = self.connect(mode=mode, directory=directory, super_log=super_log)
if directory:
self.writeLog(directory)
super_log.append(self.log)
else:
raise DeviceError('Device attempted connection before any input was assigned.')
return self.log
def formatLog(self):
lines = []
def line_break(line_char, info):
ch_len = 86 - len(info)
br_str = '\n{0} {1} {0}\n'.format(line_char*(int(ch_len/2)), info.upper())
return br_str
lines.append(line_break('#', self.log['id']))
lines.append(line_break('@', '{0}: {1}'.format(self.log['flag'], self.log['description'])))
if 'output' in self.log:
for output in self.log['output']:
lines.append(line_break('=', output['in']))
lines += output['out'].split('\n')
return lines
def saveInput(self, directory):
if self.input:
with open('{0}/{1}.conf'.format(directory, self.id), 'w') as f:
f.write(self.input)
else:
raise DeviceError('Device can not save input. No Input assigned.')
def writeLog(self, directory):
with open('{0}/{1}.log'.format(directory, self.id), 'w') as f:
f.write('\n'.join(self.formatLog()))
class DynconfError(Exception):
pass
class DeviceError(DynconfError):
pass
class SessionError(DynconfError):
pass
def main(*args, **kwargs):
print("### DYNCONF V{0} ###\n".format(VERSION))
print("©2018 Dyntek Services Inc.\nKenneth J. Grace\nEmail: kenneth.grace@dyntek.com\n")
optparser = OptionParser(usage="usage: %prog [options]")
optparser.add_option('-u', '--username', dest='default_username', default='admin',
help='Default username for device connections')
optparser.add_option('-p', '--password', dest='default_password', default='Password1',
help='Default password for device connections')
optparser.add_option('-s', '--secret', dest='default_secret', default='Secret1',
help='Default secret for device connections')
optparser.add_option('-t', '--template', dest='template_filename',
help='Read template from a jinja2 or Txt file')
optparser.add_option('-d', '--data', dest='data_filename',
help='Read variables from a CSV or Json file')
optparser.add_option('-m', '--mode', dest='mode',
help='Set the mode for device administration (SHOW, CONFIGURE, RENDER)')
optparser.add_option('-r', '--recure', action='store_true', dest='recure', default=False,
help='Recure over all devices till stopped.')
optparser.add_option('--threads', dest='maxThreads', default=10,
help='Assign the maximum number of simultaneous threads')
optparser.add_option('--output', dest='directory',
help='Set the output directory for program output')
(options, args) = optparser.parse_args()
while (not options.data_filename) or (not os.path.exists(options.data_filename)):
options.data_filename = input('Data Filename [*.json, *.csv]: ')
while (not options.template_filename) or (not os.path.exists(options.template_filename)):
options.template_filename = input('Template Filename [*.txt, *.j2]: ')
if options.mode:
options.mode = options.mode.upper()
while (not options.mode) or (options.mode not in ['CONFIGURE','SHOW','RENDER']):
options.mode = input('Mode [CONFIGURE, SHOW, RENDER]: ').upper()
if not options.directory:
if options.mode != 'RENDER':
options.directory = '{}.output'.format(options.data_filename[0:options.data_filename[1:].find('.')+1])
else:
options.directory = '{}.render'.format(options.data_filename[0:options.data_filename[1:].find('.')+1])
try:
if not os.path.exists(options.directory):
os.makedirs(options.directory)
except FileExistsError:
pass
session = Session.initFromFiles(**vars(options))
session.maxThreads = int(options.maxThreads)
if options.mode != 'RENDER':
if not options.recure:
session.administer()
else:
session.recure()
else:
session.render()
session.writeSessionLog()
session.writeSessionSummary()
try:
session.saveSessionLog()
except SessionError:
pass
if __name__ == '__main__':
main()
|
remot3.it.connect.py
|
#!/Users/harley/anaconda3/bin/python
import json
from json import dumps
from urllib.request import urlopen
import requests
import httplib2
from terminaltables import AsciiTable
import subprocess
import pyperclip
import config
URL = config.URL
DEVELOPERKEY = config.DEVELOPERKEY
USERNAME = config.USERNAME
PASSWORD = config.PASSWORD
PAYLOAD = "{ \"username\" : \"%s\", \"password\" : \"%s\" }" % (USERNAME, PASSWORD)
HEADERS = {
'developerkey': DEVELOPERKEY,
'content-type': "application/json",
'cache-control': "no-cache"
}
try:
RESPONSE = requests.request("POST", URL, data=PAYLOAD, headers=HEADERS)
except requests.exceptions.RequestException:
print("Error in making request to remot3.it")
print(RESPONSE.status_code)
DATA = json.loads(RESPONSE.text)
# This is the token generated with every API call
token = (DATA['token'])
apiMethod = "https://"
apiServer = "api.remot3.it"
apiVersion = "/apv/v23.5"
# add the token here which you got from the /user/login API call
# token = "your login token"
deviceListURL = apiMethod + apiServer + apiVersion + "/device/list/all"
# print ("deviceListURL:", deviceListURL)
content_type_header = "application/json"
# print("Developer key is:", developerkey)
# print("token is:", token)
deviceListHeaders = {
'Content-Type': content_type_header,
'developerkey': DEVELOPERKEY,
# you need to get token from a call to /user/login
'token': token,
}
if __name__ == '__main__':
httplib2.debuglevel = 0
http = httplib2.Http()
response, content = http.request(deviceListURL,
'GET',
headers=deviceListHeaders)
myListOfDevicesJson = json.loads(content.decode('utf-8'))
print("Token is:", token, "\t\t\tNumber of devices =",
len(myListOfDevicesJson["devices"]))
print("myListOfDevicesJson", myListOfDevicesJson["devices"])
# for device in myListOfDevices["devices"]:
# print (device["devicealias"])
#print (myListOfDevicesJson["devices"][2])
deviceListArray = [["No", "Active?", "Name", "LastContacted", "Created"]]
i = 1
# for d in (myListOfDevicesJson["devices"]):
# if (d["devicestate"] == 'active'):
# dArray = [ i, d["devicestate"], d["devicealias"], d["lastcontacted"], d["createdate"]]
# deviceListArray.append(dArray)
# i +=1
for d in (myListOfDevicesJson["devices"]):
dArray = [i, d["devicestate"], d["devicealias"],
d["lastcontacted"], d["createdate"]]
deviceListArray.append(dArray)
i += 1
table = AsciiTable(deviceListArray)
print(table.table)
inputString = input("Which sensor to connect? ")
# replace this with the actual UID of your device that you got from /device/list/all
chosenDeviceId = myListOfDevicesJson["devices"][int(
inputString) - 1]["devicealias"]
UID = myListOfDevicesJson["devices"][int(inputString) - 1]["deviceaddress"]
print("you entered", inputString, "\tDevice ID:", chosenDeviceId, "UID:", UID)
print("other values", myListOfDevicesJson["devices"][int(inputString) - 1])
# you'll need to send a valid login token from /user/login
queried_ip = urlopen('http://ip.42.pl/raw').read().decode('utf-8')
# the service returns the external IP address; .decode() already yields a plain string
home_ip = '216.15.40.152'
my_ip = queried_ip
print("developerkey", DEVELOPERKEY)
print("token", token)
print("UID", UID)
print("IP address:", my_ip)
def proxyConnect(UID, token):
httplib2.debuglevel = 0
http = httplib2.Http()
content_type_header = "application/json"
# hostip must be this machine's external IP (equivalent to "whatismyip.com");
# if your router or firewall flags the lookup service, replace my_ip with the
# address reported by whatismyip.com
proxyConnectURL = apiMethod + apiServer + apiVersion + "/device/connect"
proxyHeaders = {
'Content-Type': content_type_header,
'developerkey': DEVELOPERKEY,
'token': token
}
proxyBody = {
'deviceaddress': UID,
'hostip': my_ip,
'wait': "true"
}
response, content = http.request(proxyConnectURL,
'POST',
headers=proxyHeaders,
body=dumps(proxyBody),
)
try:
print("Response", response)
cnxnData = json.loads(content.decode('utf-8'))
proxyLink = cnxnData["connection"]["proxy"]
# ["connection"]["proxy"]
# print ("Data is:\n", cnxnData)
return proxyLink
except KeyError:
print("Key Error exception!")
print("Content is:\n", content)
if __name__ == '__main__':
proxyLink = proxyConnect(UID, token)
# MYSTRING = "http://proxy8.yoics.net:32352"
TRUNCSTRING = proxyLink[7:]
PROXY = TRUNCSTRING[:-6]
PORT = proxyLink[-5:]
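# Note (added for clarity, not in the original script): the slicing above assumes the
# proxy link always looks like "http://<host>:<port>" with a 5-digit port, e.g.
# "http://proxy8.yoics.net:32352" -> PROXY == "proxy8.yoics.net", PORT == "32352".
# A less brittle alternative would be urllib.parse.urlparse:
#   from urllib.parse import urlparse
#   parsed = urlparse(proxyLink)
#   PROXY, PORT = parsed.hostname, str(parsed.port)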
SSHPARAMS = 'ssh -l pi '+ PROXY +" -p "+ PORT
print("Connect to RPI:", SSHPARAMS)
print("Link is copied to the clipboard. CTRL-V")
pyperclip.copy(SSHPARAMS)
spam = pyperclip.paste()
# class ssh:
# shell = None
# client = None
# transport = None
# def __init__(self, address, username, password, sshport):
# print("Connecting to server:", username, "@", str(address), "-p", sshport)
# self.client = paramiko.client.SSHClient()
# self.client.set_missing_host_key_policy(paramiko.client.AutoAddPolicy())
# self.client.connect(address, port=sshport, username=username, password=password, look_for_keys=False)
# self.transport = paramiko.Transport((address, sshport))
# self.transport.connect(username=username, password=password)
# thread = threading.Thread(target=self.process)
# thread.daemon = True
# thread.start()
# def closeConnection(self):
# if(self.client != None):
# self.client.close()
# self.transport.close()
# def openShell(self):
# self.shell = self.client.invoke_shell()
# def sendShell(self, command):
# if(self.shell):
# self.shell.send(command + "\n")
# else:
# print("Shell not opened.")
# def process(self):
# global connection
# while True:
# # Print data when available
# if self.shell != None and self.shell.recv_ready():
# alldata = self.shell.recv(1024)
# while self.shell.recv_ready():
# alldata += self.shell.recv(1024)
# strdata = str(alldata, "utf8")
# strdata.replace('\r', '')
# print(strdata, end = "")
# if(strdata.endswith("$ ")):
# print("\n$ ", end = "")
# sshUsername = "pi"
# sshPassword = "agd1n"
# sshServer = PROXY
# sshPort = PORT
# connection = ssh(sshServer, sshUsername, sshPassword, int(PORT) )
# connection.openShell()
# while True:
# command = input('$ ')
# if command.startswith(" "):
# command = command[1:]
# connection.sendShell(command)
|
host.py
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manages information about the host OS and hypervisor.
This class encapsulates a connection to the libvirt
daemon and provides certain higher level APIs around
the raw libvirt API. These APIs are then used by all
the other libvirt related classes
"""
import operator
import os
import socket
import sys
import threading
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import units
from oslo_utils import versionutils
import six
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import rpc
from nova import utils
from nova.virt import event as virtevent
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import guest as libvirt_guest
libvirt = None
LOG = logging.getLogger(__name__)
native_socket = patcher.original('socket')
native_threading = patcher.original("threading")
native_Queue = patcher.original("Queue" if six.PY2 else "queue")
CONF = nova.conf.CONF
# This list is for libvirt hypervisor drivers that need special handling.
# This is *not* the complete list of supported hypervisor drivers.
HV_DRIVER_QEMU = "QEMU"
HV_DRIVER_XEN = "Xen"
class Host(object):
def __init__(self, uri, read_only=False,
conn_event_handler=None,
lifecycle_event_handler=None):
global libvirt
if libvirt is None:
libvirt = importutils.import_module('libvirt')
self._uri = uri
self._read_only = read_only
self._conn_event_handler = conn_event_handler
self._lifecycle_event_handler = lifecycle_event_handler
self._skip_list_all_domains = False
self._caps = None
self._hostname = None
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
self._event_queue = None
self._events_delayed = {}
# Note(toabctl): During a reboot of a domain, STOPPED and
# STARTED events are sent. To prevent shutting
# down the domain during a reboot, delay the
# STOPPED lifecycle event some seconds.
self._lifecycle_delay = 15
def _native_thread(self):
"""Receives async events coming in from libvirtd.
This is a native thread which runs the default
libvirt event loop implementation. This processes
any incoming async events from libvirtd and queues
them for later dispatch. This thread is only
permitted to use libvirt python APIs, and the
driver.queue_event method. In particular any use
of logging is forbidden, since it will confuse
eventlet's greenthread integration
"""
while True:
libvirt.virEventRunDefaultImpl()
def _dispatch_thread(self):
"""Dispatches async events coming in from libvirtd.
This is a green thread which waits for events to
arrive from the libvirt event loop thread. This
then dispatches the events to the compute manager.
"""
while True:
self._dispatch_events()
@staticmethod
def _event_lifecycle_callback(conn, dom, event, detail, opaque):
"""Receives lifecycle events from libvirt.
NB: this method is executing in a native thread, not
an eventlet coroutine. It can only invoke other libvirt
APIs, or use self._queue_event(). Any use of logging APIs
in particular is forbidden.
"""
self = opaque
uuid = dom.UUIDString()
transition = None
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
transition = virtevent.EVENT_LIFECYCLE_STOPPED
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
transition = virtevent.EVENT_LIFECYCLE_STARTED
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
transition = virtevent.EVENT_LIFECYCLE_PAUSED
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
transition = virtevent.EVENT_LIFECYCLE_RESUMED
if transition is not None:
self._queue_event(virtevent.LifecycleEvent(uuid, transition))
def _close_callback(self, conn, reason, opaque):
close_info = {'conn': conn, 'reason': reason}
self._queue_event(close_info)
@staticmethod
def _test_connection(conn):
try:
conn.getLibVersion()
return True
except libvirt.libvirtError as e:
if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
libvirt.VIR_ERR_INTERNAL_ERROR) and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug('Connection to libvirt broke')
return False
raise
@staticmethod
def _connect_auth_cb(creds, opaque):
if len(creds) == 0:
return 0
raise exception.NovaException(
_("Can not handle authentication request for %d credentials")
% len(creds))
@staticmethod
def _connect(uri, read_only):
auth = [[libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_ECHOPROMPT,
libvirt.VIR_CRED_REALM,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_NOECHOPROMPT,
libvirt.VIR_CRED_EXTERNAL],
Host._connect_auth_cb,
None]
flags = 0
if read_only:
flags = libvirt.VIR_CONNECT_RO
# tpool.proxy_call creates a native thread. Due to limitations
# with eventlet locking we cannot use the logging API inside
# the called function.
return tpool.proxy_call(
(libvirt.virDomain, libvirt.virConnect),
libvirt.openAuth, uri, auth, flags)
def _queue_event(self, event):
"""Puts an event on the queue for dispatch.
This method is called by the native event thread to
put events on the queue for later dispatch by the
green thread. Any use of logging APIs is forbidden.
"""
if self._event_queue is None:
return
# Queue the event...
self._event_queue.put(event)
# ...then wakeup the green thread to dispatch it
c = ' '.encode()
self._event_notify_send.write(c)
self._event_notify_send.flush()
def _dispatch_events(self):
"""Wait for & dispatch events from native thread
Blocks until native thread indicates some events
are ready. Then dispatches all queued events.
"""
# Wait to be notified that there are some
# events pending
try:
_c = self._event_notify_recv.read(1)
assert _c
except ValueError:
return # will be raised when pipe is closed
# Process as many events as possible without
# blocking
last_close_event = None
while not self._event_queue.empty():
try:
event = self._event_queue.get(block=False)
if isinstance(event, virtevent.LifecycleEvent):
# call possibly with delay
self._event_emit_delayed(event)
elif 'conn' in event and 'reason' in event:
last_close_event = event
except native_Queue.Empty:
pass
if last_close_event is None:
return
conn = last_close_event['conn']
# get_new_connection may already have disabled the host,
# in which case _wrapped_conn is None.
with self._wrapped_conn_lock:
if conn == self._wrapped_conn:
reason = str(last_close_event['reason'])
msg = _("Connection to libvirt lost: %s") % reason
self._wrapped_conn = None
if self._conn_event_handler is not None:
self._conn_event_handler(False, msg)
def _event_emit_delayed(self, event):
"""Emit events - possibly delayed."""
def event_cleanup(gt, *args, **kwargs):
"""Callback function for greenthread. Called
to cleanup the _events_delayed dictionary when an event
was called.
"""
event = args[0]
self._events_delayed.pop(event.uuid, None)
# Cleanup possible delayed stop events.
if event.uuid in self._events_delayed.keys():
self._events_delayed[event.uuid].cancel()
self._events_delayed.pop(event.uuid, None)
LOG.debug("Removed pending event for %s due to "
"lifecycle event", event.uuid)
if event.transition == virtevent.EVENT_LIFECYCLE_STOPPED:
# Delay STOPPED event, as they may be followed by a STARTED
# event in case the instance is rebooting
id_ = greenthread.spawn_after(self._lifecycle_delay,
self._event_emit, event)
self._events_delayed[event.uuid] = id_
# add callback to cleanup self._events_delayed dict after
# event was called
id_.link(event_cleanup, event)
else:
self._event_emit(event)
def _event_emit(self, event):
if self._lifecycle_event_handler is not None:
self._lifecycle_event_handler(event)
def _init_events_pipe(self):
"""Create a self-pipe for the native thread to synchronize on.
This code is taken from the eventlet tpool module, under terms
of the Apache License v2.0.
"""
self._event_queue = native_Queue.Queue()
try:
rpipe, wpipe = os.pipe()
self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
except (ImportError, NotImplementedError):
# This is Windows compatibility -- use a socket instead
# of a pipe because pipes don't really exist on Windows.
sock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.listen(50)
csock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
csock.connect(('localhost', sock.getsockname()[1]))
nsock, addr = sock.accept()
self._event_notify_send = nsock.makefile('wb', 0)
gsock = greenio.GreenSocket(csock)
self._event_notify_recv = gsock.makefile('rb', 0)
def _init_events(self):
"""Initializes the libvirt events subsystem.
This requires running a native thread to provide the
libvirt event loop integration. This forwards events
to a green thread which does the actual dispatching.
"""
self._init_events_pipe()
LOG.debug("Starting native event thread")
self._event_thread = native_threading.Thread(
target=self._native_thread)
self._event_thread.setDaemon(True)
self._event_thread.start()
LOG.debug("Starting green dispatch thread")
utils.spawn(self._dispatch_thread)
def _get_new_connection(self):
# call with _wrapped_conn_lock held
LOG.debug('Connecting to libvirt: %s', self._uri)
wrapped_conn = None
try:
wrapped_conn = self._connect(self._uri, self._read_only)
finally:
# Enabling the compute service, in case it was disabled
# since the connection was successful.
disable_reason = None
if not wrapped_conn:
disable_reason = 'Failed to connect to libvirt'
if self._conn_event_handler is not None:
self._conn_event_handler(bool(wrapped_conn), disable_reason)
self._wrapped_conn = wrapped_conn
try:
LOG.debug("Registering for lifecycle events %s", self)
wrapped_conn.domainEventRegisterAny(
None,
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._event_lifecycle_callback,
self)
except Exception as e:
LOG.warning(_LW("URI %(uri)s does not support events: %(error)s"),
{'uri': self._uri, 'error': e})
try:
LOG.debug("Registering for connection events: %s", str(self))
wrapped_conn.registerCloseCallback(self._close_callback, None)
except (TypeError, AttributeError) as e:
# NOTE: The registerCloseCallback of python-libvirt 1.0.1+
# is defined with 3 arguments, and the above registerClose-
# Callback succeeds. However, the one of python-libvirt 1.0.0
# is defined with 4 arguments and TypeError happens here.
# Then python-libvirt 0.9 does not define a method register-
# CloseCallback.
LOG.debug("The version of python-libvirt does not support "
"registerCloseCallback or is too old: %s", e)
except libvirt.libvirtError as e:
LOG.warning(_LW("URI %(uri)s does not support connection"
" events: %(error)s"),
{'uri': self._uri, 'error': e})
return wrapped_conn
def _get_connection(self):
# multiple concurrent connections are protected by _wrapped_conn_lock
with self._wrapped_conn_lock:
wrapped_conn = self._wrapped_conn
if not wrapped_conn or not self._test_connection(wrapped_conn):
wrapped_conn = self._get_new_connection()
return wrapped_conn
def get_connection(self):
"""Returns a connection to the hypervisor
This method should be used to create and return a well
configured connection to the hypervisor.
:returns: a libvirt.virConnect object
"""
try:
conn = self._get_connection()
except libvirt.libvirtError as ex:
LOG.exception(_LE("Connection to libvirt failed: %s"), ex)
payload = dict(ip=CONF.my_ip,
method='_connect',
reason=ex)
rpc.get_notifier('compute').error(nova_context.get_admin_context(),
'compute.libvirt.error',
payload)
raise exception.HypervisorUnavailable(host=CONF.host)
return conn
@staticmethod
def _libvirt_error_handler(context, err):
# Just ignore instead of default outputting to stderr.
pass
def initialize(self):
# NOTE(dkliban): Error handler needs to be registered before libvirt
# connection is used for the first time. Otherwise, the
# handler does not get registered.
libvirt.registerErrorHandler(self._libvirt_error_handler, None)
libvirt.virEventRegisterDefaultImpl()
self._init_events()
self._initialized = True
def _version_check(self, lv_ver=None, hv_ver=None, hv_type=None,
op=operator.lt):
"""Check libvirt version, hypervisor version, and hypervisor type
:param hv_type: hypervisor driver from the top of this file.
"""
conn = self.get_connection()
try:
if lv_ver is not None:
libvirt_version = conn.getLibVersion()
if op(libvirt_version,
versionutils.convert_version_to_int(lv_ver)):
return False
if hv_ver is not None:
hypervisor_version = conn.getVersion()
if op(hypervisor_version,
versionutils.convert_version_to_int(hv_ver)):
return False
if hv_type is not None:
hypervisor_type = conn.getType()
if hypervisor_type != hv_type:
return False
return True
except Exception:
return False
def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._version_check(
lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.lt)
def has_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._version_check(
lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.ne)
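# Example semantics (added comment, not from nova): both helpers funnel into
# _version_check with a comparison operator; e.g. assuming libvirt 1.2.1 on the host,
#   host.has_min_version(lv_ver='1.2.0')  # True  (host 1.2.1 >= 1.2.0)
#   host.has_min_version(lv_ver='1.3.0')  # False (host 1.2.1 < 1.3.0, operator.lt fails the check)
#   host.has_version(hv_type='QEMU')      # True only when conn.getType() == 'QEMU'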
# TODO(sahid): needs to be private
def get_domain(self, instance):
"""Retrieve libvirt domain object for an instance.
:param instance: an nova.objects.Instance object
Attempt to lookup the libvirt domain objects
corresponding to the Nova instance, based on
its name. If not found it will raise an
exception.InstanceNotFound exception. On other
errors, it will raise an exception.NovaException
exception.
:returns: a libvirt.Domain object
"""
return self._get_domain_by_name(instance.name)
def get_guest(self, instance):
"""Retrieve libvirt domain object for an instance.
:param instance: an nova.objects.Instance object
:returns: a nova.virt.libvirt.Guest object
"""
return libvirt_guest.Guest(
self.get_domain(instance))
def _get_domain_by_id(self, instance_id):
"""Retrieve libvirt domain object given an instance id.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
conn = self.get_connection()
return conn.lookupByID(instance_id)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_id)
msg = (_("Error from libvirt while looking up %(instance_id)s: "
"[Error Code %(error_code)s] %(ex)s")
% {'instance_id': instance_id,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def _get_domain_by_name(self, instance_name):
"""Retrieve libvirt domain object given an instance name.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
conn = self.get_connection()
return conn.lookupByName(instance_name)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_name)
msg = (_('Error from libvirt while looking up %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s') %
{'instance_name': instance_name,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def _list_instance_domains_fast(self, only_running=True):
# The modern (>= 0.9.13) fast way - 1 single API call for all domains
flags = libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE
if not only_running:
flags = flags | libvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE
return self.get_connection().listAllDomains(flags)
def _list_instance_domains_slow(self, only_running=True):
# The legacy (< 0.9.13) slow way - O(n) API call for n domains
uuids = []
doms = []
# Redundant numOfDomains check is for libvirt bz #836647
if self.get_connection().numOfDomains() > 0:
for id in self.get_connection().listDomainsID():
try:
dom = self._get_domain_by_id(id)
doms.append(dom)
uuids.append(dom.UUIDString())
except exception.InstanceNotFound:
continue
if only_running:
return doms
for name in self.get_connection().listDefinedDomains():
try:
dom = self._get_domain_by_name(name)
if dom.UUIDString() not in uuids:
doms.append(dom)
except exception.InstanceNotFound:
continue
return doms
def list_guests(self, only_running=True, only_guests=True):
"""Get a list of Guest objects for nova instances
:param only_running: True to only return running instances
:param only_guests: True to filter out any host domain (eg Dom-0)
See method "list_instance_domains" for more information.
:returns: list of Guest objects
"""
return [libvirt_guest.Guest(dom) for dom in self.list_instance_domains(
only_running=only_running, only_guests=only_guests)]
def list_instance_domains(self, only_running=True, only_guests=True):
"""Get a list of libvirt.Domain objects for nova instances
:param only_running: True to only return running instances
:param only_guests: True to filter out any host domain (eg Dom-0)
Query libvirt to a get a list of all libvirt.Domain objects
that correspond to nova instances. If the only_running parameter
is true this list will only include active domains, otherwise
inactive domains will be included too. If the only_guests parameter
is true the list will have any "host" domain (aka Xen Domain-0)
filtered out.
:returns: list of libvirt.Domain objects
"""
if not self._skip_list_all_domains:
try:
alldoms = self._list_instance_domains_fast(only_running)
except (libvirt.libvirtError, AttributeError) as ex:
LOG.info(_LI("Unable to use bulk domain list APIs, "
"falling back to slow code path: %(ex)s"),
{'ex': ex})
self._skip_list_all_domains = True
if self._skip_list_all_domains:
# Old libvirt, or a libvirt driver which doesn't
# implement the new API
alldoms = self._list_instance_domains_slow(only_running)
doms = []
for dom in alldoms:
if only_guests and dom.ID() == 0:
continue
doms.append(dom)
return doms
def get_online_cpus(self):
"""Get the set of CPUs that are online on the host
Method is only used by NUMA code paths which check on
libvirt version >= 1.0.4. getCPUMap() was introduced in
libvirt 1.0.0.
:returns: set of online CPUs, raises libvirtError on error
"""
(cpus, cpu_map, online) = self.get_connection().getCPUMap()
online_cpus = set()
for cpu in range(cpus):
if cpu_map[cpu]:
online_cpus.add(cpu)
return online_cpus
def get_capabilities(self):
"""Returns the host capabilities information
Returns an instance of config.LibvirtConfigCaps representing
the capabilities of the host.
Note: The result is cached in the member attribute _caps.
:returns: a config.LibvirtConfigCaps object
"""
if not self._caps:
xmlstr = self.get_connection().getCapabilities()
LOG.info(_LI("Libvirt host capabilities %s"), xmlstr)
self._caps = vconfig.LibvirtConfigCaps()
self._caps.parse_str(xmlstr)
# NOTE(mriedem): Don't attempt to get baseline CPU features
# if libvirt can't determine the host cpu model.
if (hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES')
and self._caps.host.cpu.model is not None):
try:
features = self.get_connection().baselineCPU(
[self._caps.host.cpu.to_xml()],
libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
if features:
cpu = vconfig.LibvirtConfigCPU()
cpu.parse_str(features)
self._caps.host.cpu.features = cpu.features
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
LOG.warning(_LW("URI %(uri)s does not support full set"
" of host capabilities: %(error)s"),
{'uri': self._uri, 'error': ex})
else:
raise
return self._caps
def get_driver_type(self):
"""Get hypervisor type.
:returns: hypervisor type (ex. qemu)
"""
return self.get_connection().getType()
def get_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
"""
return self.get_connection().getVersion()
def get_hostname(self):
"""Returns the hostname of the hypervisor."""
hostname = self.get_connection().getHostname()
if self._hostname is None:
self._hostname = hostname
elif hostname != self._hostname:
LOG.error(_LE('Hostname has changed from %(old)s '
'to %(new)s. A restart is required to take effect.'),
{'old': self._hostname,
'new': hostname})
return self._hostname
def find_secret(self, usage_type, usage_id):
"""Find a secret.
usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
usage_id: name of resource in secret
"""
if usage_type == 'iscsi':
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_ISCSI
elif usage_type in ('rbd', 'ceph'):
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_CEPH
elif usage_type == 'volume':
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_VOLUME
else:
msg = _("Invalid usage_type: %s")
raise exception.NovaException(msg % usage_type)
try:
conn = self.get_connection()
return conn.secretLookupByUsage(usage_type_const, usage_id)
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_SECRET:
return None
def create_secret(self, usage_type, usage_id, password=None):
"""Create a secret.
:param usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
'rbd' will be converted to 'ceph'.
:param usage_id: name of resource in secret
:param password: optional secret value to set
"""
secret_conf = vconfig.LibvirtConfigSecret()
secret_conf.ephemeral = False
secret_conf.private = False
secret_conf.usage_id = usage_id
if usage_type in ('rbd', 'ceph'):
secret_conf.usage_type = 'ceph'
elif usage_type == 'iscsi':
secret_conf.usage_type = 'iscsi'
elif usage_type == 'volume':
secret_conf.usage_type = 'volume'
else:
msg = _("Invalid usage_type: %s")
raise exception.NovaException(msg % usage_type)
xml = secret_conf.to_xml()
try:
LOG.debug('Secret XML: %s' % xml)
conn = self.get_connection()
secret = conn.secretDefineXML(xml)
if password is not None:
secret.setValue(password)
return secret
except libvirt.libvirtError:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error defining a secret with XML: %s'), xml)
def delete_secret(self, usage_type, usage_id):
"""Delete a secret.
usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
usage_id: name of resource in secret
"""
secret = self.find_secret(usage_type, usage_id)
if secret is not None:
secret.undefine()
def _get_hardware_info(self):
"""Returns hardware information about the Node.
Note that the memory size is reported in MiB instead of KiB.
"""
return self.get_connection().getInfo()
def get_cpu_count(self):
"""Returns the total numbers of cpu in the host."""
return self._get_hardware_info()[2]
def get_memory_mb_total(self):
"""Get the total memory size(MB) of physical computer.
:returns: the total amount of memory(MB).
"""
return self._get_hardware_info()[1]
def get_memory_mb_used(self):
"""Get the used memory size(MB) of physical computer.
:returns: the total usage of memory(MB).
"""
if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
return 0
with open('/proc/meminfo') as fp:
m = fp.read().split()
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
if CONF.libvirt.virt_type == 'xen':
used = 0
for guest in self.list_guests(only_guests=False):
try:
# TODO(sahid): Use get_info...
dom_mem = int(guest._get_domain_info(self)[2])
except libvirt.libvirtError as e:
LOG.warning(_LW("couldn't obtain the memory from domain:"
" %(uuid)s, exception: %(ex)s"),
{"uuid": guest.uuid, "ex": e})
continue
# skip dom0
if guest.id != 0:
used += dom_mem
else:
# the memory reported by dom0 may be greater than
# what is actually being used
used += (dom_mem -
(int(m[idx1 + 1]) +
int(m[idx2 + 1]) +
int(m[idx3 + 1])))
# Convert it to MB
return used // units.Ki
else:
avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1]))
# Convert it to MB
return self.get_memory_mb_total() - avail // units.Ki
def get_cpu_stats(self):
"""Returns the current CPU state of the host with frequency."""
stats = self.get_connection().getCPUStats(
libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)
# getInfo() returns various information about the host node
# No. 3 is the expected CPU frequency.
stats["frequency"] = self._get_hardware_info()[3]
return stats
def write_instance_config(self, xml):
"""Defines a domain, but does not start it.
:param xml: XML domain definition of the guest.
:returns: a virDomain instance
"""
return self.get_connection().defineXML(xml)
def device_lookup_by_name(self, name):
"""Lookup a node device by its name.
:returns: a virNodeDevice instance
"""
return self.get_connection().nodeDeviceLookupByName(name)
def list_pci_devices(self, flags=0):
"""Lookup pci devices.
:returns: a list of virNodeDevice instance
"""
return self.get_connection().listDevices("pci", flags)
def compare_cpu(self, xmlDesc, flags=0):
"""Compares the given CPU description with the host CPU."""
return self.get_connection().compareCPU(xmlDesc, flags)
def is_cpu_control_policy_capable(self):
"""Returns whether kernel configuration CGROUP_SCHED is enabled
CONFIG_CGROUP_SCHED may be disabled in some kernel configs to
improve scheduler latency.
"""
try:
with open("/proc/self/mounts", "r") as fd:
for line in fd.readlines():
# mount options and split options
bits = line.split()[3].split(",")
if "cpu" in bits:
return True
return False
except IOError:
return False
def is_migratable_xml_flag(self):
"""Determines whether libvirt is supporting dump XML suitable for
migration.
"""
return getattr(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE',
None) is not None
|
test_operator.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
from __future__ import division
import numpy as np
import mxnet as mx
import copy
import math
import random
import itertools
from distutils.version import LooseVersion
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.operator import *
from mxnet.base import py_str, MXNetError, _as_list
from common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied, assertRaises
from common import run_in_spawned_process
from nose.tools import assert_raises, ok_
import unittest
import os
def check_rnn_consistency(cell1, cell2, T, N, I, H, grad_req, rtol=1e-2, atol=1e-4):
dshape = (N, T, I)
data = mx.sym.Variable('data')
Y1, _ = cell1.unroll(T, data, layout='NTC', merge_outputs=True)
mod1 = mx.mod.Module(Y1, label_names=None, context=default_context())
mod1.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
Y2, _ = cell2.unroll(T, data, layout='NTC', merge_outputs=True)
mod2 = mx.mod.Module(Y2, label_names=None, context=default_context())
mod2.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
mod1.init_params()
args, auxs = mod1.get_params()
args = cell1.unpack_weights(args)
args = cell2.pack_weights(args)
mod2.set_params(args, auxs)
x = mx.random.uniform(shape=dshape)
batch=mx.io.DataBatch(data=[x])
# check inference
mod1.forward(batch, is_train=False)
mod2.forward(batch, is_train=False)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
# check training
mod1.forward(batch, is_train=True)
mod2.forward(batch, is_train=True)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
dy = mx.random.uniform(shape=mod1.get_outputs()[0].shape)
mod1.backward(out_grads=[dy])
mod2.backward(out_grads=[dy])
if grad_req != 'null':
assert_allclose(mod1.get_input_grads()[0].asnumpy(), mod2.get_input_grads()[0].asnumpy(), rtol=rtol, atol=atol)
else:
assert(mod1.get_input_grads()[0] == None)
assert(mod2.get_input_grads()[0] == None)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_sym():
T, N, I, H = 5, 32, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='lstm', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.LSTMCell(H, prefix='l0_'))
stack.add(mx.rnn.LSTMCell(H, prefix='l1_'))
stack.add(mx.rnn.LSTMCell(H, prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_bidirectional():
T, N, I, H = 5, 20, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='lstm',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(H, prefix='l0_'),
mx.rnn.LSTMCell(H, prefix='r0_'),
output_prefix='bi_lstm_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(H, prefix='l1_'),
mx.rnn.LSTMCell(H, prefix='r1_'),
output_prefix='bi_lstm_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_sym():
T, N, I, H = 5, 32, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='gru', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.GRUCell(H, prefix='l0_'))
stack.add(mx.rnn.GRUCell(H, prefix='l1_'))
stack.add(mx.rnn.GRUCell(H, prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_bidirectional():
T, N, I, H = 5, 20, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='gru',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(H, prefix='l0_'),
mx.rnn.GRUCell(H, prefix='r0_'),
output_prefix='bi_gru_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(H, prefix='l1_'),
mx.rnn.GRUCell(H, prefix='r1_'),
output_prefix='bi_gru_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_sym():
T, N, I, H = 5, 32, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_tanh', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'))
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'))
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_bidirectional():
T, N, I, H = 5, 20, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_tanh',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'),
mx.rnn.RNNCell(H, activation='tanh', prefix='r0_'),
output_prefix='bi_rnntanh_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'),
mx.rnn.RNNCell(H, activation='tanh', prefix='r1_'),
output_prefix='bi_rnntanh_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_sym():
T, N, I, H = 5, 32, 200, 200
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_relu', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l0_'))
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l1_'))
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_bidirectional():
T, N, I, H = 5, 20, 200, 200
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_relu',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='relu', prefix='l0_'),
mx.rnn.RNNCell(H, activation='relu', prefix='r0_'),
output_prefix='bi_rnnrelu_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='relu', prefix='l1_'),
mx.rnn.RNNCell(H, activation='relu', prefix='r1_'),
output_prefix='bi_rnnrelu_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write', rtol=1e-2, atol=1e-2)
check_rnn_consistency(fused, stack, T, N, I, H, 'add', rtol=1e-2, atol=1e-2)
check_rnn_consistency(fused, stack, T, N, I, H, 'null', rtol=1e-2, atol=1e-2)
@with_seed()
def test_lstm_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
CX = mx.sym.Variable('state_cell')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_cell=CX,
state_size=H, num_layers=5, mode='lstm', p=0.5, state_outputs=True, name='LSTM')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_gru_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='gru', p=0.5, state_outputs=True, name='GRU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_rnntanh_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_tanh', p=0.5, state_outputs=True, name='RNN_TANH')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_rnnrelu_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_relu', p=0.5, state_outputs=True, name='RNN_RELU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
def np_softmax(x, axis=-1, temperature=1.0):
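    # numpy reference softmax; subtracting the per-axis max first keeps exp()
    # numerically stable without changing the result.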
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x/temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
def check_elementwise_sum_with_shape(shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.ElementWiseSum(*inputs, name='esum')
arr = [mx.nd.empty(shape) for i in range(n)]
arr_grad = [mx.nd.empty(shape) for i in range(n)]
for i in range(n):
arr[i][:] = np.random.uniform(-10, 10, shape)
exec1 = out.bind(default_context(),
args=arr,
args_grad=arr_grad)
    exec1.forward(is_train=True)
    out1 = exec1.outputs[0].asnumpy()
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a.asnumpy(), out_grad.asnumpy(), rtol=1e-5, atol=1e-5)
@with_seed()
def test_elementwise_sum():
nrepeat = 2
maxdim = 4
for repeat in range(nrepeat):
for dim in range(1, maxdim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
def check_concat_with_shape(shapes, dimension, skip_second):
    # If skip_second is True, the second argument will not receive a gradient.
    # This exercises the case reported in issue #1130.
n = len(shapes)
# forward
target_dim = 0
for shape in shapes:
target_dim += shape[dimension]
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
arr = [mx.nd.empty(shape) for shape in shapes]
for i in range(n):
arr[i][:] = shapes[i][dimension]
arr_np = [np.copy(narray.asnumpy()) for narray in arr]
arr_grad = [mx.nd.empty(shape) for shape in shapes]
dict_grad = {}
arg_names = out.list_arguments()
for name, g in zip(arg_names, arr_grad):
if not skip_second or name != 'arg1':
dict_grad[name] = g
args = out.list_arguments()
arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
out_grad = mx.nd.empty(out_shapes[0])
exec1 = out.bind(default_context(),
args=arr,
args_grad=dict_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension)
assert_almost_equal(out1.asnumpy(), ret)
# backward
out1.copyto(out_grad)
out_grad[:] += 1
exec1.backward([out_grad])
for i, name in enumerate(arg_names):
if not skip_second or name != 'arg1':
grad = dict_grad[name]
np_grad = arr_np[i]
assert_almost_equal(grad.asnumpy(), np_grad + 1)
@with_seed()
def test_concat():
for dimension in range(4):
n = 2
merge = [2, 3, 4, 5, 6]
a = 2
b = 3
c = 4
# test 2D
if dimension<2:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a))
elif dimension == 1:
shapes.append((a, merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 2, True)
check_concat_with_shape(shapes, dimension - 2, False)
#test 3D
if dimension<3:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a,b))
elif dimension ==1:
shapes.append((a,merge[i],b))
elif dimension ==2:
shapes.append((a,b,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 3, True)
check_concat_with_shape(shapes, dimension - 3, False)
# test 4D
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i],a,b,c))
elif dimension == 1:
shapes.append((a,merge[i],b,c))
elif dimension ==2:
shapes.append((a,b,merge[i],c))
elif dimension ==3:
shapes.append((a,b,c,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 4, True)
check_concat_with_shape(shapes, dimension - 4, False)
@with_seed()
def test_slice_channel():
def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
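        # Splits a random array into num_outputs pieces along `axis` and compares each
        # piece, and the gradient assembled from the output gradients, against numpy.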
ins = []
if squeeze_axis:
shape = np.random.randint(2, 5, data_ndim).tolist()
shape[axis] = num_outputs
out_ele_shape = [ele for ele in shape]
del out_ele_shape[axis]
else:
shape = np.random.randint(1, 5, data_ndim).tolist()
shape[axis] *= num_outputs
out_ele_shape = [ele for ele in shape]
out_ele_shape[axis] //= num_outputs
data_npy = np.random.normal(size=shape)
out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
data = mx.sym.Variable('data')
sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
exe = sym.simple_bind(ctx=default_context(), data=data_npy.shape)
assert len(exe.outputs) == num_outputs
outputs = exe.forward(is_train=True, data=data_npy)
for i in range(num_outputs):
            gt = data_npy.take(np.arange(i * shape[axis] // num_outputs,
                                         (i + 1) * shape[axis] // num_outputs), axis=axis)
if squeeze_axis:
assert_almost_equal(outputs[i].asnumpy(), gt.reshape(outputs[i].shape))
else:
assert_almost_equal(outputs[i].asnumpy(), gt)
# test backward
exe.backward(out_grads=[mx.nd.array(ele, ctx=default_context()) for ele in out_grads_npy])
if squeeze_axis:
assert_almost_equal(exe.grad_arrays[0].asnumpy(),
np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
axis=axis))
else:
assert_almost_equal(exe.grad_arrays[0].asnumpy(),
np.concatenate(out_grads_npy, axis=axis))
check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
@with_seed()
def test_regression():
''' test regression operator '''
    def check_regression(symbol, forward, backward, shape, stype='default', densities=(0, 0.5, 1)):
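        # Binds the regression symbol, then for each label density checks the forward
        # output against forward(data) and the data gradient against
        # backward(out, label) / shape[1].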
# init executor
data = mx.symbol.Variable('data')
label = mx.symbol.Variable('label', stype=stype)
out = symbol(data, label)
grad_req = {'data': 'write', 'label': 'null'}
out_exec = out.simple_bind(default_context(), grad_req=grad_req,
data=shape, label=shape)
arg_map = dict(zip(out.list_arguments(), out_exec.arg_arrays))
grad_map = dict(zip(out.list_arguments(), out_exec.grad_arrays))
# init data
arr_data = mx.random.uniform(-1, 1, shape)
arg_map["data"][:] = arr_data
# init label based on density
arr_label = arg_map["label"]
atol = 1e-5
for density in densities:
arr_label[:] = rand_ndarray(shape, stype, density=density)
out_exec.forward(is_train=True)
out_exec.backward()
np_out = forward(arr_data.asnumpy())
out_grad = backward(np_out, arr_label.asnumpy().reshape(np_out.shape)) / shape[1]
assert_almost_equal(out_exec.outputs[0].asnumpy(), np_out, atol=atol)
assert_almost_equal(grad_map["data"].asnumpy(), out_grad, atol=atol)
shape = (50, 30)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape)
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape)
check_regression(mx.symbol.MAERegressionOutput,
lambda x: x,
lambda x, y : np.where(x > y, np.ones(x.shape), -np.ones(x.shape)),
shape)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape, stype='csr')
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape, stype='csr')
def check_softmax_grad(xpu):
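    # For SoftmaxOutput the input gradient is softmax(x) - one_hot(label), so
    # (grad - softmax) should be -1 at the label index and 0 elsewhere.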
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.zeros((1,4))
expected_grad_out[0, k] = -1
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
def check_smoothed_softmax_grad(xpu):
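    # With label smoothing the target puts (1 - alpha) on the true class and
    # alpha / (K - 1) on the others, so grad = softmax(x) - smoothed_one_hot(label).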
alpha = 0.2
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False, smooth_alpha=alpha)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.full((1,4), fill_value=-alpha/float(4-1))
expected_grad_out[0, k] = - (1 - alpha)
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
def check_softmax_with_ignore_label(xpu):
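    # Samples whose label equals ignore_label (0 here) must contribute zero gradient,
    # while the gradients of the remaining samples stay unchanged.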
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, ignore_label=0, use_ignore=True)
shape = (20, 10)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1]-1, (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
exec1.backward()
grad0 = grad.asnumpy()
for i in range(int(shape[0]/2)):
l_np[i] = 0
l[:] = l_np
exec1.forward(is_train=True)
exec1.backward()
grad1 = grad.asnumpy()
assert abs(np.sum(grad1[:int(shape[0]/2)])) < 1e-5
assert_almost_equal(grad0[int(shape[0]/2):], grad1[int(shape[0]/2):])
def check_softmax_with_shape(shape, xpu, preserve_shape=False):
# bind with label
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, preserve_shape=preserve_shape)
x = mx.random.uniform(-1, 1, shape, ctx=xpu)
l = mx.random.uniform(-1, 1, shape, ctx=xpu)
l[:] = np_softmax(l.asnumpy())
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0].asnumpy()
# Non-zero atol required by test_softmax with seed 781663739
rtol = 1e-4
atol = 1e-6
assert_almost_equal(out, np_softmax(x.asnumpy()), rtol=rtol, atol=atol)
exec1.backward()
assert_almost_equal(grad.asnumpy(), np_softmax(x.asnumpy()) - l.asnumpy(), rtol=rtol, atol=atol)
def test_python_op():
X = mx.symbol.Variable('X')
op = mx.operator.NumpyOp()
s = op.get_symbol(X, name='numpy_op')
x = mx.ndarray.ones((10))*10
dx = mx.ndarray.zeros((10))
dy = mx.ndarray.ones((10))
exec1 = s.bind(default_context(), args=[x], args_grad = {'X': dx})
exec1.forward(is_train=True)
assert_almost_equal(x.asnumpy(), exec1.outputs[0].asnumpy())
exec1.backward(dy)
assert_almost_equal(dy.asnumpy(), dx.asnumpy())
def test_swapaxes():
data = mx.symbol.Variable('data')
shape = (2, 3, 4)
data_tmp = np.ones(shape)
data_tmp[0] = 1
data_tmp[1] = 2
arr_data = mx.nd.array(data_tmp)
swap0 = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2)
swap = mx.symbol.SwapAxis(data=swap0, dim1=1, dim2=2)
exe_c = swap.bind(default_context(), args=[arr_data])
exe_c.forward(is_train=True)
out = exe_c.outputs[0].asnumpy()
swap0_ = np.swapaxes(data_tmp, 0, 2)
swap_ = np.swapaxes(swap0_, 1, 2)
assert_almost_equal(out, swap_)
@with_seed()
def test_scalarop():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)*5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0))
npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0))
npout = 2/npout_1
check_symbolic_forward(test, [data_tmp], [npout])
npout_grad = 2.*2/5
npout_grad = 2*npout_grad /(npout_1 *npout_1 )
check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
@with_seed()
def test_scalar_pow():
data = mx.symbol.Variable('data')
shape = (1, 1)
data_tmp = np.ones(shape)
test = data ** 2
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [data_tmp ** 2])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [2 * data_tmp])
@with_seed()
def test_symbol_pow():
shape = (1, 1)
data = mx.symbol.Variable('data')
data_tmp = np.ones(shape)*2
exp = mx.symbol.Variable('exp')
exp_tmp = np.ones(shape)*3
test = data**exp
check_numeric_gradient(test, [data_tmp, exp_tmp])
check_symbolic_forward(test, [data_tmp, exp_tmp], [data_tmp**exp_tmp])
data_dir = data_tmp**(exp_tmp - 1) * exp_tmp
exp_dir = data_tmp**(exp_tmp) * np.log(data_tmp)
check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir])
@with_seed()
def test_fully_connected():
data = mx.sym.var("data")
fc_weight = mx.sym.var("weight")
fc_bias = mx.sym.var("bias")
fc = mx.sym.FullyConnected(data=data, weight=fc_weight, bias=fc_bias, num_hidden=10, no_bias=False, name='fc')
data = mx.nd.random.uniform(shape=(5, 5, 5, 13), dtype=np.float32)
fc_weight = mx.nd.random.uniform(shape=(10, 325), dtype=np.float32)
    fc_bias = mx.nd.random.uniform(shape=(10,), dtype=np.float32)
fc_bias2 = mx.nd.random.uniform(shape=(10, 1), dtype=np.float32)
data_np = data.asnumpy().reshape(5, 325)
fc_weight_np = np.transpose(fc_weight.asnumpy())
fc_bias_np = fc_bias.asnumpy()
res = np.dot(data_np, fc_weight_np) + fc_bias.asnumpy()
check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, {'fc_output': res})
check_numeric_gradient(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np},
numeric_eps=1e-2, rtol=1e-4, atol=1e-2)
# TODO: Fix Bug #15032 when bias has ndim > 1
#check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res})
@with_seed()
def test_pow_fn():
shape = (3, 4)
exp = mx.symbol.Variable("exp")
x = np.ones(shape)*3
for y in [mx.sym.pow(2, exp), mx.sym.power(2, exp)]:
check_numeric_gradient(y, [x], numeric_eps=1E-3)
check_symbolic_forward(y, [x], [2**x])
check_symbolic_backward(y, [x], [np.ones(shape)], [np.log(2) * 2**x])
@with_seed()
def test_relu():
def frelu(x):
return np.maximum(x, 0.0)
def frelu_grad(x):
return 1.0 * (x > 0.0)
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.relu(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
eps = 1e-4
# Avoid finite difference method inaccuracies due to discontinuous gradient at the origin.
# Here we replace small problematic inputs with 1.0. Repro issue with seed 97264195.
xa[abs(xa) < eps] = 1.0
ya = frelu(xa)
ga = frelu_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=eps)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga])
# NOTE(haojin2): The numeric gradient check is skipped for float16 due to precision issues;
# the analytical checks are still performed on every data type to verify correctness.
@with_seed()
def test_leaky_relu():
def fleaky_relu(x, act_type, slope=0.25):
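        # numpy reference: 'leaky' scales negative inputs by slope, while 'elu'
        # maps negative inputs to slope * (exp(x) - 1).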
neg_indices = x < 0
out = x.copy()
if act_type == 'elu':
out[neg_indices] = slope * np.expm1(out[neg_indices])
elif act_type == 'leaky':
out[neg_indices] = slope * out[neg_indices]
return out
def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
neg_indices = x < 0
out = np.ones(x.shape)
if act_type == 'elu':
out[neg_indices] = y[neg_indices] + slope
elif act_type == 'leaky':
out[neg_indices] = slope
return out * grad
for ndim in range(1, 4):
shape = rand_shape_nd(ndim)
x = mx.symbol.Variable("x")
slp = 0.25
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
eps = 1e-4
rtol = 1e-2
atol = 1e-3
xa[abs(xa) < eps] = 1.0
for act_type in ['elu', 'leaky']:
y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
ya = fleaky_relu(xa, slope=slp, act_type=act_type)
ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
# NOTE(haojin2): The numeric gradient check is skipped for float16 due to precision issues;
# the analytical checks are still performed on every data type to verify correctness.
@with_seed()
@unittest.skip("Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/12885")
def test_prelu():
def fprelu(x, gamma):
pos_indices = x > 0
out = x.copy()
if len(x.shape) == 4:
out = out.transpose(2,3,0,1)
out = np.multiply(out, gamma)
out = out.transpose(2,3,0,1)
else:
out = np.multiply(out, gamma)
out[pos_indices] = x[pos_indices]
return out
def fprelu_grad(x, y, gamma):
pos_indices = x > 0
if len(x.shape) == 4:
grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
grad_x = grad_x.transpose(2,3,0,1)
else:
grad_x = np.multiply(np.ones(x.shape), gamma)
grad_gam = np.zeros(gamma.shape)
copy_x = x.copy()
copy_x[pos_indices] = 0.0
grad_x[pos_indices] = 1.0
if len(gamma.shape) > 1 and len(x.shape) != 4:
grad_gam = copy_x
elif len(gamma.shape) > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(2,3))
elif gamma.shape[0] == 1:
grad_gam = np.sum(np.sum(copy_x))
elif gamma.shape[0] > 1 and len(x.shape) != 4:
grad_gam = np.sum(copy_x, axis=0)
elif gamma.shape[0] > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(0,2,3))
return (grad_x, grad_gam)
x = mx.symbol.Variable("x")
gamma = mx.symbol.Variable("gamma")
for shape in [(3,4), (3,4,4,5)]:
for dtype in [np.float16, np.float32, np.float64]:
for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
gam_full = np.array([gam, gam, gam])
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
rtol = 1e-2
atol = 1e-3
eps = 1e-4
xa[abs(xa) < eps] = 1.0
y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
ya = fprelu(xa, gam)
ya_full = fprelu(xa, gam_full)
g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam], [np.ones(shape), np.ones(gam.shape)], [g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam_full], [np.ones(shape), np.ones(gam_full.shape)],
[g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_selu():
alpha = 1.6732632423543772848170429916717
lamb = 1.0507009873554804934193349852946
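    # Standard SELU constants (alpha, lambda) from Klambauer et al.,
    # "Self-Normalizing Neural Networks" (2017).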
def fselu(x):
neg_indices = x < 0
out = x.copy()
out[neg_indices] = alpha * np.expm1(out[neg_indices])
return out * lamb
def fselu_grad(grad, x, y):
neg_indices = x < 0
out = np.ones(x.shape).astype(x.dtype)
out[neg_indices] = y[neg_indices] + alpha
return out * lamb
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="selu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fselu(xa)
ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_gelu():
CUBE_CONSTANT = 0.044715
ROOT_TWO_OVER_PI = 0.7978845608028654
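    # Reference for the tanh approximation of GELU:
    # gelu(x) = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))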
def g(x):
return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * np.power(x, 3))
def g_grad(x):
return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(x, 2))
def f(x):
return 1.0 + np.tanh(g(x))
def f_grad(x):
return (1.0 - np.tanh(g(x)) * np.tanh(g(x))) * g_grad(x)
def fgelu(x):
return 0.5 * x * f(x)
def fgelu_grad(grad, x, y):
return grad * (y / x + y * (1 - np.tanh(g(x))) * g_grad(x))
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="gelu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 2e-2, 1e-3) if dtype is np.float16 else (1e-4, 1e-3, 1e-5)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fgelu(xa)
ga = fgelu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_sigmoid():
def fsigmoid(a):
return np.divide(1.0, (1.0 + np.exp(-a)))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.sigmoid(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsigmoid(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)])
@with_seed()
def test_shape_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.shape_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.shape(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
        assert same(yo, ya)
assert_almost_equal(xg.asnumpy(), np.zeros_like(xg.asnumpy()))
@with_seed()
def test_size_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.size_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.size(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
        assert yo.item() == ya
assert_almost_equal(xg.asnumpy(), np.zeros_like(xg.asnumpy()))
@with_seed()
def test_hard_sigmoid():
def fhardsigmoid(a, alpha=0.2, beta=0.5):
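        # numpy reference: hard_sigmoid(a) = clip(alpha * a + beta, 0, 1)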
return np.maximum(np.zeros(a.shape, dtype=a.dtype),
np.minimum(np.ones(a.shape, dtype=a.dtype), alpha*a+beta))
def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5):
orig_out = fhardsigmoid(a, alpha, beta)
res = out_grad * alpha
res[orig_out <= 0.0] = 0.0
res[orig_out >= 1.0] = 0.0
return res
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.hard_sigmoid(x)
for dtype in [np.float16, np.float32, np.float64]:
if dtype is np.float16:
rtol = 1e-2
else:
rtol = 1e-3
atol = 1e-3
eps = 1e-3
xa = np.random.uniform(low=-3.0,high=3.0,size=shape).astype(dtype)
# function not differentiable at x=2.5 and -2.5
xa[abs(xa-2.5) < eps] -= 2 * eps
xa[abs(xa+2.5) < eps] += 2 * eps
ya = fhardsigmoid(xa)
grad_xa = fhardsigmoid_grad(xa, np.ones(shape))
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_softsign():
def fsoftsign(a):
return np.divide(a, (1.0 + np.abs(a)))
def fsoftsign_grad(a):
return np.divide(1.0, np.square((1.0 + np.abs(a))))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.softsign(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsoftsign(xa)
ya_grad = fsoftsign_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
@with_seed()
def test_binary_logic():
def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
x = mx.symbol.Variable("x")
y = mx.symbol.Variable("y")
z = logic_sym(x, y)
x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
exe = z.simple_bind(ctx=default_context(), x=x_shape, y=y_shape)
mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0].asnumpy()
assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
exe.backward()
if test_scalar:
z_lscalar = logic_sym(1, y)
z_rscalar = logic_sym(x, 1)
exe_lscalar = z_lscalar.simple_bind(ctx=default_context(), y=y_shape)
exe_rscalar = z_rscalar.simple_bind(ctx=default_context(), x=x_shape)
mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0].asnumpy()
mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0].asnumpy()
assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy))
assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1))
exe_lscalar.backward()
exe_rscalar.backward()
# Test the no-broadcasting binary logic ops + scalar logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10))
# Test the broadcasting binary logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
@with_seed()
def test_unary_logic():
def reference(a, dtype):
return np.logical_not(a).astype(dtype)
shape = (3, 4)
xa = np.random.randint(-2, 2, size=shape).astype(np.float32)
mx_xa = mx.nd.array(xa)
mx_out = mx.nd.logical_not(mx_xa)
assert_almost_equal(mx_out.asnumpy(), reference(xa, dtype=xa.dtype))
x = mx.sym.Variable('x')
y = mx.sym.logical_not(data=x)
exe = y.simple_bind(ctx=default_context(), x=shape)
sym_out = exe.forward(is_train=True, x=mx_xa)[0]
assert_almost_equal(sym_out.asnumpy(), reference(xa, dtype=xa.dtype))
@with_seed()
def test_embedding():
in_dim = 10
out_dim = 4
batch = 24
data = mx.sym.Variable("data")
embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
exe_test = embed.simple_bind(default_context(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
np_onehot = np.zeros((batch, in_dim))
np_onehot[np.arange(batch), np_data] = 1.0
# forward
arg_map["data"][:] = np_data
arg_map["embed_weight"][:] = np_weight
exe_test.forward(is_train=True)
# Non-zero atol required, as exposed by seed 781663739
rtol = 1e-5
atol = 1e-5
assert_almost_equal(exe_test.outputs[0].asnumpy(), np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
# backward
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"].asnumpy(), np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
# Check that operators handle duplicate inputs correctly.
@with_seed()
def test_binary_op_duplicate_input():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = 5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
out_grad = mx.nd.empty(shape)
out_grad[:] = 1
square = data * data
exe_square = square.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_square.forward(is_train=True)
assert_almost_equal(exe_square.outputs[0].asnumpy(), data_tmp * data_tmp)
exe_square.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), 2.0 * data_tmp)
@with_seed()
def test_sign():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.sign(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.sign(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    # the gradient of sign() is zero everywhere it is defined
    npout_grad = np.zeros(shape)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), npout_grad)
@with_seed()
def test_round_ceil_floor():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5.543
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]= 2
test = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_trunc():
data_tmp = np.random.rand(3, 4) * 10 - 5
arr_data = mx.nd.array(data_tmp)
data = mx.symbol.Variable('data')
test = mx.sym.trunc(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
# 'trunc' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
# Repro issue with seed 1660190454
npout = np.trunc(np.float32(data_tmp))
assert_almost_equal(out, npout)
@with_seed()
def test_rsqrt_cos_sin():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = 1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), npout_grad)
@with_seed()
def test_maximum_minimum():
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp2 = np.random.rand(3,4)
data_tmp1[:] = 2
data_tmp2[:] = 3
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,data2) + mx.sym.minimum(data1,data2)
exe_test = test.bind(default_context(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > data_tmp2).astype('float')
mask2 = (data_tmp1 < data_tmp2).astype('float')
npout_grad1 = npout_grad * mask1 + npout_grad * mask2
npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)
assert_almost_equal(arr_grad1.asnumpy(), npout_grad1)
assert_almost_equal(arr_grad2.asnumpy(), npout_grad2)
@with_seed()
def test_maximum_minimum_scalar():
data1 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp1[:] = 2
arr_data1 = mx.nd.array(data_tmp1)
arr_grad1 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4)
exe_test = test.bind(default_context(), args=[arr_data1], args_grad=[arr_grad1])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > 3).astype('float')
mask2 = (9 > data_tmp1).astype('float')
mask3 = (5 < data_tmp1).astype('float')
mask4 = (data_tmp1 < 4).astype('float')
npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4
assert_almost_equal(arr_grad1.asnumpy(), npout_grad1)
@with_seed()
def test_abs():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.abs(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = abs(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * np.sign(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), npout_grad)
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
"""configure A: input --> conv --> deconv --> output.
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output, and the same weights between conv
and deconv;
If the input value of forward() and backwrad() is the same, then
the output value of them should also the same;
"""
assert input_shape[1] == num_filter
data = mx.sym.Variable(name="data")
conv = mx.sym.Convolution(
data=data, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
deconv = mx.sym.Deconvolution(
data=conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
out_grad = input_data
args = {}
args["data"] = input_data
args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
args_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out, args_grad[0].asnumpy(), rtol=1E-3, atol=1e-3)
args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
def check_deconvolution_gradient(input_shape, num_filter, pad):
"""configure A: input --> conv --> output.
configure B: input --> deconv --> output
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output;
During backward(), if the input of A equals output of B, and the output
of A equals input of B, then the grad of weight should be the same;
"""
ndim = len(pad)
stride = (1,) * ndim
kernel = tuple(2 * np.array(pad) + 1)
data_conv = mx.sym.Variable(name="data_conv")
conv = mx.sym.Convolution(
data=data_conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
data_deconv = mx.sym.Variable(name="data_deconv")
deconv = mx.sym.Deconvolution(
data=data_deconv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
conv_args = {}
conv_args["data_conv"] = conv_data
conv_args['conv_weight'] = \
mx.random.normal(0, 1,(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
conv_args_grad = [mx.nd.zeros(conv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
exe_conv = conv.bind(default_context(), args=conv_args, args_grad=conv_args_grad)
exe_conv.forward(is_train=True)
conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
exe_conv.backward(conv_out_grad)
deconv_data = conv_out_grad
deconv_args = {}
deconv_args['data_deconv'] = deconv_data
deconv_args['deconv_weight'] = conv_args['conv_weight']
deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
mx.nd.array(deconv_addto_args_grad_npy[1])]
exe_deconv = deconv.bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
exe_deconv.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy(), deconv_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
# Test AddTo
exe_deconv_addto = deconv.bind(default_context(), args=deconv_args,
args_grad=deconv_addto_args_grad,
grad_req="add")
exe_deconv_addto.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv_addto.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
data = mx.sym.Variable(name="data")
if target_shape:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
target_shape = target_shape)
else:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
default_target_size = 8
if target_shape is None:
target_shape = (default_target_size,) * len(kernel)
assert out_shapes[0] == (input_shape[0], 5) + target_shape
@with_seed()
def test_deconvolution():
# 2D
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
target_shape = (8,8),
pad = (99,99), # will be ignored
adj = (101,101), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
pad = (1,1),
adj = (1,1),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5,5),
num_filter = 1,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28,28),
num_filter = 3,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403, 403),
num_filter = 3,
kernel = (7,7),
stride = (5,5),
pad = (2,2)
)
check_deconvolution_gradient(
input_shape = (1,3,5,5),
num_filter = 3,
pad = (1,1)
)
check_deconvolution_gradient(
input_shape = (5,3,100,100),
num_filter = 3,
pad = (3,3)
)
# 1D
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
target_shape = (8,),
pad = (99,), # will be ignored
adj = (101,), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
pad = (1,),
adj = (1,),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5),
num_filter = 1,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28),
num_filter = 3,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403),
num_filter = 3,
kernel = (7,),
stride = (5,),
pad = (2,)
)
check_deconvolution_gradient(
input_shape = (1,3,5),
num_filter = 3,
pad = (1,)
)
check_deconvolution_gradient(
input_shape = (5,3,100),
num_filter = 3,
pad = (3,)
)
@with_seed()
def test_deconvolution_forward_with_bias():
"""Check if deconvolution forward can work well with bias=True
"""
def check_deconvolution_forward_with_bias(shape=(1, 16, 5, 5), num_filter=32, num_group=1, kernel=(3, 3), pad=(1, 1)):
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
input_data = mx.random.uniform(-5, 5, shape, ctx=mx.cpu())
y = mx.sym.Deconvolution(data=x, weight=w, num_filter=num_filter, num_group=num_group, kernel=kernel, no_bias=False, pad=pad)
exe = y.simple_bind(ctx=mx.cpu(), x=shape, grad_req='null')
exe.arg_arrays[0][:] = np.random.normal(size=exe.arg_arrays[0].shape)
exe.arg_arrays[1][:] = np.random.normal(size=exe.arg_arrays[1].shape)
exe.forward(is_train=False)
o = exe.outputs[0]
t = o.asnumpy()
check_deconvolution_forward_with_bias((1, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((32, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((1, 16, 5, 5), 32, 1, (3, 3), (1, 1))
check_deconvolution_forward_with_bias((32, 16, 5, 5), 32, 1, (3, 3), (1, 1))
def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='nearest', scale=root_scale)
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
exe.backward(exe.outputs)
for k in range(len(shapes)):
name = 'arg_%d'%k
assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter):
def _init_bilinear(arr, f):
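        # Fill `arr` with the standard bilinear-interpolation kernel for upsampling
        # factor f (the usual initializer for bilinear UpSampling weights).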
weight = np.zeros(np.prod(arr.shape), dtype='float32')
shape = arr.shape
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(np.prod(shape)):
x = i % shape[3]
y = (i // shape[3]) % shape[2]
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
arr[:] = weight.reshape(shape)
return arr
up = mx.sym.UpSampling(mx.sym.Variable("data"),
mx.sym.Variable('weight'), sample_type='bilinear', scale=root_scale,
num_filter=num_filter, num_args=2)
arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape)
arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_context()),
'weight': mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))}
arr_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(exe.outputs)
target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale)
assert out.shape == data_shape[:2] + target_shape
@with_seed()
def test_nearest_upsampling():
for root_scale in [1,2,3]:
for scale in [1,2,3]:
for num_shape in [1,2,3]:
for base in [1,2,3]:
shapes = [(1,3,base*root_scale*scale**(num_shape-1-i),base*root_scale*scale**(num_shape-1-i)) for i in range(num_shape)]
check_nearest_upsampling_with_shape(shapes, scale, root_scale)
@with_seed()
def test_bilinear_upsampling():
rootscale = [2,3]
scales = [1,2,3]
filters = [1,2,3]
bases = [1,2,3]
for params in itertools.product(rootscale, scales, filters, bases):
root_scale, scale, num_filter, base = params
        # Bilinear upsampling takes exactly one data input and one weight input;
        # the multi-input mode used for nearest upsampling is not applicable here.
dimension = base*root_scale*scale
kernel = 2 * root_scale - root_scale % 2
data_shape = (1, num_filter, dimension, dimension)
weight_shape = (1, num_filter, kernel, kernel)
check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter)
@with_seed()
def test_batchnorm_training():
def check_batchnorm_training(stype):
for shape in [(2, 3), (2, 3, 2, 2)]:
data_tmp = np.random.normal(-0.1, 0.1, size=shape)
            s = (shape[1],)
gamma = np.ones(s)
beta = np.ones(s)
gamma[1] = 3
beta[0] = 3
rolling_mean = np.random.uniform(size=s)
rolling_std = np.random.uniform(size=s)
data = mx.symbol.Variable('data', stype=stype)
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
# Test varying channel axis
dim = len(shape)
for chaxis in range(-dim, dim):
chaxis_true = chaxis
if chaxis < 0:
chaxis_true = dim + chaxis
shapex = shape
channel_count = shapex[chaxis_true]
data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
gamma = np.ones(channel_count)
beta = np.ones(channel_count)
if channel_count > 1:
gamma[1] = 3
beta[0] = 3
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
xrolling_mean = np.random.uniform(size=channel_count)
xrolling_std = np.random.uniform(size=channel_count)
xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
mx.nd.array(xrolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
check_batchnorm_training('default')
@with_seed()
def test_batchnorm():
momentum = 0.9
epsilon = 1e-5
def _test_batchnorm_impl(op, shape, axis, cudnn_off, output_mean_var):
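        # Runs `op` for several iterations and checks the outputs, running statistics and
        # the data/gamma/beta gradients against batch statistics computed by hand.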
print(str((op, shape, axis, cudnn_off)))
kwargs = dict(output_mean_var=output_mean_var)
if op == mx.nd.contrib.SyncBatchNorm:
if axis != 1:
return
key = str(op) + str(shape) + str(axis)
kwargs.update(dict(key=key))
if cudnn_off:
return
else:
kwargs.update(dict(axis=axis, cudnn_off=cudnn_off))
nch = shape[axis]
bn_gamma = mx.nd.random.uniform(shape=(nch,))
bn_gamma.attach_grad()
bn_beta = mx.nd.random.uniform(shape=(nch,))
bn_beta.attach_grad()
bn_running_mean = mx.nd.zeros(nch)
bn_running_var = mx.nd.ones(nch)
running_mean = mx.nd.zeros(nch)
running_var = mx.nd.ones(nch)
num_iters = 10
expand_shape = [1] * len(shape)
expand_shape[axis] = shape[axis]
for _ in range(num_iters):
data = mx.nd.random.uniform(shape=shape)
data.attach_grad()
ograd = mx.nd.random.uniform(shape=shape)
with mx.autograd.record():
output = op(data, bn_gamma, bn_beta,
bn_running_mean, bn_running_var,
momentum=momentum, eps=epsilon,
fix_gamma=False, **kwargs)
if output_mean_var:
output, output_mean, output_std = output
output.backward(ograd)
mx.nd.waitall()
data_mean = data.mean(
axis=axis, exclude=True, keepdims=True)
data_var = (data - data_mean).square().mean(axis=axis,
exclude=True,
keepdims=True)
target_output = (data - data_mean) / \
(data_var + epsilon).sqrt() * \
bn_gamma.reshape(expand_shape) + \
bn_beta.reshape(expand_shape)
# squeeze data_mean and data_var
data_mean_flat = data_mean.squeeze()
data_var_flat = data_var.squeeze()
running_mean = running_mean * momentum + \
data_mean_flat * (1 - momentum)
running_var = running_var * momentum + \
data_var_flat * (1 - momentum)
W = bn_gamma.reshape(expand_shape)
dnx = ograd * W
xsm = data - data_mean
nd = 1.0 / mx.nd.sqrt(data_var + epsilon)
nx = xsm * nd
m = np.prod(shape) / shape[axis]
dvar = (dnx * xsm).sum(axis=axis, keepdims=True,
exclude=True) * (-0.5) * mx.nd.power(nd, 3)
dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \
dvar * xsm.mean(axis=axis, keepdims=True,
exclude=True) * 2.0
dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m)
dW = (ograd * nx).sum(axis=axis, exclude=True)
db = ograd.sum(axis=axis, exclude=True)
atol = 1e-2
rtol = 1e-2
if output_mean_var:
assert_almost_equal(output_mean.asnumpy(),
data_mean_flat.asnumpy(),
atol=atol, rtol=rtol)
if op != mx.nd.contrib.SyncBatchNorm:
assert_almost_equal(output_std.asnumpy(),
(1.0 / (data_var_flat +
epsilon).sqrt()).asnumpy(),
atol=atol, rtol=rtol)
else:
assert_almost_equal(output_std.asnumpy(),
data_var_flat.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(output.asnumpy(), target_output.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(bn_running_mean.asnumpy(
), running_mean.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(bn_running_var.asnumpy(
), running_var.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(data.grad.asnumpy(),
dX.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(
bn_gamma.grad.asnumpy(), dW.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(
bn_beta.grad.asnumpy(), db.asnumpy(), atol=atol, rtol=rtol)
for op in [mx.nd.BatchNorm, mx.nd.contrib.SyncBatchNorm]:
for shape in [(24, 2), (24, 3, 4), (24, 4, 4, 4), (24, 5, 6, 4, 4)]:
for axis in range(len(shape)):
for cudnn_off in [False, True]:
for output_mean_var in [False, True]:
_test_batchnorm_impl(op, shape, axis,
cudnn_off, output_mean_var)
@with_seed()
def test_groupnorm():
acc_types = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64'}
def x_hat_helper(x, num_groups, eps):
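        # Reference group norm: view (N, C, H, W) as (N, G, C//G, H, W) and normalize
        # each (sample, group) slice over its channel chunk and spatial dimensions,
        # accumulating the moments in the wider dtype listed in acc_types.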
dtype = x.dtype
dshape = x.shape
assert len(dshape) == 4
acc_type = acc_types[str(dtype)]
new_shape = (dshape[0], num_groups, int(dshape[1] / num_groups), dshape[2], dshape[3])
new_moments_shape = (dshape[0], num_groups, 1, 1, 1)
data = x.reshape(new_shape)
mean = np.mean(data, axis=(2, 3, 4), keepdims=False, dtype=acc_type).astype(dtype)
std = np.sqrt(np.var(data, axis=(2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype) + eps)
x_hat = (data - mean.reshape(new_moments_shape)) / std.reshape(new_moments_shape)
return x_hat, mean, std
def np_groupnorm(data, gamma, beta, num_groups, eps):
new_param_shape = (1, num_groups, 1, 1, 1)
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
out = x_hat * gamma.reshape(new_param_shape) + beta.reshape(new_param_shape)
return out.reshape(dshape), mean, std
def np_groupnorm_grad(ograd, data, gamma, beta, mean, std, num_groups, eps):
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
new_shape = x_hat.shape
dshape = data.shape
dtype = data.dtype
new_moments_shape = (new_shape[0], num_groups, 1, 1, 1)
new_param_shape = (1, num_groups, 1, 1, 1)
acc_type = acc_types[str(dtype)]
ograd = ograd.reshape(new_shape)
data = data.reshape(new_shape)
gamma = gamma.reshape(new_param_shape)
beta = beta.reshape(new_param_shape)
mean = mean.reshape(new_moments_shape)
std = std.reshape(new_moments_shape)
beta_grad = np.sum(ograd, axis=(0, 2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype)
gamma_grad = np.sum(x_hat * ograd, axis=(0, 2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype)
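        # Backward through the normalization itself: with x_hat = (x - mean) / std,
        #   dL/dx = (dL/dx_hat - mean(dL/dx_hat) - x_hat * mean(dL/dx_hat * x_hat)) / std
        # where dL/dx_hat = ograd * gamma and the means are taken over each
        # (sample, group) normalization slice.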
x_hat_grad = ograd * gamma
ograd_mult = x_hat_grad / std
red_out = np.mean(ograd_mult, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = ograd_mult - red_out
red_out = np.mean(ograd_mult * x_hat, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = data_grad - x_hat * red_out
return data_grad.reshape(dshape), gamma_grad, beta_grad
batch_size = random.randint(1, 8)
num_groups = random.randint(2, 3)
num_channels = random.randint(2, 3) * num_groups
height = random.randint(1, 5)
width = random.randint(1, 5)
dshape = (batch_size, num_channels, height, width)
param_shape = (num_groups,)
temp_shape = (batch_size, num_groups, int(num_channels / num_groups), height, width)
np_data = np.random.uniform(0.2, 1.0, dshape)
np_gamma = np.random.uniform(-1.0, 1.0, param_shape)
np_beta = np.random.uniform(-1.0, 1.0, param_shape)
data_sym = mx.sym.Variable("data")
gamma_sym = mx.sym.Variable("gamma")
beta_sym = mx.sym.Variable("beta")
for dtype in [np.float16, np.float32, np.float64]:
eps = 1e-2 if dtype == np.float16 else 1e-5
mx_data = mx.nd.array(np_data, dtype=dtype)
mx_gamma = mx.nd.array(np_gamma, dtype=dtype)
mx_beta = mx.nd.array(np_beta, dtype=dtype)
np_out, np_mean, np_std = np_groupnorm(np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
num_groups=num_groups,
eps=eps)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=True)
check_symbolic_forward(mx_sym, [mx_data, mx_gamma, mx_beta], [np_out, np_mean, np_std],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-3 if dtype == np.float16 else 1e-5, dtype=dtype)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=False)
np_ograd = np.random.uniform(-1.0, 1.0, dshape).astype(dtype)
np_data_grad, np_gamma_grad, np_beta_grad = np_groupnorm_grad(np_ograd,
np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
np_mean, np_std,
num_groups, eps)
check_symbolic_backward(mx_sym, [mx_data, mx_gamma, mx_beta], [mx.nd.array(np_ograd)],
[np_data_grad, np_gamma_grad, np_beta_grad],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-2 if dtype == np.float16 else 1e-5, dtype=dtype)
@with_seed()
def test_convolution_grouping():
for dim in [1, 2, 3]:
num_filter = 4
for num_group in [1, 2]:
kernel = (3,) * dim
shape = (1, 4) + (9,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel)
for i in range(num_group)])
exe1 = y1.simple_bind(default_context(), x=shape)
exe2 = y2.simple_bind(default_context(), x=shape, w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.float32(np.random.normal(size=arr1.shape))
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/14052")
@with_seed()
def test_depthwise_convolution():
for dim in [1,2]:
for num_base in [1, 4, 16, 32, 64]:
for kernel_x in [3, 5]:
for stride_x in [1, 2]:
for pad_x in [0, 1]:
for in_size in [7, 32]:
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
num_group = num_base
shape = (2, num_base) + (in_size,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group,
kernel=kernel, stride=stride, pad=pad)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel,
stride=stride, pad=pad)
for i in range(num_group)])
dev = default_context()
exe1 = y1.simple_bind(dev, x=shape)
exe2 = y2.simple_bind(dev, x=shape, w=(num_filter, shape[1]//num_group)+kernel,
b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.random.normal(size=arr1.shape)
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
@with_seed()
def test_convolution_independent_gradients():
# NOTE(zixuanweeei): Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/15603.
# GPU context will be enabled after figuring out the possible issue tracked at
# https://github.com/apache/incubator-mxnet/issues/15638.
ctx = mx.cpu()
atol = 1.0e-3
rtol = 1.0e-3
reqs = ["null", "write", "add"]
var_names = ["x", "w", "b"]
dims = [1, 2]
num_bases = [1, 16, 64]
kernel_xs = [3, 5]
stride_xs = [1, 2]
pad_xs = [0, 1]
in_sizes = [7, 32]
no_biases = [True, False]
for dim, num_base, kernel_x, stride_x, pad_x , in_size, no_bias in \
itertools.product(dims, num_bases, kernel_xs, stride_xs, pad_xs, in_sizes, no_biases):
# Prepare params shape
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
x_shape = (2, num_base) + (in_size,) * dim
w_shape = (num_filter, num_base) + kernel
# Symbols definition
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b') if not no_bias else None
conv = mx.sym.Convolution(x, w, b, num_filter=num_filter,
kernel=kernel, stride=stride, pad=pad, no_bias=no_bias)
for req_kind in reqs:
# Binding args for conv with possible dependent gradients
base_args = {
'x': mx.nd.random.normal(shape=x_shape, ctx=ctx),
'w': mx.nd.random.normal(shape=w_shape, ctx=ctx),
'b': mx.nd.random.normal(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
args1 = copy.deepcopy(base_args)
grad1 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req1 = [req_kind] * 3
grad_req1 = dict(zip(var_names, grad_req1))
exe1 = conv.bind(ctx, args1, args_grad=grad1, grad_req=grad_req1)
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
for x_req, w_req, b_req in itertools.product(reqs, repeat=3):
# Binding args for conv with independent gradients
args2 = copy.deepcopy(base_args) # Deepcopy the same params of `exe1`
grad2 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req2 = {"x": x_req, "w": w_req, "b": b_req}
exe2 = conv.bind(ctx, args2, args_grad=grad2, grad_req=grad_req2)
exe2.forward(is_train=True)
np.testing.assert_allclose(exe1.outputs[0].asnumpy(),
exe2.outputs[0].asnumpy(), rtol=rtol, atol=atol)
exe2.backward(exe2.outputs[0])
for var_name in var_names:
if var_name == "b" and no_bias:
continue
if grad_req2[var_name] == "null":
exe2_var_grad = grad2[var_name].asnumpy()
np.testing.assert_allclose(exe2_var_grad,
np.zeros_like(exe2_var_grad), rtol=rtol, atol=atol)
if grad_req2[var_name] != grad_req1[var_name]:
continue
np.testing.assert_allclose(args1[var_name].asnumpy(),
args2[var_name].asnumpy(), rtol=rtol, atol=atol)
np.testing.assert_allclose(grad1[var_name].asnumpy(),
grad2[var_name].asnumpy(), rtol=rtol, atol=atol)
def gen_broadcast_data(idx):
# Manually set test cases
binary_op_data_shape = np.array(
[[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]],
[[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]],
[[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]],
[[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]],
[[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]],
[[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]],
[[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]],
[[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]],
[[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]],
[[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]],
[[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]],
[[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]],
[[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]],
[[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]],
[[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]],
[[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]],
[[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]],
[[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]],
[[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]],
[[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]],
[[1, 1, 1, 1, 28], [1, 127, 1, 5, 28]],
[[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]],
[[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]],
[[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]],
[[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]],
[[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]],
[[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]],
[[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]],
[[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]],
[[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]],
[[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]])
if idx < binary_op_data_shape.shape[0]:
l_shape = binary_op_data_shape[idx][0]
r_shape = binary_op_data_shape[idx][1]
else:
        # Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
l_same_dim = np.random.randint(0, 5)
r_same_dim = np.random.randint(0, 5)
l_axis_flags = np.random.randint(0, 2, size=ndim)
r_axis_flags = np.random.randint(0, 2, size=ndim)
if l_same_dim == 4:
l_axis_flags = np.ones(ndim)
if r_same_dim == 4:
r_axis_flags = np.ones(ndim)
l_shape = shape.copy()
r_shape = shape.copy()
l_shape[np.where(l_axis_flags == 0)] = 1
r_shape[np.where(r_axis_flags == 0)] = 1
return [np.random.random(l_shape), np.random.random(r_shape)]
def gen_broadcast_data_int(idx):
    d = gen_broadcast_data(idx)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def gen_binary_data(dummy):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
#print("gen shape {}".format(shape))
return [np.random.random(shape), np.random.random(shape)]
def gen_binary_data_int(dummy):
    d = gen_binary_data(dummy)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
y.forward(is_train=True)
y = y.outputs[0].asnumpy()
x = baseline(d[0], d[1]).astype(y.dtype)
#np.set_printoptions(precision=20)
a = d[0]
b = d[1]
#print("a: {} {}".format(a.dtype, a))
#print("a: {} {}".format(b.dtype, b))
#print("x: {} {}".format(x.dtype, x))
#print("y: {} {}".format(y.dtype, y))
if mx_nd_func is not None:
d0 = mx.nd.array(d[0], dtype=d[0].dtype)
d1 = mx.nd.array(d[1], dtype=d[1].dtype)
assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
idx = np.abs(x-y) > atol+rtol*np.abs(x)
if idx.any():
import binascii
np.set_printoptions(precision=20)
logging.error('found precision problem:')
d[0] = np.broadcast_to(d[0], x.shape)
d[1] = np.broadcast_to(d[1], x.shape)
logging.error('input a: {}'.format(d[0][idx]))
logging.error('input b: {}'.format(d[1][idx]))
logging.error("output x: {} {}".format(x.dtype, x))
logging.error("output y: {} {}".format(y.dtype, y))
def ftohex(xs):
import struct
return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
logging.error('output x in baseline(a, b): {}'.format(x[idx]))
logging.error('output y in symbol(a, b): {}'.format(y[idx]))
logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
logging.error('input a hex: {}'.format(ftohex(d[1][idx])))
logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
assert_allclose(y, x, rtol=rtol, atol=atol)
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
out = np.random.random((d[0] + d[1]).shape)
def reduce_op(shape, x):
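            # Collapse the broadcast gradient back onto the operand's shape by
            # summing over every axis where the operand had size 1.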
if shape == x.shape:
return x
keepdims_shape = list(x.shape)
for i in range(len(shape)):
if x.shape[i] != shape[i]:
keepdims_shape[i] = 1
x = np.sum(x, axis=i).reshape(keepdims_shape)
return x
baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1])
x_1 = reduce_op(d[0].shape, baseline_grad1)
x_2 = reduce_op(d[1].shape, baseline_grad2)
y_1 = mx.nd.empty(d[0].shape)
y_2 = mx.nd.empty(d[1].shape)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
args_grad=[y_1, y_2])
y.forward(is_train=True)
y.backward([mx.nd.array(out)])
assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol)
assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol)
@with_seed()
def test_binary_op():
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = a + b
check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)
def test_bminus(a, b):
c = a - b
check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data)
def test_bmul(a, b):
c = a * b
check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)
def test_bdiv(a, b):
c = a / b
check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)
def test_bmod(a, b):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
#c = a % b
c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)
def test_bpow(a, b):
c = a ** b
check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_binary_data)
def test_bneq(a, b):
c = a != b
# '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32.
# Issue exposed with seed 1644387363
check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bneq(a, b)
@with_seed()
def test_broadcast_binary_op():
def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
"""This function ensures that checking the numerical gradient of
broadcast_max/min is not crossing the boundary y=x where there
        is no gradient definition at those singularities."""
x_max = np.max(x)
y = x_max + 2 * delta + np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
x_min = np.min(x)
y = x_min - 2 * delta - np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = mx.sym.broadcast_plus(a, b)
check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)
def test_bminus(a, b):
c = mx.sym.broadcast_minus(a, b)
check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data)
def test_bmul(a, b):
c = mx.sym.broadcast_mul(a, b)
check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)
def test_bdiv(a, b):
c = mx.sym.broadcast_div(a, b)
check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data)
def test_bmod(a_, b_):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
a = mx.sym.cast(a_, dtype='float64')
b = mx.sym.cast(b_, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
c = mx.sym.broadcast_mod(a, b)
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bpow(a, b):
c = mx.sym.broadcast_power(a, b)
check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_broadcast_data)
def test_bequal(a, b):
c = mx.sym.broadcast_equal(a, b)
check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
mx_nd_func=mx.nd.equal)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bmax(a, b):
c = mx.sym.broadcast_maximum(a, b)
check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bmin(a, b):
c = mx.sym.broadcast_minimum(a, b)
check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_band(a, b):
c = mx.sym.broadcast_logical_and(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_and)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bor(a, b):
c = mx.sym.broadcast_logical_or(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_or)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bxor(a, b):
c = mx.sym.broadcast_logical_xor(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_xor)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bequal(a, b)
test_bmax(a, b)
test_bmin(a, b)
test_band(a, b)
test_bor(a, b)
test_bxor(a, b)
@with_seed()
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
dim = len(dil)
assert(len(kernel_shape) == dim)
# Input for spike response
data_size = 33
data_shape = (1, 1) + (data_size,) * dim
center = (0,0) + (data_size // 2,) * dim
spike_imgs = np.zeros(shape=data_shape, dtype=np.float32)
spike_imgs[center] = 1.0
spike_img = mx.nd.array(spike_imgs)
spike_img2 = mx.nd.array(spike_imgs)
kernel_weights = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel = mx.symbol.Variable('kernel')
in_img = mx.symbol.Variable('input')
net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution')
net.list_arguments()
be = net.bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights},
args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } )
be.forward(True)
out_o = be.outputs[0].asnumpy()
ndo = be.outputs[0]
out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32)
out_grads[center] = 1.0
out_grad = mx.nd.array(out_grads)
be.backward([out_grad])
vgrad = be.grad_arrays[0].asnumpy()
out = out_o.reshape(out_o.shape[2:])
nz_loc = np.nonzero(out)
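    # A unit impulse convolved with an all-ones kernel gives a response whose sum
    # equals the number of kernel elements; backpropagating a unit impulse through
    # the same kernel gives an input gradient with the same sum.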
assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5)
assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5)
# Now check whether the input gradient was computed correctly
input_grad = mx.nd.array(vgrad)
be = net.bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights})
be.forward(True)
out_o = be.outputs[0].asnumpy()
assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5)
rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32)
impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at [0,0,16,16]
rnd_kernel = mx.nd.array(rnd_kernel_s)
rnd_kernel2 = mx.nd.array(rnd_kernel_s)
white_in = mx.nd.ones(shape=data_shape)
white_in2 = mx.nd.ones(shape=data_shape)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel},
args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } )
be.forward(True)
be.backward([impulse_error])
out_orig = be.outputs[0].asnumpy()
kernel_gradient = be.grad_arrays[1].asnumpy()
dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel})
be.forward(True)
out = be.outputs[0].asnumpy()
# Now do a simple check of the kernel gradient
assert(out[center] - np.sum(kernel_gradient) - out_orig[center] < 0.001)
@with_seed()
def test_convolution_dilated_impulse_response():
# 1D
for dil in [ (1,), (2,), (3,) ]:
for ks in [ (1,), (2,), (3,), (4,)]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 2D
for dil in [ (1,1), (2,2), (3,3) ]:
for ks in [ (3,3), (4,4), (2,3), (3,2), (1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
@with_seed()
def test_reshape():
def test_reshape_new(src_shape, shape_args, reverse, dst_shape):
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=src_shape)
assert output_shape[0] == dst_shape, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
dat_npy = np.random.rand(*src_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), data=src_shape)
exe.arg_dict['data'][:] = dat_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
for i in range(len(src_shape)):
holdout_src_shape = list(src_shape)
holdout_src_shape[i] = 0
holdout_src_shape = tuple(holdout_src_shape)
net = mx.sym.Variable('data')
net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape))
input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape)
assert output_shape[0] == dst_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
assert input_shape[0] == src_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
# Test new api (Using shape)
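    # Special codes in the shape spec (as exercised by the expected dst shapes below):
    #   0  -> copy this dimension from the input
    #  -1  -> infer this dimension from the remaining elements
    #  -2  -> copy all remaining dimensions from the input
    #  -3  -> merge two consecutive input dimensions into one
    #  -4  -> split one input dimension into two (one of them may be -1)
    # With reverse=True the spec is matched against the input shape from right to left.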
test_cases = [
[(2, 3, 5, 5), (0, -1), False, (2, 75)],
[(2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)],
[(5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)],
[(2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)],
[(2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)],
[(2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)],
[(2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)],
[(2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)],
[(2, 3, 5, 6), (-3, -3), False, (6, 30)],
[(2, 3, 5, 6), (-3, -1), False, (6, 30)],
[(64,), (-4, 16, 4), False, (16, 4)],
[(64,), (-4, 16, -1), False, (16, 4)],
[(64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)],
[(2, 3, 5, 5), (0, -1), True, (5, 30)],
[(2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)],
[(5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)],
[(2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)],
[(2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)],
[(2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)],
[(2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)],
[(2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)],
[(2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)],
[(2, 3, 5, 6), (-3, -3), True, (6, 30)],
[(64,), (16, 4, -4), True, (16, 4)],
[(64,), (16, -1, -4), True, (16, 4)],
[(1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16)]]
for test_case in test_cases:
test_reshape_new(*test_case)
# Test old api
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, target_shape=(2, 0))
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5))
assert(output_shape[0] == (2, 75))
# Test for Flatten
data = mx.sym.Variable("data")
net = mx.sym.Flatten(data)
exe = net.simple_bind(ctx=default_context(), data=(5, 4, 3, 7))
data_npy = np.random.normal(size=(5, 4, 3, 7))
out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7))
outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy()
assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7)))
exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_context())])
assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7)))
@with_seed()
def test_reshape_like():
def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape):
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, rhs_end=rend)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape)
assert output_shape[0] == dst_shape, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
lhs_npy = np.random.rand(*lhs_shape)
rhs_npy = np.random.rand(*rhs_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), lhs=lhs_shape, rhs=rhs_shape)
exe.arg_dict['lhs'][:] = lhs_npy
exe.arg_dict['rhs'][:] = rhs_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
# Test new api (Using shape)
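    # reshape_like with begin/end replaces the lhs dimensions in [lhs_begin, lhs_end)
    # by the rhs dimensions in [rhs_begin, rhs_end); None means the start/end of the
    # shape and negative indices count from the end, as the expected dst shapes below show.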
test_cases = [
[(30,), (15,2,4), 0, None, 0, 2, (15,2)],
[(30,), (15,2,4), None, 1, None, 2, (15,2)],
[(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)],
[(3,5), (1,15,4), 0, 2, 1, 2, (15,)],
[(3,5), (1,15,4), 0, None, 1, -1, (15,)],
[(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)],
[(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)]
]
for test_case in test_cases:
test_reshape_like_new(*test_case)
# Test old api
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2))
assert(output_shape[0] == (30,20,2))
@with_seed()
def test_reduce():
sample_num = 500
def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0,
test_exclude=True, test_none_axis=False):
for i in range(sample_num):
            # Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5
# Insert a NaN with probability equal to nan_prob
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
axis_num = np.random.randint(0, ndim, size=1)
axis_flags = np.random.randint(0, 2, size=ndim)
if test_exclude:
exclude = np.random.randint(0, 2)
else:
exclude = False
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
if 0 == len(axes):
axes = None
elif 1 == len(axes):
axes = axes[0]
else:
axes = tuple(axes)
keepdims = np.random.randint(0, 2)
a = mx.symbol.Variable('a')
if axes is None:
if test_none_axis:
b = mx_reduce_sym(a, keepdims=keepdims, axis=axes)
else:
b = mx_reduce_sym(a, keepdims=keepdims)
elif exclude and isinstance(axes, tuple) and len(axes) < ndim:
naxes = [i for i in range(ndim) if i not in axes]
b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True)
else:
b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
dat_npy = np.random.rand(*shape)
# Test with both negative and positive values (randomly). Avoid having both in the same
# test, which can be problematic for error checking due to near-zero values.
if np.random.rand() > 0.5:
dat_npy = -dat_npy
if nan_prob > 0:
dat_npy[np.random.rand(*shape) < nan_prob] = np.nan
sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
if sum_groundtruth.shape == ():
sum_groundtruth = np.array([sum_groundtruth])
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape
grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy,
outdata=sum_groundtruth,
axis=axes, keepdims=keepdims,
keepdim_shape=keepdim_shape)
net = b.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
equal_forward = almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, 1E-4, 1E-4)
assert equal_forward
net.backward(out_grads=mx.nd.array(outgrad_npy))
bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape)
equal_backward = almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, 1E-4, 1E-4)
assert equal_backward
test_none_axis = [True, False]
for test_none in test_none_axis:
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape),
mx.symbol.sum, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape)/(data.size/outdata.size),
mx.symbol.mean, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data),
mx.symbol.prod, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)),
mx.symbol.nansum, 0.3, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) *
(outdata.reshape(keepdim_shape) / data)),
mx.symbol.nanprod, 0.3, test_none_axis=test_none)
# grad of max and min are sensitive to the precision of the calculation.
# Force numpy to match mxnet's float32.
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.max)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.min),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.min)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)),
mx.symbol.norm, test_exclude=False, test_none_axis=test_none)
@with_seed()
def test_broadcast():
sample_num = 200
for i in range(sample_num):
        # Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 6, size=(ndim,))
axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
shape = target_shape.copy()
size = tuple([shape[ele] for ele in axis])
for ele in axis:
shape[ele] = 1
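        # broadcast_to interprets a 0 in the target shape as "keep the input's
        # dimension"; set one non-broadcast axis to 0 to cover that path.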
target_shape_with_zero = list(target_shape)
for idx in range(len(target_shape_with_zero)):
if idx not in axis:
target_shape_with_zero[idx] = 0
break
a = mx.symbol.Variable('a')
sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
sym_bcast_to_with_zero = mx.symbol.broadcast_to(a, shape=tuple(target_shape_with_zero))
sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to)
def test_broadcasting_ele(sym_bcast):
dat_npy = np.random.rand(*shape)
groundtruth = dat_npy
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.random.rand(*target_shape)
grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
numpy_reduce_func=np.sum)
net = sym_bcast.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
assert (net.outputs[0].shape == target_shape).all()
assert_almost_equal(net.outputs[0].asnumpy(), groundtruth, rtol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
assert_almost_equal(grad_nd.asnumpy(), grad_groundtruth, rtol=1e-4)
test_broadcasting_ele(sym_bcast_axis)
test_broadcasting_ele(sym_bcast_to)
test_broadcasting_ele(sym_bcast_to_with_zero)
test_broadcasting_ele(sym_bcast_like)
@with_seed()
def test_transpose():
for ndim in range(1, 7):
for t in range(5):
dims = list(np.random.randint(1, 10, size=ndim))
axes = list(range(ndim))
random.shuffle(axes)
axes = tuple(axes)
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@with_seed()
def test_expand_dims():
for ndim in range(1, 6):
for axis in range(-ndim + 1, ndim):
x = np.random.normal(size=list(np.random.randint(1, 10, size=ndim)))
y = mx.nd.array(x)
x1 = np.expand_dims(x, axis=axis)
y1 = mx.nd.expand_dims(y, axis=axis)
assert_allclose(x1, y1.asnumpy())
assert_allclose(x1.shape, y1.shape)
@with_seed()
def test_crop():
for ndim in range(1, 6):
for t in range(5):
dims = []
begin = []
end = []
idx = []
for i in range(ndim):
d = random.randint(1, 5)
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if b == 0 and random.randint(0, 1):
b = None
elif b != 0 and random.randint(0, 1):
b -= d
if e == d and random.randint(0, 1):
e = None
elif e != d and random.randint(0, 1):
e -= d
dims.append(d)
begin.append(b)
end.append(e)
idx.append(slice(b, e))
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
assert_allclose(x.asnumpy()[idx], y.asnumpy())
vx = mx.sym.Variable('x')
vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
check_numeric_gradient(vy, [x.asnumpy()])
@with_seed()
def test_slice_axis():
for ndim in range(1, 6):
shape = np.random.randint(1, 11, size=(ndim,))
for t in range(ndim):
d = shape[t]
b = random.randint(0, d-1)
e = random.randint(b+1, d)
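            # Randomly switch begin/end to None or to negative indices so every
            # index convention accepted by slice_axis gets exercised.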
if np.random.rand() > 0.6:
e = None
else:
if e < d and np.random.rand() > 0.5:
e = e - d
if np.random.rand() > 0.5:
b = b - d
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
idx[t] = slice(b, e)
X = mx.symbol.Variable('X')
x = mx.nd.array(np.random.normal(size=shape))
Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)
xgrad = mx.nd.empty(x.shape)
exec1 = Y.bind(default_context(), args = [x], args_grad = {'X': xgrad})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
x_grad_npy = np.random.normal(size=x.shape)
xgrad = mx.nd.array(x_grad_npy)
exec2 = Y.bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
exec2.forward(is_train=True)
exec2.backward([exec2.outputs[0]])
xx = np.zeros(shape=x.shape, dtype=np.float32)
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
@with_seed()
def test_slice_like():
for ndim in range(1, 6):
from_shape = np.random.randint(1, 11, size=(ndim,))
shape = [s + np.random.randint(0, 3) for s in from_shape]
for t in range(ndim):
if t > 0:
axes = np.random.randint(0, ndim, size=t).tolist()
else:
axes = []
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
if i in axes or not axes:
idx[i] = slice(0, from_shape[i])
if axes:
pos = np.random.randint(0, t)
if axes[pos] > 0:
axes[pos] -= ndim # negative index
X = mx.symbol.Variable('X')
X_1 = mx.symbol.Variable('X1')
x = mx.nd.array(np.random.normal(size=shape))
x1 = mx.nd.array(np.random.normal(size=from_shape))
Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)
xgrad = mx.nd.empty(x.shape)
xgrad1 = mx.nd.empty(x1.shape)
exec1 = Y.bind(default_context(), args = [x, x1],
args_grad = {'X': xgrad, 'X1': xgrad1})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy())
@with_seed()
def test_slice_like_different_types():
x = [[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]]
y = [[ 0., 0., 0.],
[ 0., 0., 0.]]
x = mx.nd.array(x)
y = mx.nd.array(y).astype('int32')
z = mx.nd.slice_like(x, y)
assert_allclose(z.asnumpy(), [[1,2,3],[5,6,7]])
@with_seed()
def test_reshape_like_different_types():
x = mx.nd.zeros((2, 3))
y = mx.nd.array([[1, 2], [3, 4], [5, 6]])
y = mx.nd.array(y).astype('int32')
z = mx.nd.reshape_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0],[0,0]])
@with_seed()
def test_flip():
for ndim in range(1, 6):
for t in range(5):
dims = [random.randint(1,10) for i in range(ndim)]
axis = random.randint(0, ndim-1)
idx = [slice(None, None, -1) if i == axis else slice(None, None) for i in range(ndim)]
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.flip(x, axis=axis)
assert_allclose(x.asnumpy()[idx], y.asnumpy())
@with_seed()
def test_stn():
import sys
np.set_printoptions(threshold=sys.maxsize)
num_filter = 2 # conv of loc net
kernel = (3, 3) # conv of loc net
num_hidden = 6 # fc of loc net
for n in [1, 2, 3, 4]:
for c in [1, 2, 3, 4]:
            for h in [5, 9, 13, 17]:  # for convenience of testing, the third and fourth input dims should be 4x + 1
for w in [5, 9, 13, 17]:
data_shape = (n, c, h, w)
target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2))
data = mx.sym.Variable(name="data")
loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv")
loc = mx.sym.Flatten(data=loc)
loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc")
stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape,
transform_type="affine", sampler_type="bilinear")
arg_names = stn.list_arguments()
arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape)
# check shape
assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1])
dev = default_context()
#dev = mx.gpu(0)
args = {}
args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev)
args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev)
args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev)
args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev)
args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev)
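                    # With the conv/fc weights zeroed, the predicted affine parameters
                    # reduce to loc_fc_bias = [0.5, 0, 0, 0, 0.5, 0], i.e. a pure 0.5
                    # scaling, so the sampler output should equal the central crop of
                    # the input checked below via the h//4:h-h//4, w//4:w-w//4 slices.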
grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes]
exe = stn.bind(dev, args=args, args_grad=grad_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
# check forward
assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
out_grad = mx.nd.ones(out.shape, ctx=dev)
exe.backward([out_grad])
# check backward
assert_almost_equal(out_grad.asnumpy(), grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
def test_stn_valid_sampling():
    target_shape = (28, 28)
    src_shape = (42, 42)
data = mx.sym.Variable(name="data")
loc = mx.sym.Variable(name="loc")
    data_array = np.zeros((1, 1) + src_shape)
# Have an ever so slight rotation.
loc_array = np.array(
[[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901,
-0.000919065]])
stn = mx.sym.SpatialTransformer(
data=data,
loc=loc,
target_shape=target_shape,
transform_type="affine",
sampler_type="bilinear")
grad_req = {k: 'write' for k in stn.list_arguments()}
grads = {
'data': mx.nd.array(np.zeros_like(data_array)),
'loc': mx.nd.array(np.zeros_like(loc_array))
}
executor = stn.bind(
ctx=default_context(),
args={'data': mx.nd.array(data_array),
'loc': mx.nd.array(loc_array)},
grad_req=grad_req,
args_grad=grads)
executor.forward(is_train=True)
    executor.backward(mx.nd.ones((1, 1) + target_shape))
@with_seed()
def test_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
ndims = [2]
if ctx.device_type == 'gpu':
dtypes += ['float16']
ndims += [1]
# Test normal dot.
for ndim in ndims:
for data_type in dtypes:
for m in range(1, 5):
for k in range(1, 5):
if ndim == 1 and k != 1:
pass
for n in range(1, 5):
a_shape = (m, k) if ndim == 2 else (m,)
b_shape = (k, n) if ndim == 2 else (n,)
a_npy = np.random.normal(0, 1, (m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((m, k), dtype=data_type)
bgrad_npy = np.empty((k, n), dtype=data_type)
c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :])
bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :])
agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.dot(a, b)
exe = c.simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0].asnumpy(), c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-3)
exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
assert_almost_equal(exe.grad_dict['a'].asnumpy(), agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-3)
assert_almost_equal(exe.grad_dict['b'].asnumpy(), bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-3)
# Test dot with transpose flag using gradient checker.
def dot_sym(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y)
def dot_sym_xT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True)
def dot_sym_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_b=True)
def dot_sym_xT_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True, transpose_b=True)
for data_type in dtypes:
for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
m1_npy = np.random.uniform(-1, 1, ashape)
m1_npy = m1_npy.astype(data_type)
m2_npy = np.random.uniform(-1, 1, bshape)
m2_npy = m2_npy.astype(data_type)
check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
@with_seed()
def test_batch_dot():
dtypes = ['float32', 'float64']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for data_type in dtypes:
for batch_size in range(1, 5):
for m in range(1, 5):
for k in range(1, 5):
for n in range(1, 5):
transpose_a = (np.random.rand() > 0.5)
transpose_b = (np.random.rand() > 0.5)
a_npy = np.random.normal(0, 1, (batch_size, m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (batch_size, k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((batch_size, m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
                        a_init_grad_npy = np.random.normal(size=(batch_size, m, k))
                        a_init_grad_npy = a_init_grad_npy.astype(data_type)
                        b_init_grad_npy = np.random.normal(size=(batch_size, k, n))
                        b_init_grad_npy = b_init_grad_npy.astype(data_type)
for i in range(batch_size):
c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
if transpose_a:
a_npy = np.transpose(a_npy, axes=(0, 2, 1))
agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
if transpose_b:
b_npy = np.transpose(b_npy, axes=(0, 2, 1))
bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
exe = c.simple_bind(ctx=default_context(),
a=a_npy.shape, b=b_npy.shape, grad_req='write')
exe_add = c.simple_bind(ctx=default_context(),
a=a_npy.shape, b=b_npy.shape, grad_req='add')
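                        # exe_add uses grad_req='add', so gradients accumulate into the
                        # buffers seeded below; the expected result is agrad/bgrad plus
                        # those initial values.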
exe_add.grad_dict['a'][:] = a_init_grad_npy
exe_add.grad_dict['b'][:] = b_init_grad_npy
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0].asnumpy(), c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe.grad_dict['a'].asnumpy(), agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe.grad_dict['b'].asnumpy(), bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe_add.forward(is_train=True, a=a_npy, b=b_npy)
exe_add.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe_add.grad_dict['a'].asnumpy(),
agrad_npy + a_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe_add.grad_dict['b'].asnumpy(),
bgrad_npy + b_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
def get_correlation(data1,data2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply):
img1 = mx.sym.Variable('img1')
img2 = mx.sym.Variable('img2')
return mx.sym.Correlation(data1=img1,data2=img2,kernel_size =kernel_size,max_displacement = max_displacement,
stride1 = stride1,stride2 = stride2,pad_size= pad_size,is_multiply = is_multiply)
def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:]
tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:]
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w]
else:
out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w])
out /= float(kernel_size**2*data1.shape[1])
return out,tmp1,tmp2
def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1_grad = np.zeros(tmp1.shape)
tmp2_grad = np.zeros(tmp2.shape)
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w]
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w]
else:
sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn)
tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1])
tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1])
return tmp1_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],tmp2_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],
def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype):
img1 = np.random.random(data_shape)
img1 = img1.astype(dtype)
img2 = np.random.random(data_shape)
img2 = img2.astype(dtype)
    net1 = get_correlation(img1, img2, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply)
    exe1 = net1.simple_bind(default_context(), img1=img1.shape, img2=img2.shape)
exe1.arg_dict['img1'][:] = img1
exe1.arg_dict['img2'][:] = img2
#cpu forward
exe1.forward(is_train=True)
# python forward
forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# forward error
assert_almost_equal(exe1.outputs[0].asnumpy(), forward_result, rtol=1e-4, atol=1e-4)
# out_grad
a = np.ones(forward_result.shape)
out_grad1 = mx.nd.array(a,default_context())
# cpu backward
exe1.backward(out_grads=out_grad1)
# python backward
grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# backward error
assert_almost_equal(exe1.grad_dict['img1'].asnumpy(), grad1, rtol=1e-3, atol=1e-4)
assert_almost_equal(exe1.grad_dict['img2'].asnumpy(), grad2, rtol=1e-3, atol=1e-4)
@with_seed()
def test_correlation():
def test_infer_type(dtype):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
corr = mx.sym.Correlation(data1=a, data2=b)
arg_type1, out_type1, _ = corr.infer_type(a=dtype)
        if arg_type1[0] != np.dtype(dtype) or arg_type1[1] != np.dtype(dtype) or out_type1[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from a is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]),
                                    names=['a', 'b'])
            raise AssertionError(msg)
arg_type2, out_type2, _ = corr.infer_type(b=dtype)
        if arg_type2[0] != np.dtype(dtype) or arg_type2[1] != np.dtype(dtype) or out_type2[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from b is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type2[0], arg_type2[1], out_type2[0]),
                                    names=['a', 'b'])
            raise AssertionError(msg)
for dtype in ['float16', 'float32']:
test_infer_type(dtype)
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 1,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 10,stride1 = 1,stride2 = 2,pad_size = 10,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,6,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,11,11), kernel_size = 5,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
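# SVMOutput reference gradients used below (y is the +/-1 label mask, f the raw scores):
#   L1-SVM (use_linear=True): dL/df = -y * 1[1 - y*f > 0]
#   L2-SVM (default):         dL/df = -2*y * max(1 - y*f, 0)
# These follow from the hinge loss max(1 - y*f, 0) and its square.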
@with_seed()
def test_support_vector_machine_l1_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L, use_linear=True)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0].asnumpy())
exec1.backward()
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-1) * l_mask * np.greater(1 - l_mask * x_np, 0)
assert_almost_equal(grad_np, grad.asnumpy())
@with_seed()
def test_support_vector_machine_l2_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
x_np = x_np.astype(np.float32)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0].asnumpy())
exec1.backward()
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-2)*l_mask*np.maximum(1-l_mask*x_np,0)
grad_np = grad_np.astype(np.float32)
assert_almost_equal(grad_np, grad.asnumpy())
# Seed set because the test is not robust enough to operate on random data
@with_seed(1234)
def test_roipooling():
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1)
x1 = np.random.rand(4, 3, 12, 8).astype('float32')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2], [0, 3, 3, 3, 3]], dtype='float32')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'write', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'add', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1E-4)
def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
# bind with label
X = mx.symbol.Variable('X', dtype=dtype)
Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width)
x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
# numpy result
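    # zip(*[iter(pad_width)] * 2) groups the flat pad_width tuple into consecutive
    # (before, after) pairs, one per axis, which is the layout np.pad expects.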
pad_grouped = list(zip(*[iter(list(pad_width))] * 2))
np_out = np.pad(x.asnumpy(), pad_grouped, mode)
# mxnet result
grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype)
exec1 = Y.bind(xpu, args = [x], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0].asnumpy()
# compare numpy + mxnet
assert_almost_equal(out, np_out)
# grad check
check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
@with_seed()
def test_pad():
ctx = default_context()
shape1 = (2, 3, 3, 5)
pad1 = (0, 0, 0, 0, 1, 2, 3, 4)
shape2 = (2, 3, 3, 5, 4)
pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1)
# note: this op doesn't support ints yet. Add tests when supported
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
check_pad_with_shape(shape1, ctx, pad1, 'constant', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'edge', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'constant', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'edge', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'reflect', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'reflect', dtype)
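# NumPy reference for InstanceNorm: each sample/channel slice is normalized over its
# spatial dimensions, out = weight * (x - mean) / sqrt(var + eps) + bias, with the
# per-channel weight and bias broadcast across the batch.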
def np_instance_norm(data, weight, bias, eps):
spatial_dims = data.shape[2::]
num_spatial_vals = np.prod(np.array(spatial_dims))
scale = 1/float(num_spatial_vals)
sum_axis = tuple(range(2, data.ndim))
mean = scale * np.sum(data, axis = sum_axis)
mean = np.reshape(np.repeat(mean, num_spatial_vals), data.shape)
var = scale * np.sum((data - mean)**2, axis = sum_axis)
var = np.reshape(np.repeat(var, num_spatial_vals), data.shape)
weightBatch = np.tile(weight, (data.shape[0], 1))
weightBatch = np.reshape(np.repeat(weightBatch, num_spatial_vals), data.shape)
biasBatch = np.tile(bias, (data.shape[0], 1))
biasBatch = np.reshape(np.repeat(biasBatch, num_spatial_vals), data.shape)
return weightBatch * (data - mean)/np.sqrt(var + eps) + biasBatch
def check_instance_norm_with_shape(shape, xpu):
# bind with label
eps = 0.001
X = mx.symbol.Variable('X')
G = mx.symbol.Variable('G')
B = mx.symbol.Variable('B')
Y = mx.symbol.InstanceNorm(data=X, beta=B, gamma=G, eps=eps)
x = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu)
gamma = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
beta = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
np_out = np_instance_norm(x.asnumpy(), gamma.asnumpy(), beta.asnumpy(), eps)
exec1 = Y.bind(xpu, args = {'X':x, 'G':gamma, 'B':beta})
exec1.forward(is_train=False)
out = exec1.outputs[0].asnumpy()
assert_almost_equal(out, np_out, rtol=1e-4, atol=1e-4)
check_numeric_gradient(Y, {'X':x.asnumpy(), 'G':gamma.asnumpy(), 'B':beta.asnumpy()},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
@with_seed()
def test_instance_normalization():
check_instance_norm_with_shape((1, 1, 1), default_context())
check_instance_norm_with_shape((2, 1, 2), default_context())
check_instance_norm_with_shape((2,4,5,6), default_context())
check_instance_norm_with_shape((3,3,2,3,2,1,1), default_context())
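# The L2Normalization reference below covers the three modes of the operator:
#   'channel'  - normalize each spatial position across the channel axis,
#   'spatial'  - normalize each (sample, channel) slice across its spatial elements,
#   'instance' - normalize each sample across all of its elements.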
def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
ctx = default_context()
data = mx.symbol.Variable('data')
out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
# calculate numpy results
if mode == 'channel':
assert in_data.ndim > 2
np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
np_out = np.multiply(in_data, np_norm)
elif mode == 'spatial':
assert in_data.ndim > 2
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // (s[0] * s[1]), axis=2)
np_out = np.multiply(in_data, np_norm.reshape(s))
elif mode == 'instance':
assert in_data.ndim > 1
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
np_out = np.multiply(in_data, np_norm.reshape(s))
else:
raise RuntimeError('Unknown l2 normalization mode')
exe = out.simple_bind(ctx=ctx, data=in_data.shape)
output = exe.forward(is_train=True, data=in_data)
# compare numpy + mxnet
    assert_almost_equal(exe.outputs[0].asnumpy(), np_out, rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
# check gradient
check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3)
@with_seed()
def test_l2_normalization():
for dtype in ['float16', 'float32', 'float64']:
for mode in ['channel', 'spatial', 'instance']:
nbatch = random.randint(1, 4)
nchannel = random.randint(3, 5)
height = random.randint(4, 6)
check_l2_normalization((nbatch, nchannel, height), mode, dtype)
width = random.randint(5, 7)
check_l2_normalization((nbatch, nchannel, height, width), mode, dtype)
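# LayerNorm reference gradients used in npy_layer_norm_grad below. With
# xhat = (x - mean) / std and w = out_grad * gamma / std (gamma broadcast along `axis`):
#   d_gamma = sum(out_grad * xhat)  over all axes except `axis`
#   d_beta  = sum(out_grad)         over all axes except `axis`
#   d_x     = w - mean(w) - xhat * mean(w * xhat), with means taken along `axis`.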
def check_layer_normalization(in_shape, axis, eps, dtype=np.float32,
forward_check_eps=1E-3, backward_check_eps=1E-3,
npy_grad_check=True, finite_grad_check=True):
def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
if axis < 0:
axis += data.ndim
broadcast_shape = [1 for _ in range(data.ndim)]
broadcast_shape[axis] = data.shape[axis]
mean = data.mean(axis=axis, keepdims=True).astype(dtype)
var = data.var(axis=axis, keepdims=True).astype(dtype)
std = np.sqrt(var + dtype(eps)).astype(dtype)
out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
np.reshape(beta, broadcast_shape)
return out
def npy_layer_norm_grad(data, gamma, out_grad, axis, eps):
if axis < 0:
axis += data.ndim
exclude_axis = tuple([ele for ele in range(data.ndim) if ele != axis])
data_mean = data.mean(axis=axis, keepdims=True)
data_var = data.var(axis=axis, keepdims=True)
data_std = np.sqrt(data_var + eps)
centered_data = (data - data_mean) / data_std
gamma_grad = (centered_data * out_grad).sum(axis=exclude_axis, keepdims=True)
beta_grad = out_grad.sum(axis=exclude_axis, keepdims=True)
w = out_grad * gamma.reshape([1 if i != axis else data.shape[axis] for i in range(data.ndim)])\
/ data_std
data_grad = w - w.mean(axis=axis, keepdims=True)\
- centered_data * (w * centered_data).mean(axis=axis, keepdims=True)
gamma_grad = gamma_grad.reshape((-1,))
beta_grad = beta_grad.reshape((-1,))
return data_grad, gamma_grad, beta_grad
ctx = default_context()
data = np.random.normal(0, 1, in_shape).astype(dtype)
gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
data_s = mx.symbol.Variable('data')
gamma_s = mx.symbol.Variable('gamma')
beta_s = mx.symbol.Variable('beta')
out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps)
exe = out_s.simple_bind(ctx, data=in_shape)
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
out_nd = exe.forward()[0]
out = npy_layer_norm(data, gamma, beta, axis, eps)
assert_almost_equal(out, out_nd.asnumpy(), forward_check_eps, forward_check_eps)
if finite_grad_check:
for req in ['write', 'add']:
check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta},
grad_nodes={'data': req, 'gamma': req, 'beta': req},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
if npy_grad_check:
# Test for grad_req = write
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
exe = out_s.simple_bind(ctx, data=in_shape, grad_req='write')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad =\
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(), gt_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(), gt_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(), gt_beta_grad, backward_check_eps, backward_check_eps)
# Test for grad_req = add
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_data_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_gamma_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
init_beta_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
exe = out_s.simple_bind(ctx, data=in_shape, grad_req='add')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.grad_dict['data'][:] = init_data_grad
exe.grad_dict['gamma'][:] = init_gamma_grad
exe.grad_dict['beta'][:] = init_beta_grad
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad = \
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(),
gt_data_grad + init_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(),
gt_gamma_grad + init_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(),
gt_beta_grad + init_beta_grad, backward_check_eps, backward_check_eps)
@with_seed()
def test_norm():
try:
import scipy
assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
from scipy.linalg import norm as sp_norm
except (AssertionError, ImportError):
print("Could not import scipy.linalg.norm or scipy is too old. "
"Falling back to numpy.linalg.norm which is not numerically stable.")
from numpy.linalg import norm as sp_norm
def l1norm(input_data, axis=0, keepdims=True):
return np.sum(abs(input_data), axis=axis, keepdims=keepdims)
def l2norm(input_data, axis=0, keepdims=True):
return sp_norm(input_data, axis=axis, keepdims=keepdims)
ctx = default_context()
data = mx.symbol.Variable('data')
in_data_dim = random_sample([4,5,6], 1)[0]
in_shape = rand_shape_nd(in_data_dim, dim=5)
epsilon = 1e-3
acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64,
np.int32: np.int32, np.int64: np.int64}
dtype_to_str = {np.float16: 'float16', np.float32: 'float32', np.float64: 'float64',
np.int32: 'int32', np.int64: 'int64'}
is_windows = sys.platform.startswith('win')
for enforce_safe_acc in ["1", "0"]:
if is_windows:
if enforce_safe_acc == "0":
break
enforce_safe_acc = "0" if "MXNET_SAFE_ACCUMULATION" not in os.environ else os.environ["MXNET_SAFE_ACCUMULATION"]
else:
os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc
for order in [1, 2]:
for dtype in [np.float16, np.float32, np.float64]:
for i in range(in_data_dim):
for out_dtype in ['float32', 'float64']:
backward_dtype = np.float32 if out_dtype == 'float32' else np.float64
accumulation_type = acc_type[dtype]
if enforce_safe_acc == "0":
backward_dtype = dtype
out_dtype = dtype_to_str[dtype]
accumulation_type = dtype
skip_backward = 'int' in out_dtype
in_data = np.random.uniform(-1, 1, in_shape).astype(accumulation_type)
in_data[abs(in_data) < epsilon] = 2 * epsilon
norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, out_dtype=out_dtype, keepdims=True)
                        npy_out = l1norm(in_data, i) if order == 1 else l2norm(in_data, i)
                        npy_out_backward = np.sign(in_data) if order == 1 else in_data / npy_out
check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx, dtype=dtype)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
dtype=backward_dtype)
# Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
if i < in_data_dim-1:
norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
                            npy_out = l1norm(in_data, (i, i+1)) if order == 1 else l2norm(in_data, (i, i+1))
npy_out_backward = np.sign(in_data) if order is 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)],
rtol=1e-2 if dtype is np.float16 else 1e-3,
atol=1e-4 if dtype is np.float16 else 1e-5, ctx=ctx)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward.astype(out_dtype)],
rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
@with_seed()
def test_layer_norm():
for enforce_safe_acc in ["1", "0"]:
os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc
for dtype, forward_check_eps, backward_check_eps in zip([np.float16, np.float32, np.float64],
[1E-2, 1E-3, 1E-4],
[1E-2, 1E-3, 1E-4]):
if dtype != np.float16:
in_shape_l, finite_grad_check_l = [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False]
else:
in_shape_l, finite_grad_check_l = [(10, 6, 5), (10, 10)], [True, True] # large input + fp16 does not pass the forward check
for in_shape, finite_grad_check in zip(in_shape_l, finite_grad_check_l):
for axis in range(-len(in_shape), len(in_shape)):
for eps in [1E-2, 1E-3]:
if dtype == np.float16:
npy_grad_check = False
else:
npy_grad_check = True
check_layer_normalization(in_shape, axis, eps, dtype=dtype,
forward_check_eps=forward_check_eps,
backward_check_eps=backward_check_eps,
npy_grad_check=npy_grad_check,
finite_grad_check=finite_grad_check)
# Numpy Implementation of Sequence Ops
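# Each helper first moves the sequence axis to position 1 with np.moveaxis so the data is
# laid out as [batch, seqlen, ...], applies the per-example operation using the given
# lengths, and (where needed) moves the axis back.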
def sequence_last_numpy(array, lengths, axis):
# create new array of dims [batch, seqlen, ...]
array2 = np.moveaxis(array, axis, 1)
dims = array2.shape
if lengths is None:
return array2[:, -1]
lengths = list(lengths)
return np.array([array2[i, int(lengths[i]) - 1] for i in range(dims[0])])
def sequence_mask_numpy(array, lengths, axis, value):
if lengths is None:
return array
arrayMask = array.copy()
# conform to [batch, seqlen, ...]
arrayMask = np.moveaxis(arrayMask, axis, 1)
shape = arrayMask.shape
lengths = list(lengths)
for i in range(shape[0]):
arrayMask[i, int(lengths[i]):] = value
return np.moveaxis(arrayMask, 1, axis)
def sequence_reverse_numpy(array, lengths, axis):
rarray = array.copy()
# conform to [batch, seqlen, ...]
rarray = np.moveaxis(rarray, axis, 1)
shape = rarray.shape
if lengths is None:
lengths = [shape[1]] * shape[0]
lengths = list(lengths)
for i in range(shape[0]):
j = int(lengths[i])
rarray[i,:j] = rarray[i,:j][::-1]
return np.moveaxis(rarray, 1, axis)
def check_sequence_func(ftype, mask_value=0, axis=0):
# bind with label
xpu = default_context()
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L') # lengths
shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)]
for seqlenQ in [True, False]:
for ary_dtype in [np.float32]:
for idx_dtype in [np.int32, np.float32]:
for s in shapes:
x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu)
batch = s[1] if (axis == 0) else s[0]
seqlen = s[axis]
l_np = np.random.randint(1, seqlen + 1, batch)
l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu)
if not seqlenQ:
l_np = None
args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis}
if seqlenQ:
args['sequence_length'] = L
if ftype == "last":
Y = mx.symbol.SequenceLast(**args)
np_out = sequence_last_numpy(x.asnumpy(), l_np, axis)
elif ftype == "mask":
args['value'] = mask_value
Y = mx.symbol.SequenceMask(**args)
np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value)
elif ftype == "reverse":
Y = mx.symbol.SequenceReverse(**args)
np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis)
fargs = [x, l] if seqlenQ else [x]
gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()]
check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy")
check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'},
numeric_eps=1e-2, rtol=1e-2)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
@with_seed()
@unittest.skip("Flaky test: https://github.com/apache/incubator-mxnet/issues/11395")
def test_sequence_last():
check_sequence_func("last", axis=0)
check_sequence_func("last", axis=1)
@with_seed()
def test_sequence_mask():
check_sequence_func("mask", axis = 0, mask_value=-2.3)
check_sequence_func("mask", axis = 1, mask_value=0.3)
def check_sequence_reverse(xpu):
# sample data
arr = np.array(
[[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr1 = np.array(
[[[ 13., 14., 15.],
[ 16., 17., 18.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]]])
arr2 = np.array(
[[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr3 = np.array(
[[[ 7., 8., 9.],
[ 16., 17., 18.]],
[[ 1., 2., 3.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 4., 5., 6.]]])
# test for matrix case
seq_len_1 = [1, 2, 2]
arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32)
arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32)
def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False):
# MxNet symbol creation
seq = mx.sym.Variable('seq')
if sequence_length and use_sequence_length:
seq_len = mx.sym.Variable('seq_len')
else:
# ensure that both are disabled, not just one
seq_len=None
use_sequence_length=False
rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length)
# MxNet symbol execution
if sequence_length:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)})
else:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr)})
fwd = bound.forward()
return fwd[0].asnumpy()
# test cases
assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3)
assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5)
@with_seed()
def test_sequence_reverse():
check_sequence_func("reverse", axis=0)
check_sequence_reverse(mx.cpu())
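# Generic element-wise checks: mathematical_core / mathematical_core_binary fill constant
# inputs, compare the symbol's forward output against forward_numpy_call, then backprop a
# constant out_grad and compare against grad_init * backward_numpy_call (chain rule with
# the supplied analytic derivative).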
def mathematical_core_binary(name,
forward_mxnet_call,
forward_numpy_call,
backward_numpy_call1,
backward_numpy_call2,
data1_init=2.,
data2_init=3.,
grad_init=2.):
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3, 4)
data_tmp2 = np.random.rand(3, 4)
data_tmp1[:] = data1_init
data_tmp2[:] = data2_init
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = forward_mxnet_call(data1, data2)
exe_test = test.bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = forward_numpy_call(data_tmp1, data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = grad_init
npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2)
npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2)
arr_grad1 = arr_grad1.asnumpy()
arr_grad2 = arr_grad2.asnumpy()
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
npout_grad = out_grad.asnumpy()
temp = backward_numpy_call(data_tmp)
npout_grad = npout_grad * temp
exe_test.backward(out_grad)
arr_grad = arr_grad.asnumpy()
# print(name)
# print(arr_grad)
# print(npout_grad)
assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_special_functions_using_scipy():
try:
from scipy import special as scipy_special
    except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
return
# gamma
mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
# gammaln
mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x), 0.5, 0.5)
# erf
mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)
# erfinv
mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)
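# Rounding ops (rint, fix) are piecewise constant, so their gradient is zero almost
# everywhere, which is presumably why the helper below only checks the forward pass
# against NumPy.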
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_mathematical():
# rsqrt
mathematical_core("rsqrt",
lambda x: mx.sym.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
# tan
mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1)
# arcsin
mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
# arccos
mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
# arctan
mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
# hypot
mathematical_core_binary("hypot",
lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
0.5, 0.5, 0.5)
# hypot scalar
mathematical_core("hypot scalar",
lambda x: mx.sym.hypot(x, 3),
lambda x: np.hypot(x, 3),
lambda x: x / np.hypot(x, 3),
0.5, 0.5)
# degrees
mathematical_core("degrees",
lambda x: mx.sym.degrees(x),
lambda x: np.degrees(x),
lambda x: 180./np.pi,
0.5, 0.5)
# radians
mathematical_core("radians",
lambda x: mx.sym.radians(x),
lambda x: np.radians(x),
lambda x: np.pi / 180.,
0.6, 1)
# sinh
mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x))
# cosh
mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5)
# tanh
mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
# arcsinh
mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.))
# arccosh
mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.))
# arctanh
mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.), 0.5)
# log1p
mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x), 0.5, 0.5)
# expm1
mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
lambda x: np.exp(x), 0.5, 0.5)
# log10
mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)))
# log2
mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)))
# rint
rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
# fix
rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))
@with_seed()
@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/12901")
def test_clip():
data = mx.symbol.Variable('data')
shape = (30, 30)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
check_symbolic_forward(test, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)],
[np.where(data_tmp < 0.6, [1], [0]) * np.where(data_tmp > -0.6, [1], [0])])
@with_seed()
def test_init():
def test_basic_val_init(sym_func, np_func, shape, dtype):
x = sym_func(shape=shape, dtype=dtype)
exe = x.bind(default_context(), args=[], args_grad=[])
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0].asnumpy(), np_func(shape=shape, dtype=dtype))
assert exe.outputs[0].asnumpy().dtype == dtype
def test_arange():
# General Random Tests
dtype_list = [np.float32, np.float64, np.int32, np.uint8]
config_list = [(10,),
(0, 10),
(5, 100, 4),
(50, -50, -2),
(-100, 100, 1),
(1.3, 456.6, 1.3)]
for dtype in dtype_list:
for config in config_list:
repeats = random.choice([1, 3])
np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
assert_almost_equal(np_out, nd_out.asnumpy())
def test_arange_inferstop():
s = mx.sym.arange(start=0, stop=None, infer_range=True)
s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5]))
exe = s.bind(ctx=mx.cpu(), args={})
exe.forward()
assert_almost_equal(exe.outputs[0].asnumpy(), np.array([0,1,2,3,4]))
def test_arange_like():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
axis_list = [0, -1]
for sh in shape_list:
for axis in axis_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0, axis=axis)
np_out = np.arange(start=0, stop=sh[axis])
assert_almost_equal(nd_out.asnumpy(), np_out)
def test_arange_like_without_axis():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
for sh in shape_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0)
np_out = np.arange(start=0, stop=val.size)
assert_almost_equal(nd_out.asnumpy(), np_out.reshape(sh))
test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
test_arange()
test_arange_inferstop()
test_arange_like()
test_arange_like_without_axis()
@with_seed()
def test_order():
ctx = default_context()
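    # gt_topk builds the NumPy ground truth for topk/sort/argsort/argmax/argmin:
    # 'value' returns the k smallest (is_ascend) or largest values, 'indices' the
    # corresponding positions, and 'mask' a 0/1 tensor marking the selected entries
    # (the mask branch is hard-coded for the (5, 5, 5, 5) input used in this test).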
def gt_topk(dat, axis, ret_typ, k, is_ascend):
if ret_typ == "indices":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
elif ret_typ == "value":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
else:
assert dat.shape == (5, 5, 5, 5)
assert axis is None or axis == 1
ret = np.zeros(dat.shape)
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
if axis is None:
ret.ravel()[gt_argsort] = 1
else:
for i in range(5):
for j in range(5):
                        for p in range(5):
                            ret[i, gt_argsort[i, :, j, p], j, p] = 1
return ret
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(np.float32)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
def get_large_matrix():
data = np.array([np.arange(300096).astype(np.float32)])
data = np.repeat(data, 100, axis=0)
np.apply_along_axis(np.random.shuffle, 1, data)
return data
large_matrix_npy = get_large_matrix()
for axis in [1, 3, None]:
K = [1, 3, 5, 7] if axis is None else [1, 3, 5]
for k in K:
for is_ascend in [True, False]:
b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k)
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
for axis in [1, 3, None]:
for is_ascend in [True, False]:
b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend)
if axis is None:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend)
else:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5,
is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 3))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3,
is_ascend=False)])
b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3,
is_ascend=True)])
b = mx.sym.argsort(a, axis=1, is_ascend=False)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5,
is_ascend=False)])
b = mx.sym.argmax(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=False)])
b = mx.sym.argmin(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=True)])
@with_seed()
def test_blockgrad():
a = mx.sym.Variable('a')
b = mx.sym.BlockGrad(a)
exe = b.simple_bind(ctx=default_context(), a=(10, 10))
a_npy = np.random.rand(10, 10)
exe.forward(is_train=True, a=a_npy)
assert_almost_equal(exe.outputs[0].asnumpy(), a_npy)
exe.backward() # No error if BlockGrad works
@with_seed()
def test_take():
def grad_helper(grad_in, axis, idx):
if axis == 0:
if axis == len(grad_in.shape) - 1:
grad_in[idx] += 1.0
else:
grad_in[idx, :] += 1.0
elif axis == 1:
if axis == len(grad_in.shape) - 1:
grad_in[:, idx] += 1.0
else:
grad_in[:, idx, :] += 1.0
elif axis == 2:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, idx] += 1.0
else:
grad_in[:, :, idx, :] += 1.0
elif axis == 3:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, :, idx] += 1.0
else:
grad_in[:, :, :, idx, :] += 1.0
elif axis == 4:
grad_in[:, :, :, :, idx] += 1.0
else:
raise ValueError("axis %d is not supported..." % axis)
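    # check_output_n_grad: the output gradient is all ones, so every occurrence of an index
    # in idx_real should add exactly 1.0 to the matching slice of the input gradient;
    # grad_helper above accumulates that expectation for comparison with the take backward.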
def check_output_n_grad(data_shape, idx_shape, axis, mode):
data = mx.sym.Variable('a')
idx = mx.sym.Variable('indices')
idx = mx.sym.BlockGrad(idx)
result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
exe = result.simple_bind(default_context(), a=data_shape,
indices=idx_shape, axis=axis, mode=mode)
data_real = np.random.normal(size=data_shape).astype('float32')
idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
if axis < 0:
axis += len(data_shape)
grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32')
grad_in = np.zeros(data_shape, dtype='float32')
exe.arg_dict['a'][:] = mx.nd.array(data_real)
exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0].asnumpy(), np.take(data_real, idx_real, axis=axis, mode=mode))
for i in np.nditer(idx_real):
grad_helper(grad_in, axis, i)
exe.backward([mx.nd.array(grad_out)])
assert_almost_equal(exe.grad_dict['a'].asnumpy(), grad_in)
def check_autograd_req():
row_len = 2
col_len = 8
shape = (row_len, col_len)
sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32")
sc.attach_grad()
i = mx.nd.array([0], dtype="int64")
j = mx.nd.array([0], dtype="int64")
with mx.autograd.record(train_mode=True):
xs = []
for _ in range(row_len):
x_i = []
for _ in range(col_len):
x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0)
x_i.append(x_ij)
j = j + 1
i = i + 1
j = j - col_len # reset j
xs.append(mx.nd.stack(*x_i))
x = mx.nd.stack(*xs)
x = x.sum()
x.backward()
assert_almost_equal(np.ones(sc.grad.shape), sc.grad.asnumpy())
for mode in ['clip', 'wrap']:
for data_ndim in range(1, 5):
for idx_ndim in range(1, 4):
for axis in range(-data_ndim, data_ndim):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=1, high=5), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=1, high=5), )
check_output_n_grad(data_shape, idx_shape, axis, mode)
check_autograd_req()
@with_seed()
def test_grid_generator():
# transform_type = affine
test_case = [(20,21),(4,3),(6,12),(15,17)]
for target_shape in test_case:
affine_matrix = mx.sym.Variable('affine')
grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='write')
# check forward
exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]])
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
tmp = np.zeros((3,target_shape[0]*target_shape[1]))
tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1))
tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1))
tmp[2] = 1
grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6)
assert_almost_equal(exe.grad_dict['affine'].asnumpy(), grad_est, rtol=1e-3, atol=1e-5)
# check addto
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='add')
grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape)
exe.grad_dict['affine'][:] = grid_grad_npy
exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]])
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['affine'].asnumpy(), grad_est + grid_grad_npy, rtol=1e-2, atol=1e-5)
# transform_type = warp
test_case = [(12,21),(4,3),(6,12)]
for target_shape in test_case:
flow = mx.sym.Variable('flow')
grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write')
# check forward
exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
grad_est = np.zeros((1,2)+target_shape)
grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
assert_almost_equal(exe.grad_dict['flow'].asnumpy(), grad_est, rtol=1e-3)
# check addto
exe_add = grid.simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add')
flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
exe_add.grad_dict['flow'][:] = flow_grad_npy
exe_add.forward(is_train=True)
exe_add.backward(mx.nd.array(out_grad))
assert_almost_equal(exe_add.grad_dict['flow'].asnumpy(), grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)
@with_seed()
def test_index2d():
for _ in range(30):
n = np.random.randint(1, 100)
m = np.random.randint(1, 500)
data = mx.random.uniform(-1, 1, shape=(n, m), ctx=default_context())
x = mx.nd.array(np.random.randint(0, m, size=n), ctx=default_context(), dtype='int32')
r = mx.nd.batch_take(data, x)
assert_almost_equal(r.asnumpy(), data.asnumpy()[np.arange(n), x.asnumpy()])
@with_seed()
def test_cast():
for srctype in [np.int32, np.float32, np.float16]:
for dsttype in [np.float32, np.int32, np.float16]:
x = mx.sym.Variable('x', dtype=srctype)
y = mx.sym.Cast(x, dtype=dsttype)
exe = y.simple_bind(ctx=default_context(), x=(10, 10))
assert exe.arg_arrays[0].dtype == srctype
assert exe.outputs[0].dtype == dsttype
X = np.random.uniform(-10, 10, size=(10, 10))
exe.arg_arrays[0][:] = X
exe.forward(is_train=True)
exe.backward(mx.nd.array(X, dtype=dsttype, ctx=default_context()))
assert_almost_equal(exe.outputs[0].asnumpy(), X.astype(srctype).astype(dsttype), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_arrays[0].asnumpy(), X.astype(dsttype).astype(srctype), rtol=1e-3, atol=1e-5)
def get_cast_op_data():
FP16_FRACTION_BITS = 10
FP32_FRACTION_BITS = 23
FP32_EXP_MIN = -126
FP32_EXP_MAX = 127
# generate test cases in the vicinity of representable float16 mantissas
# and mid-way between them, but over the full range of float32 exponents.
for sign_bit in [0, 1]:
for exponent in range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2):
denominator = 2**(FP16_FRACTION_BITS + 1)
for numerator in range(0, denominator):
fraction = numerator / float(denominator)
for y in [-1.0, 0.0, 1.0]:
small_delta = y / 2**FP32_FRACTION_BITS
val = (-1.0)**sign_bit * 2.0**exponent * (1.0 + fraction + small_delta)
yield val
# Add np.nan as a final data value to process
yield np.nan
# Test requires all platforms to round float32->float16 with same round-to-nearest-even policy.
@with_seed()
def test_cast_float32_to_float16():
input_np = np.array(list(get_cast_op_data())).astype(np.float32)
# The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed
# as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722
expected_output = input_np.astype(np.float64).astype(np.float16)
def check_cast(op, input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float32)
sym = op(x, dtype=np.float16)
ctx = default_context()
exe = sym.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float32, ctx=ctx)})
assert exe.arg_arrays[0].dtype == np.float32
assert exe.outputs[0].dtype == np.float16
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
check_cast(mx.sym.Cast, input_np, expected_output)
check_cast(mx.sym.amp_cast, input_np, expected_output)
@with_seed()
def test_amp_multicast():
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res.bind(ctx, {'x': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx),
'y': mx.nd.random.uniform(shape=(3, 3), dtype=np.float32, ctx=ctx),
'z': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
out1, out2, out3 = exe.outputs
assert out1.asnumpy().dtype == np.float32
assert out2.asnumpy().dtype == np.float32
assert out3.asnumpy().dtype == np.float32
def check_amp_multicast(input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float16, ctx=ctx),
'y': mx.nd.array(input_np, dtype=np.float32, ctx=ctx),
'z': mx.nd.array(input_np, dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
input_np = np.array(list(get_cast_op_data()), dtype=np.float16)
expected_output = input_np.astype(np.float32)
check_amp_multicast(input_np, expected_output)
@with_seed()
def test_all_finite():
data = mx.sym.Variable("data", dtype=np.float32)
data2 = mx.sym.Variable("data2", dtype=np.float32)
finite_arr = mx.nd.array([[0, 0]])
inf_arr = mx.nd.array([[np.inf, np.inf]])
z = mx.sym.all_finite(data)
ctx = default_context()
exe = z.bind(ctx, {'data': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
exe = z.bind(ctx, {'data': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z.bind(ctx, {'data': finite_arr, 'data2': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z.bind(ctx, {'data': finite_arr, 'data2': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
@with_seed()
def test_repeat():
def test_repeat_forward():
ndim_max = 6 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
repeats = 3
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(0, ndim):
shape += (np.random.randint(1, size_max+1), )
a = np.random.random_sample(size=shape)
aa = np.repeat(a, repeats)
b = mx.nd.array(a, ctx=default_context())
bb = mx.nd.repeat(b, repeats).asnumpy()
assert_almost_equal(aa, bb)
for axis in range(0, ndim):
aa = np.repeat(a, repeats, axis)
bb = mx.nd.repeat(b, repeats, axis).asnumpy()
assert_almost_equal(aa, bb)
def test_repeat_backward(axis):
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=axis)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * repeats)
if axis == 0:
npout_grad = npout_grad.reshape(n1 * repeats, n2)
elif axis == 1:
npout_grad = npout_grad.reshape(n1, n2 * repeats)
else:
raise RuntimeError("Invalid axis value")
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
if axis == 0:
for i in range(shape[0]):
for j in range(shape[1]):
k = i * repeats
expected_grad[i][j] = sum(npout_grad[k:k + repeats, j])
elif axis == 1:
for j in range(shape[1]):
for i in range(shape[0]):
k = j * repeats
expected_grad[i][j] = sum(npout_grad[i, k:k + repeats])
else:
raise RuntimeError("Invalid axis value")
assert_almost_equal(expected_grad, arr_grad.asnumpy(), rtol=1e-3)
def test_repeat_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=0)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2)
test_repeat_forward()
test_repeat_backward(axis=0)
test_repeat_backward(axis=1)
test_repeat_numeric_gradient()
@with_seed()
def test_reverse():
data = mx.symbol.Variable('data')
shape = (5, 5, 5)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.reverse(data, axis=[1, 2])
grad = np.random.uniform(-1, 1, shape)
check_numeric_gradient(test, [data_tmp], numeric_eps=2E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp[:, ::-1, ::-1]])
check_symbolic_backward(test, [data_tmp], [grad], [grad[:, ::-1, ::-1]])
@with_seed()
def test_tile():
def test_normal_case():
ndim_min = 1
ndim_max = 5 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
length_max = 3 # max length of reps
rep_max = 10 # max number of tiling in each dim
for ndim in range(ndim_min, ndim_max+1):
shape = []
for i in range(1, ndim+1):
shape.append(np.random.randint(1, size_max+1))
shape = tuple(shape)
a = np.random.randint(0, 100, shape)
b = mx.nd.array(a, dtype=a.dtype)
reps_len = np.random.randint(1, length_max+1)
reps_tuple = ()
for i in range(1, reps_len):
reps_tuple += (np.random.randint(1, rep_max), )
reps_array = np.asarray(reps_tuple)
a_tiled = np.tile(a, reps_array)
b_tiled = mx.nd.tile(b, reps_tuple).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_tensor():
shape = (2, 3, 0, 4)
a = np.array([], dtype=np.int32).reshape(shape)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
reps = (2, 4, 6)
a_tiled = np.tile(a, reps)
b_tiled = mx.nd.tile(b, reps).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_reps():
a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
a_tiled = np.tile(a, ())
b_tiled = mx.nd.tile(b, ()).asnumpy()
assert same(a_tiled, b_tiled)
def test_tile_backward():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2)
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2]))
assert_almost_equal(expected_grad, arr_grad.asnumpy(), rtol=1e-3)
def test_tile_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2)
def test_invalid_reps():
data = mx.nd.arange(16).reshape((4, 4))
assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3))
assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3))
test_normal_case()
with mx.np_shape():
test_empty_tensor()
test_empty_reps()
test_tile_backward()
test_tile_numeric_gradient()
test_invalid_reps()
@with_seed()
def test_one_hot():
def test_normal_case(index_type=np.int32):
ndim_max = 6
dim_size_max = 20
depth = int(dim_size_max / 2)
on_value = 1
off_value = 0
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(1, ndim+1):
shape += (np.random.randint(1, dim_size_max+1), )
indices = np.random.randint(-dim_size_max, dim_size_max+1,
size=np.prod(shape)).reshape(shape)
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=index_type),
depth=depth, dtype=np.int32)
expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32)
expected_array[:] = off_value
indices_1d = indices.flatten()
row = 0
for idx in indices_1d:
if 0 <= idx < depth:
expected_array[row, idx] = on_value
row += 1
expected_array = expected_array.reshape(shape + (depth, ))
one_hot_array = mx_one_hot_array.asnumpy()
assert same(expected_array, one_hot_array)
def test_empty_indices():
shape = (2, 0, 9, 3)
indices = np.array([]).reshape(shape)
depth = 10
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
def test_zero_depth():
shape = (2, 4, 9, 3)
indices = np.ones(shape)
depth = 0
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
test_normal_case(index_type=np.int32)
test_normal_case(index_type=np.float64)
test_normal_case(index_type=np.float32)
test_normal_case(index_type=np.float16)
with mx.np_shape():
test_empty_indices()
test_zero_depth()
@with_seed()
def test_where():
def get_forward_expected_output(condition, x, y):
original_shape = x.shape
out = np.zeros(original_shape)
if condition.shape == x.shape:
for index, c in np.ndenumerate(condition):
if c != 0:
out[index] = x[index]
else:
out[index] = y[index]
elif condition.shape == (x.shape[0], ):
s = x.shape
m = s[0]
n = int(np.prod(s)/s[0])
x2d = x.reshape((m, n))
y2d = y.reshape((m, n))
out = out.reshape((m, n))
for i in range(0, m):
if condition[i] != 0:
for j in range(0, n):
out[i, j] = x2d[i, j]
else:
for j in range(0, n):
out[i, j] = y2d[i, j]
else:
raise RuntimeError("Invalid condition shape for where op")
out = out.reshape(original_shape)
return out
def get_forward_inputs_same_shape(shape):
condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_forward_inputs_condition_vector(shape):
condition_np = np.random.randint(0, 2, shape[0])
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_backward_input(shape):
return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
def get_backward_expected_outputs(grad_in, condition):
shape = grad_in.shape
grad_cond = np.zeros(condition.shape)
grad_x = np.empty(shape)
grad_y = np.empty(shape)
for index, c in np.ndenumerate(condition):
if 0 != c:
grad_x[index] = grad_in[index]
grad_y[index] = 0
else:
grad_x[index] = 0
grad_y[index] = grad_in[index]
return grad_cond, grad_x, grad_y
def test_where_helper(shape, same_shape):
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
out_expected = get_forward_expected_output(condition_np, x_np, y_np)
grad_in_np = get_backward_input(shape)
grad_expected_cond, grad_expected_x, grad_expected_y\
= get_backward_expected_outputs(grad_in_np, condition_np)
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int32)
where_sym = mx.sym.where(condition, x, y)
# test req='write'
where_exe_write = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='write')
# test forward req='write'
outputs = where_exe_write.forward(is_train=True, condition=condition_np,
x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='write'
where_exe_write.backward(grad_in_mx)
assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
# test req='add'
x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
where_exe_add = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='add')
where_exe_add.grad_dict['x'][:] = x_grad_init
where_exe_add.grad_dict['y'][:] = y_grad_init
# test forward req='add'
outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='add'
where_exe_add.backward(grad_in_mx)
x_ograd = where_exe_add.grad_dict['x'].asnumpy()
y_ograd = where_exe_add.grad_dict['y'].asnumpy()
assert same(x_ograd, grad_expected_x+x_grad_init)
assert same(y_ograd, grad_expected_y+y_grad_init)
def test_where_numeric_gradient(shape, same_shape):
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
def test_invalid_shape():
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
def test_1d_cond():
cond = mx.nd.array([1, 0, 1])
x = mx.nd.array([[2, 3], [4, 5], [6, 7]])
y = mx.nd.array([[7, 8], [9, 10], [10, 11]])
expect_out = np.array([[2, 3], [9, 10], [6, 7]])
out = mx.nd.where(cond, x, y).asnumpy()
assert same(expect_out, out)
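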
test_where_helper((5, 9), True)
test_where_helper((5, 9), False)
test_where_helper((5, 7, 9), True)
test_where_helper((5, 7, 9), False)
test_where_helper((10, 8, 15, 3), True)
test_where_helper((10, 8, 15, 3), False)
test_where_numeric_gradient((5, 9), True)
test_where_numeric_gradient((5, 9), False)
test_where_numeric_gradient((5, 7, 9), True)
test_where_numeric_gradient((5, 7, 9), False)
test_invalid_shape()
test_1d_cond()
@with_seed()
def test_softmin():
for ndim in range(1, 5):
for dtype in [np.float16, np.float32, np.float64]:
rtol, atol = (1e-2, 5e-3) if dtype is np.float16 else (1e-3, 1e-3)
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape).astype(dtype)
data = data / 10 if dtype is np.float16 else data
sym = mx.sym.softmin(axis=axis)
expected_fwd = np_softmax(-data, axis=axis)
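# Since the outputs along `axis` sum to 1, an all-ones output gradient is
# annihilated by the softmax Jacobian (diag(y) - y y^T applied to a constant
# vector gives 0), so the expected input gradient is identically zero.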
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], atol=atol, dtype=dtype)
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=rtol, atol=atol, grad_req=req, dtype=dtype)
if dtype is not np.float16:
check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_new_softmax():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.softmax(axis=axis)
expected_fwd = np_softmax(data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd])
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=1e-2, atol=1e-3, grad_req=req)
check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3)
@with_seed()
def test_softmax_with_temperature():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
data = np.random.uniform(-2, 2, size=shape)
for temp in range(1, 11):
sym = mx.sym.softmax(axis=0, temperature=temp)
expected_fwd = np_softmax(data, axis=0, temperature=temp)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], rtol=0.05, atol=1e-3)
check_symbolic_backward(sym, [data], [np.ones(shape)], [expected_bwd], rtol=0.05, atol=1e-3)
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
@with_seed()
def test_log_softmax():
for ndim in range(1, 5):
for _ in range(5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(0, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.log_softmax(axis=axis-ndim)
check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)])
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
def test_softmax_with_large_inputs():
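# Equal inputs of very large magnitude (up to +/-3.4e38, near the float32 limits)
# must still give a uniform softmax of 0.5 each; this guards against
# overflow/underflow in the internal exp/normalization.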
def softmax_forward(input_data, true_output):
data = mx.sym.Variable('data')
out1 = data.softmax(axis=1)
exec1 = out1.bind(default_context(), args={'data': input_data})
exec1.forward()[0].wait_to_read()
ndarr = exec1.outputs[0][0][0][0]
nparr = ndarr.asnumpy()
assert_almost_equal(nparr, true_output, rtol=1e-5, atol=1e-5)
softmax_forward(mx.nd.array([[[[-1e30,-1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[1e30,1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0]))
@with_seed()
def test_softmax_dtype():
def check_dtypes_almost_equal(op_name,
atol, rtol,
grad_atol, grad_rtol,
idtype, ref_dtype, odtype=None):
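# Run `op_name` on a low-precision copy of the input and on a higher-precision
# reference copy (optionally forcing the output dtype via `odtype`), then require
# both the forward outputs and the input gradients to agree within tolerance.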
op = getattr(mx.nd, op_name)
input_data = mx.random.uniform(shape=(100, 500))
dtype_input = input_data.astype(idtype)
ref_input = input_data.astype(ref_dtype)
dtype_input.attach_grad()
ref_input.attach_grad()
with mx.autograd.record():
dtype_softmax = op(dtype_input, axis=-1, dtype=odtype)
ref_softmax = op(ref_input, axis=-1, dtype=odtype)
dtype_softmax_np = dtype_softmax.asnumpy()
ref_softmax_np = ref_softmax.asnumpy()
assert_almost_equal(dtype_softmax_np, ref_softmax_np, rtol=rtol, atol=atol)
dtype_softmax.backward()
ref_softmax.backward()
dtype_grad_np = dtype_input.grad.asnumpy()
ref_grad_np = ref_input.grad.asnumpy()
assert_almost_equal(dtype_grad_np, ref_grad_np, rtol=grad_rtol, atol=grad_atol)
import sys
is_windows = sys.platform.startswith('win')
enforce_safe_acc = os.environ.get("MXNET_SAFE_ACCUMULATION", "0")
if not is_windows or enforce_safe_acc == "1":
os.environ["MXNET_SAFE_ACCUMULATION"] = "1"
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64', 'float64')
@with_seed()
def test_softmax_with_length():
def np_softmax_with_length(data, length):
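# NumPy reference: softmax over axis 1 restricted to the first length[i, j]
# entries of each (i, :, j) slice; positions beyond the given length stay zero.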
res = np.zeros(data.shape)
for i in range(length.shape[0]):
for j in range(length.shape[1]):
leng = int(length[i, j])
res[i, 0:leng, j] = np_softmax(data[i, 0:leng, j])
return res
ndim = 3
shape = rand_shape_nd(ndim, dim=10)
len_shape = list(shape)
del len_shape[1]
len_shape = tuple(len_shape)
for dtype in [np.float16, np.float32, np.float64]:
mx_data = rand_ndarray(shape, dtype=dtype)
np_data = mx_data.asnumpy()
np_length = np.random.randint(1, shape[1] + 1, len_shape)
mx_length = mx.nd.array(np_length, dtype=np.int32)
np_out = np_softmax_with_length(np_data, np_length)
data = mx.sym.Variable("data")
length = mx.sym.Variable("length")
mx_sym = mx.sym.softmax(data=data, length=length, use_length=True, axis=1)
location = {"data": mx_data, "length": mx_length}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol, dtype="asnumpy")
check_symbolic_backward(mx_sym, location, [np.ones(shape, dtype=dtype)],
[np.zeros(shape), np.zeros(len_shape, dtype=np.int32)], rtol=1e-2, atol=1e-3, dtype="asnumpy")
@with_seed()
def test_pick():
def test_pick_helper(index_type=np.int32):
for _ in range(100):
for mode in ['clip', 'wrap']:
ndim = np.random.randint(1, 5)
bshape = np.random.randint(1, 10, size=ndim)
axis = np.random.randint(0, ndim)
sshape = bshape.copy()
sshape[axis] = 1
data = np.random.uniform(-1, 1, size=bshape)
if mode == 'wrap':
index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape)
else:
index = np.random.randint(0, bshape[axis], size=sshape)
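# Build an advanced-indexing tuple that picks index[...] along `axis` (with
# wrap-around for mode='wrap'), i.e. a NumPy reference for mx.nd.pick.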
exp = []
for i in range(ndim):
if i == axis:
if mode == 'wrap':
exp.append(index % bshape[axis])
else:
exp.append(index)
else:
ishape = [1 for _ in range(ndim)]
ishape[i] = bshape[i]
exp.append(np.arange(bshape[i]).reshape(ishape))
expected = data[exp]
data = mx.nd.array(data, dtype='float32')
index = mx.nd.array(index, dtype=index_type)
out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode)
assert_almost_equal(out.asnumpy(), expected)
data_holder = data
index_holder = index
data = mx.sym.Variable('data')
index = mx.sym.Variable('index')
sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode)
check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data'])
test_pick_helper(np.int32)
test_pick_helper(np.float32)
def check_ctc_loss(acts, labels, loss_truth):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
ctc = mx.sym.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd])
# test forward with grad calc
exe.forward(is_train=True)
outTrain = exe.outputs[0].copy()
# test forward without grad calc
exe.forward(is_train=False)
outTest = exe.outputs[0]
# make sure losses calculated with both modes are the same
assert_almost_equal(outTest.asnumpy(), outTrain.asnumpy())
# test against ground truth, if available
if loss_truth is not None:
assert_almost_equal(outTest.asnumpy(), loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
# check contrib operator for backward compatibility
def check_contrib_ctc_loss(acts, labels, loss_truth):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
ctc = mx.sym.contrib.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd])
# test forward with grad calc
exe.forward(is_train=True)
outTrain = exe.outputs[0].copy()
# test forward without grad calc
exe.forward(is_train=False)
outTest = exe.outputs[0]
# make sure losses calculated with both modes are the same
assert_almost_equal(outTest.asnumpy(), outTrain.asnumpy())
# test against ground truth, if available
if loss_truth is not None:
assert_almost_equal(outTest.asnumpy(), loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
@with_seed()
def test_ctc_loss():
# Test 1: check that identical batch entries give the same loss + check against Torch WarpCTC
acts = np.array([
[[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]],
dtype=np.float32)
labels = np.array([[2, 3, 0], [2, 3, 0]])
true_loss = np.array([4.04789, 4.04789], dtype=np.float32) # from Torch
check_ctc_loss(acts, labels, true_loss)
check_contrib_ctc_loss(acts, labels, true_loss)
# Test 2:
acts2 = np.array([
[[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32)
labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
check_ctc_loss(acts2, labels2, true_loss)
check_contrib_ctc_loss(acts2, labels2, true_loss)
# Test 3: check that integer-typed labels work
labels3 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
check_ctc_loss(acts2, labels3, true_loss)
check_contrib_ctc_loss(acts2, labels3, true_loss)
@with_seed()
def test_ctc_loss_with_large_classes():
ctx = default_context()
num_classes = 6000
seq_len = 8
batch_size = 2
data = np.empty((num_classes, 0))
for i in range(seq_len * batch_size) :
row = np.roll(np.arange(num_classes, dtype=np.float32), i).reshape(num_classes, 1)
data = np.append(data, row/13, axis=1)
data = data.reshape(seq_len, batch_size, num_classes)
label = np.array([
[100, 200, 300, 400, 500, 0, 0, 0],
[1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32)
nd_data = mx.nd.array(data)
nd_label = mx.nd.array(label)
loss = mx.nd.ctc_loss(data=nd_data, label=nd_label)
expected_loss = np.array([688.02826, 145.34462])
assert_almost_equal(loss.asnumpy(), expected_loss)
@with_seed()
def test_ctc_loss_grad():
def check_ctc_loss_grad(blank_label): # from tf
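# The probability/gradient matrices below are taken from TensorFlow's CTC loss
# tests (see '# from tf'); two trailing all-NaN time steps, masked out by
# data_lengths, must receive exactly zero gradient.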
vocab_size = 5
max_label_len = 5
padding_mask = -1 + (blank_label == 'first')
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
l = mx.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l.asnumpy(), loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad.asnumpy(), grad_truth, atol=1e-5, rtol=1e-5)
# check contrib operator for backward compatibility
def check_contrib_ctc_loss_grad(blank_label): # from tf
vocab_size = 5
max_label_len = 5
padding_mask = -1 + (blank_label == 'first')
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
l = mx.contrib.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l.asnumpy(), loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad.asnumpy(), grad_truth, atol=1e-5, rtol=1e-5)
check_ctc_loss_grad('first')
check_ctc_loss_grad('last')
check_contrib_ctc_loss_grad('first')
check_contrib_ctc_loss_grad('last')
@with_seed()
def test_quantization_op():
min0 = mx.nd.array([0.0])
max0 = mx.nd.array([1.0])
a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]])
qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='int8')
a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32')
qa_real = mx.nd.array([[18, 75], [77, 109]])
a_real = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]])
assert same(qa.asnumpy(), qa_real.asnumpy())
assert same(a_.asnumpy(), a_real.asnumpy())
@with_seed()
def test_index_copy():
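# contrib.index_copy writes the rows of `t` into `x` at the positions given by
# `index`; the copied positions therefore receive zero gradient w.r.t. `x` and a
# gradient of ones w.r.t. `t`, which is what x_grad/t_grad encode below.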
x = mx.nd.zeros((5,3))
t = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]])
index = mx.nd.array([0,4,2], dtype=np.int64)
tensor = mx.nd.array([[1,2,3],[0,0,0],[7,8,9],[0,0,0],[4,5,6]])
x_grad = mx.nd.array([[0,0,0],[1,1,1],[0,0,0],[1,1,1],[0,0,0]])
t_grad = mx.nd.array([[1,1,1],[1,1,1],[1,1,1]])
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
x.attach_grad()
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(x.grad.asnumpy(), x_grad.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
@with_seed()
def test_boolean_mask():
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 1, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.array([[4, 5, 6]])
expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
# test 0-size output
mx.set_np_shape(True)
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 0, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.zeros((0, 3))
expected_grad = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
mx.set_np_shape(False)
# test gradient
shape = (100, 30)
a = mx.nd.random.randint(0, 100, shape=shape)
a.attach_grad()
bi = mx.nd.random.randint(0, 100, shape=shape[0:1]) > 50
ci = mx.nd.random.randint(0, 100, shape=shape[0:1]) < 50
mx_grad = mx.nd.zeros_like(a)
mx.autograd.mark_variables([a], [mx_grad], grad_reqs='add')
T = 3
for _ in range(T):
with mx.autograd.record():
b = mx.nd.contrib.boolean_mask(a, bi)
c = mx.nd.contrib.boolean_mask(a, ci)
su = b.sum() + c.sum()
su.backward()
grad = (bi + ci).asnumpy().reshape((-1,) + (1,) * (len(shape)-1))
grad = np.tile(grad, (1,) + shape[1:])
# T times
grad *= T
assert_allclose(a.grad.asnumpy(), grad)
a_np = a.asnumpy()
assert same(b.asnumpy(), a_np[bi.asnumpy().astype('bool')])
assert same(c.asnumpy(), a_np[ci.asnumpy().astype('bool')])
@with_seed()
def test_div_sqrt_dim():
data_tmp = np.random.normal(0, 1, (5, 10, 8))
data = mx.symbol.Variable('data')
test = mx.sym.contrib.div_sqrt_dim(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=1E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp / np.sqrt(data_tmp.shape[-1])])
@with_seed()
def test_reciprocal_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 6 below set empirically, depends on eps.
# Issue exposed by seed 879579887.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 6*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.reciprocal(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [np.reciprocal(data_tmp)])
@with_seed()
def test_cbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid finite difference method inaccuracies due to infinite gradient at the origin.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 553872106.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.cbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=eps)
check_symbolic_forward(test, [data_tmp], [np.cbrt(data_tmp)])
@with_seed()
def test_rcbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 788174893.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.rcbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [1/np.cbrt(data_tmp)])
@with_seed()
def test_custom_op():
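# The Sqr custom op below squares its input element-wise for both dense and CSR
# storage, writes a marker into its auxiliary state in the dense case, and is
# checked symbolically, via numeric gradients, and through autograd on sparse input.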
class Sqr(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
if in_data[0].stype == 'default':
aux[0][:] = 1
self.assign(out_data[0], req[0], in_data[0]*in_data[0])
else:
inp = in_data[0]
csr_m = inp.data * inp.data
out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape)
self.assign(out_data[0], req[0], out)
if (in_data[0].stype == 'csr'):
assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0]))
if in_data[0].stype == 'default':
assert (aux[0].asnumpy() == 1).all()
@mx.operator.register("sqr")
class SqrProp(mx.operator.CustomOpProp):
def __init__(self):
super(SqrProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def list_auxiliary_states(self):
return ['aux']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], [in_shape[0]]
def infer_type(self, in_type):
return in_type, [in_type[0]], [in_type[0]]
def infer_storage_type(self, in_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default']
return ['csr'], ['csr'], ['csr']
def infer_storage_type_backward(self, ograd_stype, in_stype,
out_stype, igrad_stype, aux_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default'], ['default'], ['default']
return ['default'], ['csr'], ['csr'], ['csr'], ['csr']
def create_operator(self, ctx, shapes, dtypes):
return Sqr()
data = mx.symbol.Variable('data')
aux = mx.symbol.Variable('aux')
op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.cast(data, dtype='float64')
op = mx.symbol.cast(op, dtype='float32')
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.Variable('data', stype='csr')
aux = mx.symbol.Variable('aux')
op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = x.tostype('csr')
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"})
x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
x2 = x2.tostype('csr')
aux2 = mx.nd.zeros_like(x2)
x2.attach_grad()
with mx.autograd.record():
output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr')
output.backward()
expected_output = mx.nd.sparse.square(x2)
expected_grad = 2 * x2
rtol = 1e-4
atol = 1e-6
assert_almost_equal(output.asnumpy(), expected_output.asnumpy(), rtol=rtol, atol=atol)
assert_almost_equal(x2.grad.asnumpy(), expected_grad.asnumpy(), rtol=rtol, atol=atol)
# test for backward compatibility, i.e. the correctness of the default
# implementation of storage type inference in custom operators
class Mult(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult")
class MultProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return Mult()
lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
lhs.attach_grad()
rhs.attach_grad()
with mx.autograd.record():
y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
y.backward()
assert_almost_equal(rhs.asnumpy(), lhs.grad.asnumpy(), rtol=rtol, atol=atol)
assert_almost_equal(lhs.asnumpy(), rhs.grad.asnumpy(), rtol=rtol, atol=atol)
class MultNoGrad(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult_no_grad")
class MultNoGradProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultNoGradProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return MultNoGrad()
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype
with mx.autograd.record():
y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
y2.backward()
assert_almost_equal(rhs.asnumpy(), lhs.grad.asnumpy(), rtol=rtol, atol=atol)
assert_almost_equal(lhs.asnumpy(), rhs.grad.asnumpy(), rtol=rtol, atol=atol)
class NoInputOp(mx.operator.CustomOp):
def __init__(self, length, depth):
super(NoInputOp, self).__init__()
self.output = np.ones(shape=(length, depth), dtype=np.float32)
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], self.output)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register("no_input_op")
class NoInputOpProp(mx.operator.CustomOpProp):
def __init__(self, length, depth):
super(NoInputOpProp, self).__init__()
self.length = int(length)
self.depth = int(depth)
def list_arguments(self):
return []
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return [], [(self.length, self.depth)], []
def infer_type(self, in_type):
return [], [np.float32], []
def create_operator(self, ctx, shapes, dtypes):
return NoInputOp(length=self.length, depth=self.depth)
with mx.autograd.record():
x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
assert_almost_equal(x.asnumpy(), np.ones(shape=(10, 10), dtype=np.float32))
@with_seed()
def test_custom_op_fork():
# test custom operator fork
# see https://github.com/apache/incubator-mxnet/issues/14396
class AdditionOP(mx.operator.CustomOp):
def __init__(self):
super(AdditionOP, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
out_data[0][:] = in_data[0] + in_data[1]
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
in_grad[0][:] = out_grad[0]
in_grad[1][:] = out_grad[0]
@mx.operator.register("AdditionOP")
class AdditionOPProp(mx.operator.CustomOpProp):
def __init__(self):
super(AdditionOPProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]]
def create_operator(self, ctx, shapes, dtypes):
return AdditionOP()
if not sys.platform.startswith('win'): # no fork in windows
def custom_add():
a = mx.nd.array([1, 2, 3])
b = mx.nd.array([4, 5, 6])
c = mx.nd.Custom(a, b, op_type='AdditionOP')
assert_almost_equal((a + b).asnumpy(), c.asnumpy())
custom_add()
from multiprocessing import Process
p = Process(target=custom_add)
p.daemon = True
p.start()
p.join(5)
assert not p.is_alive() and p.exitcode == 0
def _build_dot_custom(fun_forward, name):
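# Register a minimal custom operator with a dot-product output shape whose forward
# body is supplied by the caller; test_custom_op_exc uses it to inject failures at
# different stages (Python-level assertion, shape inference, kernel execution).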
class Dot(mx.operator.CustomOp):
def __init__(self):
super(Dot, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
fun_forward(in_data, out_data)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register(name)
class DotProp(mx.operator.CustomOpProp):
def __init__(self):
super(DotProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [(in_shape[0][0], in_shape[1][1])]
def create_operator(self, ctx, shapes, dtypes):
return Dot()
@with_seed()
def test_custom_op_exc():
# test exception handling
# see https://github.com/apache/incubator-mxnet/pull/14693
# 1. error in python code
def custom_exc1():
def f(in_data, out_data):
assert False
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot1')
a = mx.nd.zeros((4, 1))
b = mx.nd.zeros((1, 4))
c = mx.nd.Custom(a, b, op_type='Dot1')
c.wait_to_read()
assert_raises(MXNetError, custom_exc1)
# 2. error in pushing operator to engine
def custom_exc2():
def f(in_data, out_data):
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot2')
a = mx.nd.zeros((4, 2))
b = mx.nd.zeros((1, 4))
# trigger error by invalid input shapes of operands
c = mx.nd.Custom(a, b, op_type='Dot2')
c.wait_to_read()
assert_raises(MXNetError, custom_exc2)
# 3. error in real execution
if default_context().device_type == 'cpu':
def custom_exc3():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
out_data[0].wait_to_read()
_build_dot_custom(f, 'Dot3')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot3')
c.wait_to_read()
assert_raises(MXNetError, custom_exc3)
def custom_exc4():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
_build_dot_custom(f, 'Dot4')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot4')
c.wait_to_read()
assert_raises(MXNetError, custom_exc4)
@with_seed()
def test_psroipooling():
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([168, 224], [168, 224]):
for grad_nodes in [['im_data']]:
spatial_scale = 0.0625
feat_height = int(image_height * spatial_scale)
feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale,
group_size=num_group, pooled_size=num_group,
output_dim=num_classes, name='test_op')
rtol, atol = 1e-2, 1e-3
check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes)
@with_seed()
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
def test_deformable_convolution():
for num_batch in [1, 2]:
for num_channel_data, num_deformable_group in itertools.product([4, 8], [1, 2]):
for input_height, input_width in itertools.product([5, 6], [5, 6]):
for dilate in [(1, 1), (2, 2)]:
for grad_nodes in [['im_data'], ['offset_data'], ['weight']]:
output_height = input_height
output_width = input_width
im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width)
offset_data = \
np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\
* 0.8 + 0.1
weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3))
bias = np.zeros(num_channel_data)
im_data_var = mx.symbol.Variable(name="im_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
weight_var = mx.symbol.Variable(name="weight")
bias_var = mx.symbol.Variable(name="bias")
op = mx.sym.contrib.DeformableConvolution(name='test_op', data=im_data_var,
offset=offset_data_var,
weight=weight_var, bias=bias_var,
num_filter=num_channel_data, pad=dilate,
kernel=(3, 3), stride=(1, 1), dilate=dilate,
num_deformable_group=num_deformable_group)
if grad_nodes[0] == 'offset_data':
# wider tolerance needed for coordinate differential
rtol, atol = 1.0, 1e-2
else:
rtol, atol = 0.05, 1e-3
# Currently only a GPU implementation is available
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w):
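# Re-draw the per-class offsets until every bilinear sampling location lies strictly
# away from integer grid coordinates: the numeric gradient check below would
# otherwise probe points where the interpolation is non-differentiable.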
num_rois = input_rois.shape[0]
output_offset = input_offset.copy()
# simulate deformable psroipooling forward function
for roi_idx in range(num_rois):
sub_rois = input_rois[roi_idx, :].astype(np.float32)
img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
roi_start_w = round(x0) * spatial_scale - 0.5
roi_start_h = round(y0) * spatial_scale - 0.5
roi_end_w = round(x1 + 1) * spatial_scale - 0.5
roi_end_h = round(y1 + 1) * spatial_scale - 0.5
roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
for c_top in range(output_dim):
channel_each_cls = output_dim / num_classes
class_id = int(c_top / channel_each_cls)
for ph in range(pooled_h):
for pw in range(pooled_w):
part_h = int(math.floor(float(ph) / pooled_h * part_size))
part_w = int(math.floor(float(pw) / pooled_w * part_size))
trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w
need_check = True
while need_check:
pass_check = True
for ih in range(sample_per_part):
for iw in range(sample_per_part):
h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w
if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
continue
w = min(max(w, 0.1), feat_w - 1.1)
h = min(max(h, 0.1), feat_h - 1.1)
# if the following condition holds, the sampling location is not differentiable
# therefore we need to re-do the sampling process
if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
pass_check = False
break
if not pass_check:
break
if pass_check:
output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
need_check = False
return output_offset
@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713")
@with_seed()
def test_deformable_psroipooling():
sample_per_part = 4
trans_std = 0.1
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([160, 224], [160, 224]):
for grad_nodes in [['im_data'], ['offset_data']]:
spatial_scale = 0.0625
stride = int(1 / spatial_scale)
feat_height = int(image_height * spatial_scale)
feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride
offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group)
# at certain points, the bilinear interpolation function may be non-differentiable
# to avoid this, we make sure the sampling locations fall on valid (differentiable) points
offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale, num_group, num_group,
sample_per_part, num_group, num_classes, num_classes, trans_std, feat_height, feat_width)
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
trans=offset_data_var, spatial_scale=spatial_scale,
sample_per_part=4, group_size=num_group,
pooled_size=num_group, output_dim=num_classes,
trans_std=0.1, no_trans=False, name='test_op')
rtol, atol = 1e-2, 1e-3
# Currently only a GPU implementation is available
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _gemm_test_helper(dtype, grad_check, rtol_fw = 1e-7, atol_fw = 1e-9):
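# Check linalg.gemm (C = alpha * op(A) * op(B) + beta * C) and linalg.gemm2 against
# NumPy for every transpose combination, on batched inputs, and for non-default
# `axis` values; numeric gradient checks are optional via `grad_check`.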
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
shape1 = (2, 3)
shape2 = (3, 2)
shape3 = (3, 3)
shape4 = (2, 2)
data_in1 = np.random.uniform(1, 10, shape1).astype(dtype)
data_in2 = np.random.uniform(1, 10, shape2).astype(dtype)
data_in3 = np.random.uniform(1, 10, shape3).astype(dtype)
data_in4 = np.random.uniform(1, 10, shape4).astype(dtype)
# Check all transpositions of gemm operator.
data_in1_t = np.transpose(data_in1)
data_in2_t = np.transpose(data_in2)
res_gemm = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [data_in1, data_in2, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in4])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True, transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in3])
res_gemm = 4. * np.dot(data_in1_t, data_in1) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in3])
res_gemm = 4. * np.dot(data_in1, data_in1_t) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in4])
# Check batch of gemm.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
c = rep_3x(data_in4, 2, 2)
r = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
r = rep_3x(r, 2, 2)
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [a, b, c], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b, c])
# Check for different axis that describes matrix rows.
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
c2 = np.copy(np.swapaxes(c, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = 0)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
c2 = np.copy(np.swapaxes(c, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = -3)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
# Check gemm2 operator same way as gemm.
res_gemm = 4. * np.dot(data_in1, data_in2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in1)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
res_gemm = 4. * np.dot(data_in1, data_in1_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
# Check batch of gemm2.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [a, b], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b])
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = 0)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = -3)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
# Test gemm separately from other la-operators.
@with_seed()
def test_gemm():
_gemm_test_helper(np.float64, True)
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
_gemm_test_helper(np.float32, False, rtol_fw = 1e-5, atol_fw = 1e-7)
if default_context().device_type == 'gpu':
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "1"
_gemm_test_helper(np.float32, False, rtol_fw = 2e-5, atol_fw = 2e-7)
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
# Helper functions for test_laop
def _make_symm_symbol(a, ndims):
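# Symmetrize `a` over its last two axes (0.5 * (A + A^T)) so that downstream
# gradient checks of potrf operate on an exactly symmetric input.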
assert ndims >= 2
tr_shape = list(range(ndims))
tr_shape[-1] = ndims-2
tr_shape[-2] = ndims-1
tr_shape = tuple(tr_shape)
return 0.5 * (a + mx.sym.transpose(a, axes=tr_shape))
def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32):
assert ndims >= 2
# The last two dimensions must both be m
# Create mask for lower triangle and diagonal
index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype)
for j in range(1, m):
part1 = mx.sym.zeros(shape=(j, m), dtype=dtype)
index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
part2 = mx.sym.one_hot(index, depth=m, dtype=dtype)
lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0)
if not lower:
lt_mask = mx.sym.reshape(lt_mask, shape=(m, m))
lt_mask = mx.sym.transpose(lt_mask, axes=(1, 0))
shp = tuple([1]*(ndims-2) + [m, m])
lt_mask = mx.sym.reshape(lt_mask, shape=shp)
return mx.sym.broadcast_mul(a, lt_mask)
# @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11718
@with_seed()
def test_laop():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
for lower in [True, False]:
upper = not lower
# Tests with trivial 1x1 matrices.
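# For 1x1 SPD inputs the expected results are scalar identities: potrf(a) = sqrt(a),
# potri(a) = 1/a^2, and trsm/trmm reduce to scalar division/multiplication, which is
# what the reference arrays below encode.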
shape = (4, 4, 1, 1)
data_in = np.random.uniform(1, 10, shape)
# test potrf
# Note: Have to symmetrize input, for gradient test to work
res_potrf = np.sqrt(data_in)
test_potrf = mx.sym.linalg.potrf(data1, lower=lower)
check_fw(test_potrf, [data_in], [res_potrf])
if grad_check == 1:
check_grad(test_potrf, [data_in])
# test potri
ones = mx.nd.ones(shape).asnumpy()
res_potri = np.divide(ones, data_in * data_in)
test_potri = mx.sym.linalg.potri(data1, lower=lower)
check_fw(test_potri, [data_in], [res_potri])
if grad_check == 1:
check_grad(test_potri, [data_in])
# test trsm
trian_in = data_in * 7.
test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower)
check_fw(test_trsm, [trian_in, data_in], [ones])
if grad_check == 1:
check_grad(test_trsm, [trian_in,data_in])
# test trmm
trian_in = np.divide(ones, trian_in)
test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
rightside=True, lower=lower)
check_fw(test_trmm, [trian_in, data_in], [ones])
if grad_check == 1:
check_grad(test_trmm, [trian_in, data_in])
# test sumlogdiag
res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
check_fw(test_sumlogdiag, [data_in], [res_sumlogdiag])
if grad_check == 1:
check_grad(test_sumlogdiag, [data_in])
# more elaborate example of Cholesky factorization
matrix = np.array([[9., 3., -6., 12.],
[3., 26., -7., -11.],
[-6., -7., 9., 7.],
[12., -11., 7., 65.]])
trian = np.array([[3., 0., 0., 0.],
[1., 5., 0., 0.],
[-2., -1., 2., 0.],
[4., -3., 6., 2.]])
pow = np.array([[2., 1., 1., 1.],
[1., 4., 1., 1.],
[1., 1., 8., 1.],
[1., 1., 1., 16.]])
inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
[0.05/3., 0.05, 0.05, 0.],
[2.65, 0.05, 2.5, -0.75],
[-2.5/3., 0., -0.75, 0.25]])
ident = np.eye(4)
low_trian = trian
if not lower:
trian = np.transpose(trian)
# test potrf
test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower)
a = rep_3x(matrix, 4, 4)
r = rep_3x(trian, 4, 4)
check_fw(test_potrf, [a], [r])
if grad_check == 1:
check_grad(test_potrf, [a])
# test potri
data1_ltri = _make_triangle_symm(
data1, ndims=4, m=4, lower=lower, dtype=dtype)
test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower)
a = rep_3x(trian, 4, 4)
r = rep_3x(inv, 4, 4)
check_fw(test_potri, [a], [r])
if grad_check == 1:
check_grad(test_potri, [a])
# test trsm
test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower)
a = rep_3x(trian, 4, 4)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.transpose(low_trian), 4, 4)
check_fw(test_trsm, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm, [a, b])
test_trsm2 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower)
r = rep_3x(-2. * low_trian, 4, 4)
check_fw(test_trsm2, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm2, [a, b])
test_trsm3 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower)
b = rep_3x(np.transpose(low_trian), 4, 4)
r = rep_3x(0.5 * ident, 4, 4)
check_fw(test_trsm3, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm3, [a, b])
test_trsm4 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower)
b = rep_3x(low_trian, 4, 4)
r = rep_3x(-0.5 * ident, 4, 4)
check_fw(test_trsm4, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm4, [a, b])
# test trmm
test_trmm = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower)
a = rep_3x(trian, 4, 4)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
check_fw(test_trmm, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm, [a, b])
test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower)
r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
check_fw(test_trmm2, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm2, [a, b])
test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower)
r = rep_3x(np.dot(matrix, trian), 4, 4)
check_fw(test_trmm3, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm3, [a, b])
test_trmm4 = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=1.2, transpose=True, lower=lower)
r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
check_fw(test_trmm4, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm4, [a, b])
# test sumlogdiag
a = rep_3x(pow, 4, 4)
r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
check_fw(test_sumlogdiag, [a], [r])
if grad_check == 1:
check_grad(test_sumlogdiag, [a])
# Tests for operators linalg.syrk, linalg.gelqf
def _gelqf_combined_symbol(a):
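# Group the two consistency checks for the LQ factorization A = L * Q:
# Q has orthonormal rows, so Q * Q^T must be the identity, and L * Q must
# reproduce the original input.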
q, l = mx.sym.linalg.gelqf(a)
q_qt = mx.sym.linalg.syrk(q, transpose=False, alpha=1., name='Q_times_Qt')
l_q = mx.sym.linalg.trmm(l, q, alpha=1., name='L_times_Q')
return mx.sym.Group([q_qt, l_q])
# NOTE: If we leave the unused output dangling, things break if dtype=np.float64. Namely, the
# backward gradient for the unused output is of dtype np.float32 then.
# ==> Very annoying!
def _gelqf_first_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(l), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(q, bogus_scal)
def _gelqf_second_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(q), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(l, bogus_scal)
def _syevd_combined_symbol(a):
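# Group the two consistency checks for syevd(A) = (U, lam): U * U^T should be the
# identity (orthonormal eigenvector rows) and U^T * diag(lam) * U should give back
# the symmetric input A.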
u, lam = mx.sym.linalg.syevd(a)
u_ut = mx.sym.linalg.syrk(u, transpose=False, alpha=1., name='U_times_Ut')
lam_u = mx.sym.broadcast_mul(mx.sym.reshape(lam, shape=(-2, 1)), u)
ut_lam_u = mx.sym.linalg.gemm2(u, lam_u, alpha=1., transpose_a=True,
transpose_b=False, name='Ut_L_U')
return mx.sym.Group([u_ut, ut_lam_u])
@with_seed()
def test_laop_2():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
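# rep_3x tiles its argument into a batch of three identical copies with shape
# (3, 1, m, n), so the same per-matrix expected values can be reused verbatim
# for the batch-mode checks below.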
# Tests for linalg.syrk
mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
for m, n, alpha in mnalpha_lst:
#print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
data_in1 = np.random.uniform(1, 10, (m, n))
res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
check_fw(test_syrk1, [data_in1], [res_syrk1])
if grad_check == 1:
check_grad(test_syrk1, [data_in1])
res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
check_fw(test_syrk2, [data_in1], [res_syrk2])
if grad_check == 1:
check_grad(test_syrk2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
r1_batch = rep_3x(res_syrk1, m, m)
check_fw(test_syrk1, [a_batch], [r1_batch])
if grad_check == 1:
check_grad(test_syrk1, [a_batch])
r2_batch = rep_3x(res_syrk2, n, n)
check_fw(test_syrk2, [a_batch], [r2_batch])
if grad_check == 1:
check_grad(test_syrk2, [a_batch])
# Tests for linalg.gelqf
# Currently disabled on GPU as these tests need CUDA 8,
# while MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
test_gelqf2 = _gelqf_combined_symbol(data1) # Outputs (dot(Q, Q.T), dot(L, Q))
test_gelqf_q = _gelqf_first_output(data1) # Output Q (L is not dangling)
test_gelqf_l = _gelqf_second_output(data1) # Output L (Q is not dangling)
mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
for m, n in mn_lst:
#print('gelqf: m={}, n={}'.format(m, n))
data_in1 = np.random.normal(0., 10., (m, n))
res_eye = np.eye(m)
res_a = data_in1
check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [data_in1])
# A => L
check_grad(test_gelqf_l, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
reye_batch = rep_3x(res_eye, m, m)
ra_batch = a_batch
check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [a_batch])
# A => L
check_grad(test_gelqf_l, [a_batch])
# Tests for operator linalg.syevd
def _syevd_first_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(lam), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(u, bogus_scal)
def _syevd_second_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(u), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(lam, bogus_scal)
def _syevd_forward(a):
lam, ut = np.linalg.eig(a)
ind = np.argsort(lam)
lam = lam[ind]
u = ut[:, ind].T
for i in range(0, a.shape[0]):
_syevd_forw_eigvec_sign(u[i])
return u, lam
def _syevd_forw_eigvec_sign(v):
ind = np.argmax(np.abs(v))
if v[ind] < 0.:
v[:] = -v
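# The NumPy reference above sorts the eigenvalues in ascending order and flips each
# eigenvector so that its largest-magnitude entry is positive. This fixes the sign
# ambiguity of eigenvectors and (presumably matching the convention of linalg.syevd)
# makes the elementwise comparisons in the checks below deterministic.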
def _syevd_backward(grad_u, grad_l, u, l):
n = l.size
assert grad_l.size == n
assert grad_u.shape == (n, n)
assert u.shape == (n, n)
temp = np.dot(grad_u, u.T)
temp2 = np.diag(grad_l)
for i in range(1, n):
for j in range(0, i):
denom = 2. * (l[i] - l[j])
elem = (temp[i, j] - temp[j, i])/denom
temp2[i, j] = elem
temp2[j, i] = elem
temp3 = np.dot(u.T, temp2)
return np.dot(temp3, u)
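# A short description of the reference backward above: with G = dU * U^T it builds
# M = diag(dL) + S, where S is symmetric with
#   S[i, j] = (G[i, j] - G[j, i]) / (2 * (l[i] - l[j]))   for i > j, mirrored to S[j, i],
# and returns U^T * M * U as the gradient w.r.t. the symmetric input A.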
# Seed set because the test is not robust enough to operate on random data
@with_seed(1896893923)
def test_laop_3():
# Currently disabled on GPU as syevd needs CUDA 8,
# while MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
dtype = np.float64
rtol_fw = 1e-6
atol_fw = 1e-6
num_eps = 1e-4
rtol_bw = 1e-2
atol_bw = 1e-2
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol_fw, atol=atol_fw, dtype=dtype)
# Tests for linalg.syevd
test_syevd2 = _syevd_combined_symbol(data1) # Outputs (U U^T, U^T (diag L) U)
data1_s2 = _make_symm_symbol(data1, ndims=2)
test_syevd_u_2 = _syevd_first_output(data1_s2)
test_syevd_l_2 = _syevd_second_output(data1_s2)
data1_s4 = _make_symm_symbol(data1, ndims=4)
test_syevd_u_4 = _syevd_first_output(data1_s4)
test_syevd_l_4 = _syevd_second_output(data1_s4)
n_lst = [4, 1, 2, 10, 14]
for n in n_lst:
#print('\n** syevd: n={}'.format(n))
data_in1 = np.random.normal(0., 10., (n, n))
data_in1 = 0.5 * (data_in1 + data_in1.T)
res_eye = np.eye(n)
res_a = data_in1
check_fw(test_syevd2, [data_in1], [res_eye, res_a])
# Check backward
grad_u = np.random.normal(0., 2., (n, n))
grad_l = np.random.normal(0., 2., (n,))
bw_u, bw_l = _syevd_forward(data_in1)
grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_2, [data_in1])
# A => L
check_grad(test_syevd_l_2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, n, n)
reye_batch = rep_3x(res_eye, n, n)
ra_batch = a_batch
check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_4, [a_batch])
# A => L
check_grad(test_syevd_l_4, [a_batch])
# @piyushghai - Removing the fixed seed for this test.
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721
@with_seed()
def test_laop_4():
# Currently disabled on GPU as syevd needs CUDA 8,
# while MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
rtol_fw = 1e-6
atol_fw = 1e-6
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected, dtype :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
a_np = np.array([[1., 2.], [2., 4.]])
u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
l_np = np.array([0., 5.])
test_syevd = mx.sym.linalg.syevd(data1)
# float64
#print('float64')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float64)
# float32
#print('float32')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float32)
def test_laop_5():
# tests for diagonal and triangular matrix extraction and generation
data = mx.symbol.Variable('data')
# test complete range of small matrices to cover corner cases
for n in range(1, 10):
# test batched and non-batched processing
for b in range(3):
shape = (n, n) if b == 0 else (b, n, n)
data_in = np.random.uniform(1, 10, shape)
# test all legal offsets of the diagonal
for offs in range(1-n, n):
# test extraction of diagonal
test_diag = mx.sym.linalg.extractdiag(data, offset=offs)
res_diag = np.diagonal(data_in, offset=offs) if b==0 else np.diagonal(data_in, axis1=1, axis2=2, offset=offs)
check_symbolic_forward(test_diag, [data_in], [res_diag])
check_numeric_gradient(test_diag, [data_in])
# test generation of diagonal matrix
test_diag2 = mx.sym.linalg.makediag(data, offset=offs)
res_diag2 = None
if b == 0:
res_diag2 = np.diagflat(res_diag, k=offs)
else:
for i in range(b):
res = np.reshape(np.diagflat(res_diag[i], k=offs), (1, n, n))
res_diag2 = res if res_diag2 is None else np.concatenate((res_diag2, res), axis=0)
check_symbolic_forward(test_diag2, [res_diag], [res_diag2])
check_numeric_gradient(test_diag2, [res_diag])
# check both settings for parameter "lower" in case of zero offset
lower_vals = [True] if offs != 0 else [True, False]
for lower in lower_vals:
# test triangle extraction by doing a full roundtrip, as the intermediate extracted
# triangle has a different ordering than numpy's.
test_trian = mx.sym.linalg.extracttrian(data, offset=offs, lower=lower)
test_trian = mx.sym.linalg.maketrian(test_trian, offset=offs, lower=lower)
extracts_lower = (offs < 0) or ((offs == 0) and lower)
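# For a nonzero offset, the extracted side is determined by the sign of the offset
# (negative offsets lie below the main diagonal), so the 'lower' flag only matters
# for offset 0; 'extracts_lower' captures exactly that rule for the NumPy reference
# computed below.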
res_trian = None
if b == 0:
res_trian = np.tril(data_in, offs) if extracts_lower else np.triu(data_in, offs)
else:
for i in range(b):
res = np.tril(data_in[i], offs) if extracts_lower else np.triu(data_in[i], offs)
res = np.reshape(res, (1, n, n))
res_trian = res if res_trian is None else np.concatenate((res_trian, res), axis=0)
check_symbolic_forward(test_trian, [data_in], [res_trian])
check_numeric_gradient(test_trian, [data_in])
# Tests for linalg.inverse
@with_seed()
def test_laop_6():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-4
atol_bw = 1e-6
data = mx.symbol.Variable('data')
check_fw = lambda sym, location, expected:\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
# det(I + dot(v, v.T)) = 1 + dot(v.T, v) >= 1, so it's always invertible;
# det is away from zero, so the value of logdet is stable
v = np.random.random(4)
a = np.eye(4) + np.outer(v, v)
a = np.tile(a, (3, 1, 1))
permute_mat = np.eye(4)[[1, 0, 2, 3]]
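# Multiplying by this row-swap permutation flips the sign of the determinant
# (the determinant of a transposition is -1), which is what the slogdet sign
# check below relies on when it expects -r1.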
# test matrix inverse
r = np.eye(4)
r = np.tile(r, (3, 1, 1))
test_inverse = mx.sym.linalg.inverse(data)
test_eye = mx.sym.linalg.gemm2(data, test_inverse)
check_fw(test_eye, [a], [r])
check_grad(test_inverse, [a])
# test matrix determinant
# det
r = np.linalg.det(a)
test_det = mx.sym.linalg.det(data)
check_fw(test_det, [a], [r])
check_grad(test_det, [a])
# test slogdet
r1 = np.array([1., 1., 1.])
r2 = np.log(np.abs(np.linalg.det(a)))
test_sign, test_logabsdet = mx.sym.linalg.slogdet(data)
check_fw(test_sign, [a], [r1])
check_fw(test_sign, [np.dot(a, permute_mat)], [-r1])
check_fw(test_logabsdet, [a], [r2])
check_grad(test_logabsdet, [a])
@with_seed()
def test_stack():
for _ in range(100):
ndim = random.randint(1, 5)
axis = random.randint(0, ndim)
if random.randint(0, 1):
axis = axis - ndim - 1
nin = random.randint(1, 3)
dshape = [random.randint(1, 5) for _ in range(ndim)]
inputs = [np.random.uniform(size=dshape) for _ in range(nin)]
output = np.stack(inputs, axis=axis)
sym_ins = [mx.sym.var('x%d'%i) for i in range(nin)]
out = mx.sym.stack(*sym_ins, axis=axis)
check_symbolic_forward(out, inputs, [output])
check_numeric_gradient(out, inputs)
@with_seed()
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/14288")
def test_dropout():
def zero_count(array, ratio):
zeros = 0
for i in array:
if i == 0:
zeros += 1
elif math.isnan(i):
assert ratio == 1 # Only valid for ratio = 1
zeros += 1
return zeros
def check_correctness(executor, input, ratio):
input = input.ravel()
output = executor.outputs[0].asnumpy().ravel()
input_sum = np.sum(input)
output_sum = np.sum(output)
# Make sure the input contains no zeros (test data setup check)
assert zero_count(input, ratio) == 0
# count number of zeroes in output
output_zeroes = zero_count(output, ratio)
# The relative error of the sum should stay below ratio/2
error = abs(output_sum - input_sum) / input_sum
if ratio == 1.0:
assert output_zeroes == len(input)
elif ratio > 0.2:
assert output_zeroes > 0
assert error < (ratio/2)
elif ratio == 0:
assert output_zeroes == 0
def check_dropout_ratio(ratio, shape, cudnn_off=True):
# test dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
if ratio == 1:
max_value = float('nan')
else:
max_value = 1 if ratio == 0 else 1/ratio
if ratio == 1:
min_value = float('nan')
else:
min_value = 1 if ratio == 0 else 0
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
if not math.isnan(max_value):
assert exe.outputs[0].asnumpy().max() > 0
else:
assert math.isnan(exe.outputs[0].asnumpy().max())
if not math.isnan(min_value):
assert exe.outputs[0].asnumpy().min() == min_value
else:
assert math.isnan(exe.outputs[0].asnumpy().min())
check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)
if ratio == 0.5:
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
# test permanent dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
def get_slice(x, axis, idx):
ix = ()
for i in range(x.ndim):
if i == axis:
ix += (idx,)
else:
ix += (slice(None, None, None),)
return x[ix]
def check_dropout_axes(ratio, shape, axes, cudnn_off=True):
compactshape = list(shape)
for axis in axes:
compactshape[axis] = 1
compactx = mx.random.uniform(shape=tuple(compactshape))
broadcastx = compactx.broadcast_to(shape)
dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off)
for axis in axes:
target = get_slice(dropouty, axis, 0).asnumpy()
for i in range(1, shape[axis]):
assert(get_slice(dropouty, axis, i).asnumpy() == target).all()
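# With 'axes' given, the dropout mask is shared (broadcast) along those axes, so
# every slice taken along a dropped axis must be identical to slice 0, which is
# exactly what the loop above asserts.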
def check_passthrough(ratio, shape, cudnn_off=True):
# test inference_mode forward and then backward
a = mx.random.uniform(shape=shape)
a.attach_grad()
with mx.autograd.record(train_mode=False):
b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off) # dropout acts as identity
b.backward()
assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy())
shape = (100, 100)
check_dropout_ratio(0.5, shape)
check_dropout_ratio(0.0, shape)
check_dropout_ratio(1.0, shape)
check_dropout_ratio(0.75, shape)
check_dropout_ratio(0.25, shape)
check_dropout_ratio(0.5, shape, cudnn_off=False)
check_dropout_ratio(0.0, shape, cudnn_off=False)
check_dropout_ratio(1.0, shape, cudnn_off=False)
check_dropout_ratio(0.75, shape, cudnn_off=False)
check_dropout_ratio(0.25, shape, cudnn_off=False)
check_passthrough(0.5, shape)
check_passthrough(0.0, shape)
check_passthrough(1.0, shape)
check_passthrough(0.5, shape, cudnn_off=False)
check_passthrough(0.0, shape, cudnn_off=False)
check_passthrough(1.0, shape, cudnn_off=False)
nshape = (10, 10, 10, 10)
with mx.autograd.train_mode():
check_dropout_axes(0.25, nshape, axes = (0,))
check_dropout_axes(0.25, nshape, axes = (1,))
check_dropout_axes(0.25, nshape, axes = (2,))
check_dropout_axes(0.25, nshape, axes = (3,))
check_dropout_axes(0.25, nshape, axes = (0, 1))
check_dropout_axes(0.25, nshape, axes = (0, 2))
check_dropout_axes(0.25, nshape, axes = (0, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2))
check_dropout_axes(0.25, nshape, axes = (1, 3))
check_dropout_axes(0.25, nshape, axes = (2, 3))
check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False)
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/11290")
@with_seed()
def test_scatter_gather_nd():
def check(data, idx):
data.attach_grad()
with mx.autograd.record():
y = mx.nd.gather_nd(data, idx)
y.backward(y)
npidx = tuple(i.asnumpy() for i in idx)
assert (data.asnumpy()[npidx] == y.asnumpy()).all()
npdata = np.zeros_like(data.asnumpy())
npdata[npidx] = y.asnumpy()
assert (npdata == data.grad.asnumpy()).all()
assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all()
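# gather_nd's backward scatter-adds the output gradient back at the gathered
# indices (duplicate indices accumulate), which is what _backward_gather_nd
# computes; the cases with repeated indices below check that accumulation.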
for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
check(data, idx)
idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
check(data, idx)
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all()
data_npy = np.random.randint(0, 10, (100,))
data = mx.nd.array(data_npy, dtype=dtype)
idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
if dtype == 'int64':
data = mx.nd.array([2123162361283621, -31231236374787,
-112372937128970, -1378278798172378], dtype=dtype)
idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum())
def compare_forw_backw_unary_op(
name, forward_mxnet_call, forward_numpy_call,
backward_numpy_call, shape, input_low, input_high, rtol, atol,
dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
# Comparison: Forward expression
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
res_np = forward_numpy_call(data_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_fw(op_ex, [data_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data_grad = backward_numpy_call(data_np) * res_grad
check_bw(op_ex, [data_np], [res_grad], [data_grad])
def finite_diff_unary_op(
name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
data = mx.symbol.Variable('data', dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_grad(op_ex, [data_np])
def np_smooth_l1(x, sigma):
issq = 1. / sigma / sigma
absx = np.abs(x)
temp = x * sigma
return np.where(absx < issq, 0.5 * (temp ** 2), absx - 0.5 * issq)
def np_smooth_l1_grad(x, sigma):
ssq = sigma * sigma
return np.where(np.abs(x) < 1. / ssq, x * ssq, np.sign(x))
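# For reference, the smooth L1 used above (with parameter sigma) is the piecewise
# function
#   f(x) = 0.5 * (sigma * x)^2     if |x| < 1 / sigma^2
#   f(x) = |x| - 0.5 / sigma^2     otherwise
# A small illustrative check with sigma = 1: f(0.5) = 0.125 and f(2.0) = 1.5,
# with derivatives f'(0.5) = 0.5 and f'(2.0) = 1.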
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@with_seed(192837465)
def test_unary_math_operators():
have_scipy = True
try:
from scipy import special as scipy_special
except:
print("Could not import scipy. Skipping unit tests for special functions")
have_scipy = False
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
rtol_less_l = [1e-6, 1e-5, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
atol_less_l = [1e-6, 1e-5, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
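# Each entry below is [mxnet symbol op, numpy forward, numpy derivative,
# input_low, input_high]; the bounds keep the inputs inside the domain where
# both the function and its derivative are well conditioned.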
unary_ops = {
'arccos' : [lambda x: mx.sym.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / np.sqrt(1. - x ** 2.),
-0.95, 0.95],
'arccosh': [lambda x: mx.sym.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1. / np.sqrt(x ** 2 - 1.),
1.05, 10.0],
'arcsin': [lambda x: mx.sym.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / np.sqrt(1. - x ** 2),
-0.95, 0.95],
'arcsinh': [lambda x: mx.sym.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1. / np.sqrt(x**2 + 1.),
-5.0, 5.0],
'arctan': [lambda x: mx.sym.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
-5.0, 5.0],
'arctanh': [lambda x: mx.sym.arctanh(x),
lambda x: np.arctanh(x),
lambda x: 1. / (1. - x ** 2),
-0.95, 0.95],
'cbrt': [lambda x: mx.sym.cbrt(x),
lambda x: np.cbrt(x),
lambda x: 1. / (3. * np.cbrt(x) ** 2),
-10.0, 10.0],
'cos': [lambda x: mx.sym.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
-5.0, 5.0],
'cosh': [lambda x: mx.sym.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
-2.0, 2.0],
'exp': [lambda x: mx.sym.exp(x),
lambda x: np.exp(x),
lambda x: np.exp(x),
-4.0, 4.0],
'expm1': [lambda x: mx.sym.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
-0.1, 0.1],
'log': [lambda x: mx.sym.log(x),
lambda x: np.log(x),
lambda x: 1. / x,
0.01, 100.0],
'log10': [lambda x: mx.sym.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
0.01, 100.0],
'log2': [lambda x: mx.sym.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
0.01, 100.0],
'log1p': [lambda x: mx.sym.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1. + x),
-0.1, 0.1],
'rcbrt': [lambda x: mx.sym.rcbrt(x),
lambda x: 1. / np.cbrt(x),
lambda x: -1. / (3. * x * np.cbrt(x)),
0.01, 100.0],
'reciprocal': [lambda x: mx.sym.reciprocal(x),
lambda x: 1. / x,
lambda x: -1. / (x ** 2),
0.01, 100.0],
'relu': [lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'rsqrt': [lambda x: mx.sym.rsqrt(x),
lambda x: 1. / np.sqrt(x),
lambda x: -0.5 / (x * np.sqrt(x)),
0.01, 100.0],
'sigmoid': [lambda x: mx.sym.sigmoid(x),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.softsign(x),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
'sin': [lambda x: mx.sym.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
-5.0, 5.0],
'sinh': [lambda x: mx.sym.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
-2.0, 2.0],
'sqrt': [lambda x: mx.sym.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 0.5 / np.sqrt(x),
0.01, 100.0],
'tan': [lambda x: mx.sym.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1.,
-1.5, 1.5],
'tanh': [lambda x: mx.sym.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
lambda x: np_smooth_l1(x, 2.),
lambda x: np_smooth_l1_grad(x, 2.),
-1.0, 1.0]
}
if have_scipy:
unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
0.01, 5.0]
unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x),
0.01, 20.0]
# Loop over operators
for name, op in unary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
if name == 'gammaln' or name == 'gamma':
rtol = rtol_less_l[ind]
atol = atol_less_l[ind]
else:
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
def compare_forw_backw_binary_op(
name, forward_mxnet_call, forward_numpy_call,
backward1_numpy_call, backward2_numpy_call, shape, input1_low,
input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
# Comparison: Forward expression
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
res_np = forward_numpy_call(data1_np, data2_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_fw(op_ex, [data1_np, data2_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data1_grad = backward1_numpy_call(data1_np, data2_np) * res_grad
data2_grad = backward2_numpy_call(data1_np, data2_np) * res_grad
check_bw(op_ex, [data1_np, data2_np], [res_grad], [data1_grad, data2_grad])
def finite_diff_binary_op(
name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
input2_high, rtol, atol, num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
data1 = mx.symbol.Variable('data1', dtype=dtype)
data2 = mx.symbol.Variable('data2', dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_grad(op_ex, [data1_np, data2_np])
# Tests for binary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
@with_seed()
def test_binary_math_operators():
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
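# Entry layout: [mxnet op, numpy forward, d/dx, d/dy, x_low, x_high, y_low, y_high].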
binary_ops = {
'hypot' : [lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
-5.0, 5.0, -5.0, 5.0],
'pow': [lambda x, y: mx.sym.pow(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0],
'power': [lambda x, y: mx.sym.power(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0]
}
# Loop over operators
for name, op in binary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
compare_forw_backw_binary_op(
name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6],
op[7], rtol_l[ind], atol_l[ind], dtype)
# Finite difference testing
finite_diff_binary_op(
name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd,
num_eps)
@with_seed()
def test_softmax():
check_softmax_with_shape((3, 4), default_context(), preserve_shape=False)
check_softmax_with_shape((3, 4), default_context(), preserve_shape=True)
check_softmax_with_shape((3, 4, 2), default_context(), preserve_shape=True)
check_softmax_grad(default_context())
check_smoothed_softmax_grad(default_context())
@with_seed()
def test_softmax_output_normalization():
def _softmaxoutput_normalization(multi_output, use_ignore, normalization):
grad_scale = np.random.random()
batch_size = 8
num_labels = 6
H, W = 3, 3
ignore_label = np.random.randint(0, num_labels) if use_ignore else -1
if multi_output:
data_shape = (batch_size, num_labels, H, W)
label_shape = (batch_size, H, W)
else:
data_shape = (batch_size, num_labels)
label_shape = (batch_size, )
data = mx.nd.random.uniform(-1, 1, shape=data_shape)
label = mx.nd.random.randint(
0, num_labels, shape=label_shape).astype('float32')
data.attach_grad()
kwargs = dict(grad_scale=grad_scale,
normalization=normalization, multi_output=multi_output)
if use_ignore:
kwargs.update(use_ignore=True, ignore_label=ignore_label)
with mx.autograd.record():
out = mx.nd.SoftmaxOutput(data=data, label=label, **kwargs)
out.backward(mx.nd.ones_like(data))
exp_data = mx.nd.exp(data)
softmax_data = exp_data / exp_data.sum(1, keepdims=True)
argmax_data = mx.nd.argmax(data, axis=1)
assert_almost_equal(out.asnumpy(), softmax_data.asnumpy())
one_hot_label = mx.nd.one_hot(label, num_labels)
if multi_output:
one_hot_label = one_hot_label.transpose((0, 3, 1, 2))
data_grad = softmax_data - one_hot_label
if use_ignore:
if multi_output:
data_grad *= (label !=
ignore_label).reshape((batch_size, 1, H, W))
else:
data_grad *= (label != ignore_label).reshape((batch_size, 1))
valid_cnt = 1
if normalization == 'batch':
valid_cnt = batch_size
elif normalization == 'valid':
valid_cnt = mx.nd.maximum(1, (label != ignore_label).sum())
scale = grad_scale / valid_cnt
if multi_output:
if normalization != 'valid':
scale /= H * W
data_grad *= scale
assert_almost_equal(data.grad.asnumpy(), data_grad.asnumpy())
for multi_output in [False, True]:
for use_ignore in [False, True]:
for normalization in ['null', 'batch', 'valid']:
_softmaxoutput_normalization(
multi_output, use_ignore, normalization)
@with_seed()
def test_slice():
def test_slice_forward_backward(a, index):
a_np = a.asnumpy()
begin = []
end = []
step = []
for slice_i in index:
begin.append(slice_i.start)
end.append(slice_i.stop)
step.append(slice_i.step)
b = mx.nd.slice(a, begin=begin, end=end, step=step)
b_np = a_np[index]
assert same(b.asnumpy(), b_np)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
expected_in_grad = np.zeros_like(a_np)
expected_in_grad[index] = b_np
check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])
shape = (16, 14, 17, 20)
arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
(slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
(slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
(slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
(slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
for index in index_list:
test_slice_forward_backward(arr, index)
def test_begin_equals_end(shape, begin, end, step):
in_arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
out_arr = mx.nd.slice(in_arr, begin=begin, end=end, step=step)
assertRaises(MXNetError, test_begin_equals_end, (4,), (2,), (2,), (1,))
assertRaises(MXNetError, test_begin_equals_end, (1, 5), (None, 3), (None, 3), (-1, 1))
assertRaises(MXNetError, test_begin_equals_end, (3, 4, 5), (1, 3, 1), (3, 3, 1), (1, -3, 2))
assertRaises(MXNetError, test_begin_equals_end, (2, 4), (None, 2), (None, 2), (1, -1))
# check numeric gradient
in_data = np.arange(36).reshape(2, 2, 3, 3)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
check_numeric_gradient(slice_sym, [in_data])
def test_slice_partial_infer():
def check_slice_partial_infer(data, begin, end, step, expected_out_shape):
out = mx.sym.slice(data, begin=begin, end=end, step=step)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape):
out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
var1 = mx.sym.var(name="data", shape=(0, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3))
var1 = mx.sym.var(name="data", shape=(10, 0))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0))
with mx.np_shape():
var1 = mx.sym.var(name="data", shape=(-1, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (-1, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (-1, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (-1, 3))
var1 = mx.sym.var(name='data', shape=(10, -1))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, -1))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, -1))
@with_seed()
def test_float16_min_max():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/9007"""
a = mx.nd.array([np.finfo('float16').min, np.finfo('float16').max], dtype='float16')
assert a.dtype == np.float16
assert np.finfo('float16').min == mx.nd.min(a).asscalar()
assert np.finfo('float16').max == mx.nd.max(a).asscalar()
@with_seed()
@mx.use_np_shape
def test_zero_size_min_max():
def min():
a = mx.nd.zeros(shape=(5, 0))
a.min()
def max():
a = mx.nd.zeros(shape=(5, 0))
a.max()
assert_raises(MXNetError, min)
assert_raises(MXNetError, max)
@with_seed()
def test_squeeze_op():
def check_squeeze_op(shape, axis=None):
data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape)
if axis is None:
out = mx.nd.squeeze(data).asnumpy()
out_expected = np.squeeze(data.asnumpy())
else:
out = mx.nd.squeeze(data, axis=axis).asnumpy()
out_expected = np.squeeze(data.asnumpy(), axis=axis)
if out.shape == (1,): # as an exception (1, 1, 1) will be squeezed to (1,)
out_expected = np.squeeze(data.asnumpy(), axis=tuple([i for i in range(1, len(shape))]))
assert same(out, out_expected)
# check forward
check_squeeze_op((1, 5, 1, 3, 1), 0)
check_squeeze_op((1, 5, 1, 3, 1), 2)
check_squeeze_op((1, 5, 1, 3, 1), 4)
check_squeeze_op((1, 5, 1, 3, 1), (0, 4))
check_squeeze_op((1, 5, 1, 3, 1), (0, 2, 4))
check_squeeze_op((1, 5, 1, 3, 1))
check_squeeze_op((1, 1, 1, 1))
# check gradient
data = mx.symbol.Variable('data')
shape = (1, 2, 1, 3, 1)
data_tmp = np.ones(shape)
test = mx.sym.squeeze(data)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=2)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=(2, 4))
check_numeric_gradient(test, [data_tmp])
@with_seed()
def test_adaptive_avg_pool_op():
def py_adaptive_avg_pool(x, height, width):
# 2D per-frame adaptive average pool
def adaptive_avg_pool_frame(x, y):
isizeH, isizeW = x.shape
osizeH, osizeW = y.shape
for oh in range(osizeH):
istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
kH = iendH - istartH
for ow in range(osizeW):
istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
kW = iendW - istartW
xsum = 0
for ih in range(kH):
for iw in range(kW):
xsum += x[istartH+ih][istartW+iw]
y[oh][ow] = xsum / kH / kW
B,C,_,_ = x.shape
y = np.empty([B,C,height, width], dtype=x.dtype)
for b in range(B):
for c in range(C):
adaptive_avg_pool_frame(x[b][c], y[b][c])
return y
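# The reference above uses the usual adaptive-pooling bins
#   [floor(o * isize / osize), ceil((o + 1) * isize / osize))
# e.g. (illustration only) isize=10, osize=3 gives the overlapping bins
# [0, 4), [3, 7), [6, 10), each averaged independently.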
def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
x = mx.nd.random.uniform(shape=shape)
if output_width is None:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
else:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
assert_almost_equal(y.asnumpy(), npy)
shape = (2, 2, 10, 10)
for i in range(1, 11):
check_adaptive_avg_pool_op(shape, i)
for j in range(1, 11):
check_adaptive_avg_pool_op(shape, i, j)
@with_seed()
def test_bilinear_resize_op():
def py_bilinear_resize(x, outputHeight, outputWidth):
batch, channel, inputHeight, inputWidth = x.shape
if outputHeight == inputHeight and outputWidth == inputWidth:
return x
y = np.empty([batch, channel, outputHeight, outputWidth])
rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
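# This (inputSize - 1) / (outputSize - 1) mapping is the "align corners" style of
# bilinear resizing: the first and last output pixels coincide with the first and
# last input pixels, and everything in between is interpolated from up to four
# neighbouring input pixels below.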
for h2 in range(outputHeight):
h1r = 1.0 * h2 * rheight
h1 = int(np.floor(h1r))
h1lambda = h1r - h1
h1p = 1 if h1 < (inputHeight - 1) else 0
for w2 in range(outputWidth):
w1r = 1.0 * w2 * rwidth
w1 = int(np.floor(w1r))
w1lambda = w1r - w1
w1p = 1 if w1 < (inputWidth - 1) else 0
for b in range(batch):
for c in range(channel):
y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
w1lambda*x[b][c][h1][w1+w1p]) + \
h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
w1lambda*x[b][c][h1+h1p][w1+w1p])
return y
def py_bilinear_resize_backward(x, incoming_grads, mode='size'):
data1 = np.zeros_like(x)
data2 = incoming_grads
batchsize = data1.shape[0]
channels = data1.shape[1]
height1 = data1.shape[2]
width1 = data1.shape[3]
height2 = data2.shape[2]
width2 = data2.shape[3]
rheight = float(height1 - 1) / (height2 - 1) if (height2 > 1) else 0
rwidth = float(width1 - 1) / (width2 - 1) if (width2 > 1) else 0
# special case: just copy
if height1 == height2 and width1 == width2:
data1 += data2
return [data1]
for h2 in range(0, height2):
for w2 in range(0, width2):
h1r = rheight * h2
h1 = int(h1r)
h1p = 1 if (h1 < height1 - 1) else 0
h1lambda = h1r - h1
h0lambda = 1 - h1lambda
#
w1r = rwidth * w2
w1 = int(w1r)
w1p = 1 if (w1 < width1 - 1) else 0
w1lambda = w1r - w1
w0lambda = 1 - w1lambda
#
for n in range(0, batchsize):
for c in range(0, channels):
d2val = data2[n][c][h2][w2]
data1[n][c][h1][w1] += h0lambda * w0lambda * d2val
data1[n][c][h1][w1 + w1p] += h0lambda * w1lambda * d2val
data1[n][c][h1 + h1p][w1] += h1lambda * w0lambda * d2val
data1[n][c][h1 + h1p][w1 + w1p] += h1lambda * w1lambda * d2val
if mode == 'like':
return data1, np.zeros_like(incoming_grads)
return [data1]
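# The backward reference scatters each incoming output gradient onto the four
# neighbouring input pixels with the same bilinear weights used in the forward
# pass; in 'like' mode the second (shape-reference) input receives a zero
# gradient, matching the expected_backward used further below.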
def check_bilinear_resize_op(shape, height, width):
x = mx.nd.random.uniform(shape=shape)
y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
x_scale = width / shape[-1]
y_scale = height / shape[-2]
y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
def check_bilinear_resize_modes_op(shape, scale_height=None, scale_width=None, shape_1=None, mode=None):
x = mx.nd.random.uniform(shape=shape)
original_h = shape[2]
original_w = shape[3]
if mode == 'odd_scale':
assert scale_height is not None and scale_width is not None
new_h = int(original_h * scale_height) if (original_h % 2) == 0 else \
int((original_h - 1) * scale_height) + 1
new_w = int(original_w * scale_width) if (original_w % 2) == 0 \
else int((original_w - 1) * scale_width) + 1
y = mx.nd.contrib.BilinearResize2D(x, scale_height=scale_height,
scale_width=scale_width,
mode='odd_scale')
elif mode == 'to_even_down':
new_h = original_h if (original_h % 2) == 0 else original_h - 1
new_w = original_w if (original_w % 2) == 0 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_down')
elif mode == 'to_even_up':
new_h = original_h if (original_h % 2) == 0 else original_h + 1
new_w = original_w if (original_w % 2) == 0 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_up')
elif mode == 'to_odd_down':
new_h = original_h if (original_h % 2) == 1 else original_h - 1
new_w = original_w if (original_w % 2) == 1 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_down')
elif mode == 'to_odd_up':
new_h = original_h if (original_h % 2) == 1 else original_h + 1
new_w = original_w if (original_w % 2) == 1 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_up')
elif mode == 'like':
x_1 = mx.nd.random.uniform(shape=shape_1)
new_h = x_1.shape[2]
new_w = x_1.shape[3]
y = mx.nd.contrib.BilinearResize2D(x, x_1, mode='like')
new_shape_desired = np.array([shape[0], shape[1], new_h, new_w], dtype='int')
new_shape_got = np.array(y.shape, dtype='int')
data_sym = mx.sym.var('data')
data_np = x.asnumpy()
expected = py_bilinear_resize(data_np, new_h, new_w)
out_grads = np.ones([shape[0], shape[1], new_h, new_w])
expected_backward = py_bilinear_resize_backward(data_np, out_grads, mode)
assert_array_equal(new_shape_desired, new_shape_got, "Desired and got shapes are not equal. {} vs {}".format(
str(new_shape_desired.tolist()), str(new_shape_got.tolist())))
assert_almost_equal(y.asnumpy(), expected, 1e-3, 0)
if mode != 'like':
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, None, scale_height=scale_height, scale_width=scale_width, mode=mode)
check_symbolic_forward(resize_sym, [data_np], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np], rtol=1e-2, atol=1e-4)
else:
data_sym_like = mx.sym.var('data_like')
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, data_sym_like, mode=mode)
date_np_like = x_1.asnumpy()
check_symbolic_forward(resize_sym, [data_np, date_np_like], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np, date_np_like], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np, date_np_like], rtol=1e-2, atol=1e-4)
shape = (2, 2, 10, 10)
check_bilinear_resize_op(shape, 5, 5)
check_bilinear_resize_op(shape, 10, 10)
check_bilinear_resize_op(shape, 15, 15)
check_bilinear_resize_op(shape, 3, 7)
check_bilinear_resize_op(shape, 13, 17)
shape = (2, 2, 20, 20)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape = (2, 2, 21, 21)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape_0 = (2, 2, 21, 21)
shape_1 = (2, 2, 10, 10)
check_bilinear_resize_modes_op(shape_0, shape_1=shape_1, mode='like')
check_bilinear_resize_modes_op(shape_1, shape_1=shape_0, mode='like')
def test_multi_proposal_op():
# parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
threshold = 0.7
rpn_min_size = 16
batch_size = 20
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32)
im_info = mx.nd.empty((batch_size, 3), dtype = np.float32)
cls_prob = mx.nd.array(np.random.random(cls_prob.shape))
bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape))
for i in range(batch_size):
im_size = np.random.randint(100, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(70, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
def get_sub(arr, i):
new_shape = list(arr.shape)
new_shape[0] = 1
res = arr[i].reshape(new_shape)
return res
def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
single_proposal = []
single_score = []
for i in range(batch_size):
rois, score = mx.nd.contrib.Proposal(
cls_prob = get_sub(cls_prob, i),
bbox_pred = get_sub(bbox_pred, i),
im_info = get_sub(im_info, i),
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal.append(rois)
single_score.append(score)
multi_proposal, multi_score = mx.nd.contrib.MultiProposal(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape)
single_score = mx.nd.stack(*single_score).reshape(multi_score.shape)
single_proposal_np = single_proposal.asnumpy()
multi_proposal_np = multi_proposal.asnumpy()
single_score_np = single_score.asnumpy()
multi_score_np = multi_score.asnumpy()
# check rois x1,y1,x2,y2
assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:])
# check rois batch_idx
for i in range(batch_size):
start = i * rpn_post_nms_top_n
end = start + rpn_post_nms_top_n
assert (multi_proposal_np[start:end, 0] == i).all()
# check score
assert np.allclose(single_score_np, multi_score_np)
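# check_forward above relies on MultiProposal over a batch being equivalent to
# running the single-image Proposal operator per sample; column 0 of each ROI
# holds the batch index, which is why it is excluded from the coordinate check
# and verified separately.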
def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
im_info_sym = mx.sym.Variable('im_info')
cls_prob_sym = mx.sym.Variable('cls_prob')
bbox_pred_sym = mx.sym.Variable('bbox_pred')
sym = mx.sym.contrib.MultiProposal(
cls_prob = cls_prob_sym,
bbox_pred = bbox_pred_sym,
im_info = im_info_sym,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = False)
location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()]
expected = [np.zeros_like(e) for e in location]
out_grads = [np.ones((rpn_post_nms_top_n, 5))]
check_symbolic_backward(sym, location, out_grads, expected)
check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
check_forward(rpn_pre_nms_top_n, 1500)
check_forward(1000, 500)
check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
@with_seed()
def test_quadratic_function():
def f(x, a, b, c):
return a * x**2 + b * x + c
a = np.random.random_sample()
b = np.random.random_sample()
c = np.random.random_sample()
data = mx.symbol.Variable('data')
quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
for dtype in [np.float16, np.float32, np.float64]:
for ndim in range(1, 6):
shape = rand_shape_nd(ndim, 5)
data_np = np.random.randn(*shape).astype(dtype)
expected = f(data_np, a, b, c)
backward_expected = 2 * a * data_np + b
# check imperative forward
output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
assert_almost_equal(output.asnumpy(),expected,
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check forward
check_symbolic_forward(quad_sym, [data_np], [expected],
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check backward
check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)],
[backward_expected],
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check backward using finite difference
check_numeric_gradient(quad_sym, [data_np], atol=0.001)
@with_seed()
def test_histogram():
def f(x, bins=10, range=None):
return np.histogram(x, bins, range=range)
for ndim in range(1, 6):
shape = rand_shape_nd(ndim)
x = rand_ndarray(shape, stype='default', dtype=np.float64)
mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64)
np_bins = mx_bins.asnumpy()
bin_cnt = random.randint(2, 10)
bin_range = (-2.5, 2.5)
mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range)
np_histo1, np_bins1 = f(x.asnumpy(), bins=bin_cnt, range=bin_range)
assert_almost_equal(mx_bins1.asnumpy(), np_bins1)
assert_almost_equal(mx_histo1.asnumpy(), np_histo1, rtol=1e-3, atol=1e-5)
mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins)
np_histo2, np_bins2 = f(x.asnumpy(), bins=np_bins)
assert_almost_equal(mx_histo2.asnumpy(), np_histo2, rtol=1e-3, atol=1e-5)
assert_almost_equal(mx_bins2.asnumpy(), np_bins2, rtol=1e-3, atol=1e-5)
data = mx.sym.Variable("data")
bins = mx.sym.Variable("bins")
histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range)
histo2 = mx.sym.histogram(a=data, bins=bins)
executor1 = histo1.bind(ctx=default_context(), args={"data" : x})
executor1.forward(is_train=False)
assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False)
executor2 = histo2.bind(ctx=default_context(), args={"data" : x, "bins" : mx_bins})
executor2.forward(is_train=False)
assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False)
def test_op_output_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=False)
try:
op_exe.forward()
mx.nd.waitall()
except mx.base.MXNetError:
# skip errors since test is to check output names
pass
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
is_windows = sys.platform.startswith('win')
if (is_windows):
# Windows doesn't support setting environment variables on the fly, so disable this for now
pass
else:
# Disable the subgraph backend in case a subgraph pass would replace the symbol
os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['pooling_output'])
del os.environ['MXNET_SUBGRAPH_BACKEND']
def test_op_all_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=True)
try:
op_exe.forward()
mx.nd.waitall()
except mx.base.MXNetError:
# skip errors since test is to check all names
pass
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
is_windows = sys.platform.startswith('win')
if (is_windows):
# Windows doesn't support setting environment variables on the fly, so disable this for now
pass
else:
# Disable the subgraph backend in case a subgraph pass would replace the symbol
os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['data', 'conv_data', 'conv_weight', 'conv_weight', 'conv_bias', 'conv_bias', 'conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['data', 'deconv_data', 'deconv_weight', 'deconv_weight', 'deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['data', 'fc_data', 'fc_weight', 'fc_weight', 'fc_bias', 'fc_bias', 'fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['data', 'lrn_data', 'lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['data', 'act_input0', 'act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['data', 'concat_arg0', 'data', 'concat_arg1', 'concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['data', 'softmax_data', 'softmax_output'])
length = mx.sym.Variable("length", shape=(10, 10, 10))
sm_sym = mx.sym.softmax(data, length, axis=1, use_length=True, name='softmax')
check_name(sm_sym, ['data', 'softmax_data', 'length', 'softmax_length', 'softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['data', 'softmax_input0', 'softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['data', 'upsampling_arg0', 'upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['data', 'pooling_data', 'pooling_output'])
del os.environ['MXNET_SUBGRAPH_BACKEND']
@with_seed()
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/13915")
def test_activation():
shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)]
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'relu': [lambda x: mx.sym.Activation(x, act_type='relu'),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'sigmoid': [lambda x: mx.sym.Activation(x, act_type='sigmoid'),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'tanh': [lambda x: mx.sym.Activation(x, act_type='tanh'),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'softrelu': [lambda x: mx.sym.Activation(x, act_type='softrelu'),
lambda x: np.log(1. + np.exp(x)),
lambda x: 1. - 1 / (1 + np.exp(x)),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.Activation(x, act_type='softsign'),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
}
# Loop over operators
for name, op in unary_ops.items():
# Loop over shapes
for shape in shapes:
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
@with_seed()
def test_ravel():
# be aware that check_symbolic_forward will use float type internally
# for the arrays and that limits the representable flat index range.
# Taking dim==4 and a range of [0,..,100] for the data can already
# cause precision issues and break this test.
for dim in [1, 2, 3, 4]:
data = np.random.randint(50, size=(dim, 500))
shape = tuple(np.add(np.amax(data, axis=1), [1]))
a = mx.sym.Variable('a')
ravel_npy = np.ravel_multi_index(data, shape)
b = mx.sym.ravel_multi_index(a, shape=shape)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
# Test with leading dimension set to -1.
shape2 = shape
shape2 = (-1,)+shape[1:]
b = mx.sym.ravel_multi_index(a, shape=shape2)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape2)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
def test_context_num_gpus():
try:
        # Note: the test runs on both GPU and CPU hosts, so we cannot assert
        # a specific number here.
assert mx.context.num_gpus() >= 0
except mx.MXNetError as e:
        # Note: on a CPU-only host, CUDA sometimes cannot determine the number
        # of GPUs
if str(e).find("CUDA") == -1:
raise e
@with_seed()
def test_op_roi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
Assert whether the two data type are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0), []
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
grad = [(y_low, x_low, w1), (y_low, x_high, w2),
(y_high, x_low, w3), (y_high, x_high, w4)
]
return val, grad
def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio,
position_sensitive, dy):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 5,\
ValueError(
'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
C_out = C // PH // PW if position_sensitive else C
out = np.zeros((R, C_out, PH, PW), dtype=T)
dx = np.zeros_like(data)
drois = np.zeros_like(rois)
for r in range(R):
batch_ind = int(rois[r, 0])
sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale)
roi_w = T(max(ew - sw, 1.0))
roi_h = T(max(eh - sh, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
for c in range(C_out):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
c_in = c * PH * PW + ph * PW + pw if position_sensitive else c
for iy in range(roi_bin_grid_h):
y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
v, g = bilinear_interpolate(
bdata[c_in], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
# compute grad
for qy, qx, qw in g:
assert_same_dtype(qw.dtype, T)
dx[batch_ind, c_in, qy, qx] += dy[r,
c, ph, pw] * qw / count
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out, [dx, drois]
def test_roi_align_value(sampling_ratio=0, position_sensitive=False):
ctx = default_context()
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
data.attach_grad()
rois.attach_grad()
with mx.autograd.record():
output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sample_ratio=sampling_ratio,
position_sensitive=position_sensitive)
C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C
dy = mx.nd.random.uniform(-1, 1, (R, C_out) +
pooled_size, ctx=ctx, dtype=dtype)
output.backward(dy)
real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio,
position_sensitive, dy.asnumpy())
assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
assert_almost_equal(data.grad.asnumpy(), dx, atol=1e-3)
assert_almost_equal(rois.grad.asnumpy(), drois, atol=1e-3)
# modified from test_roipooling()
def test_roi_align_autograd(sampling_ratio=0):
ctx = default_context()
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1,
sample_ratio=sampling_ratio)
x1 = np.random.rand(4, 1, 12, 12).astype('float64')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2],
[1, 3.1, 1.1, 5.2, 10.2]], dtype='float64')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'write', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'add', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
test_roi_align_value()
test_roi_align_value(sampling_ratio=2)
test_roi_align_value(position_sensitive=True)
test_roi_align_autograd()
@with_seed()
def test_diag():
# Test 2d input
h = np.random.randint(2,9)
w = np.random.randint(2,9)
a_np = np.random.random((h, w)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k == 0
r = mx.nd.diag(a)
assert_almost_equal(r.asnumpy(), np.diag(a_np))
# k == 1
k = 1
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
# k == -1
k = -1
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
# random k
k = np.random.randint(-min(h,w) + 1, min(h,w))
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
# invalid k
k = max(h,w) + 1
assertRaises(MXNetError, mx.nd.diag, a, k=k)
# Test 2d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# test 1d input
d = np.random.randint(2,9)
a_np = np.random.random((d))
a = mx.nd.array(a_np)
# k is random
k = np.random.randint(-d,d)
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
    # Test 1d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d input
x1 = np.random.randint(3,9)
x2 = np.random.randint(3,9)
x3 = np.random.randint(3,9)
x4 = np.random.randint(3,9)
a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k = 0, axis1=0, axis2=1
r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=0, axis1=0, axis2=1))
# k = 1, axis1=1, axis2=0
r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=1, axis1=1, axis2=0))
    # k = -1, axis1=1, axis2=3
r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=-1, axis1=1, axis2=3))
# k = 2, axis1=-2, axis2=0
r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=2, axis1=-2, axis2=0))
# Test 4d backward, k=0, axis1=3, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=1, axis1=1, axis2=2
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-1, axis1=2, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-2, axis1=1, axis2=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1)
check_numeric_gradient(diag_sym, [a_np])
@with_seed()
def test_depthtospace():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
n = random.randint(1, 5)
c = block * block * rand_mul1
h = random.randint(1, 5)
w = random.randint(1, 5)
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.depth_to_space(data, block)
assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c // (block ** 2), h * block, w * block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.depth_to_space(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_depth_dim():
invalid_shape_inp = (n, block - 1, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_space_dim():
invalid_shape_inp = (n, block ** 2, 0, block + 1)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n , c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
test_invalid_depth_dim()
test_invalid_space_dim()
test_invalid_block_size()
@with_seed()
def test_spacetodepth():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
rand_mul2 = random.randint(1, 4)
n = random.randint(1, 5)
c = random.randint(1, 5)
h = block * rand_mul1
w = block * rand_mul2
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.space_to_depth(data, block)
assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c * (block ** 2), h // block, w // block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.space_to_depth(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_space_dim():
invalid_shape_inp = (n , c, block - 1, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n, c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_depth_dim():
invalid_shape_inp = (n, 0, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
test_invalid_space_dim()
test_invalid_block_size()
test_invalid_depth_dim()
@with_seed()
def test_softmax_cross_entropy():
def f_sm_ce(data, label):
return np.sum(-np.log(data) * label)
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
sym = mx.sym.softmax_cross_entropy(data=data, label=label)
num_labels = random.randint(100, 200)
batch_size = random.randint(100, 200)
np_data = rand_ndarray((batch_size, num_labels), stype='default').asnumpy()
np_sm = np_softmax(np_data)
np_label = np.random.randint(0, num_labels, (batch_size, ))
np_one_hot_label = np.zeros((batch_size, num_labels))
np_one_hot_label[np.arange(batch_size), np_label] = 1.
check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label}, [np.array([f_sm_ce(np_sm, np_one_hot_label)])], rtol=1e-3, atol=1e-5)
@with_seed()
def test_split_v2():
dim = random.randint(2, 6)
shape = rand_shape_nd(dim)
axis = random.randint(-dim, dim-1)
axis_size = shape[axis]
samples = random.randint(0, axis_size - 1)
indices = sorted(random.sample([i for i in range(1, axis_size)], samples))
indices = tuple(indices)
mx_data = rand_ndarray(shape)
np_data = mx_data.asnumpy()
np_out = np.split(np_data, indices_or_sections=indices, axis=axis)
data = mx.sym.Variable("data")
sym = mx.sym.split_v2(data, indices_or_sections=indices, axis=axis)
check_symbolic_forward(sym, {"data": mx_data}, np_out, rtol=1e-3, atol=1e-5)
out_grad = [np.ones(arr.shape) for arr in np_out]
check_symbolic_backward(sym, {"data": mx_data}, out_grad, [np.concatenate(out_grad, axis=axis)])
@with_seed()
def test_moments():
dim = random.randint(2, 5)
shape = rand_shape_nd(dim, dim=5)
axes = [i for i in range(dim)]
test_dims = random.sample(axes, random.randint(1, dim))
test_axes = tuple(sorted(test_dims))
np_a = np.random.uniform(-1.0, 1.0, shape)
a = mx.nd.array(np_a)
for keepdims in [True, False]:
eps = 1e-3
np_a[abs(np_a) < eps] = 2 * eps
np_mean = np.mean(np_a, axis=test_axes, keepdims=keepdims)
np_var = np.var(np_a, axis=test_axes, keepdims=keepdims)
mx_mean, mx_var = mx.nd.moments(a, keepdims=keepdims, axes=test_axes)
N = np_a.size / np_mean.size
mx_sym = mx.sym.Variable("data")
mx_moments = mx.sym.moments(mx_sym, axes=test_axes, keepdims=keepdims)
mx_test_sym = mx.sym.elemwise_add(mx_moments[0], mx_moments[1])
if len(np_mean.shape) == 0:
np_mean = np_mean.reshape(mx_mean.shape)
np_var = np_var.reshape(mx_var.shape)
assert np_mean.shape == mx_mean.shape
assert np_var.shape == mx_var.shape
check_symbolic_forward(mx_test_sym, [np_a], [np_mean + np_var], rtol=1e-3, atol=1e-5)
check_numeric_gradient(mx_test_sym, [np_a], numeric_eps=eps, rtol=1e-2, atol=2e-4)
@with_seed()
def test_invalid_kernel_size():
invalid_kernel_size = 28
assert_exception(
mx.nd.Correlation,
MXNetError,
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=invalid_kernel_size)
@with_seed()
def test_valid_kernel_size():
valid_kernel_size = 9
mx.nd.Correlation(
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=valid_kernel_size)
@with_seed()
def test_valid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
output_data=mx.nd.Pooling(
input_data,
kernel=kernel,
stride=stride,
pad=(0,0,0),
pool_type='max',
name='pooling',
pooling_convention="same")
assert(math.ceil(input_data.shape[2]/stride) == output_data.shape[2])
@with_seed()
def test_invalid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
pad = 2
assert_exception(
mx.nd.Pooling,
MXNetError,
input_data,
stride=stride,
kernel=kernel,
pad=pad,
pool_type='max',
name='pooling',
pooling_convention="same")
@with_seed()
def test_image_normalize():
# Part 1 - Test 3D input with 3D mean/std
shape_3d = (3, 28, 28)
mean = (0, 1, 2)
std = (3, 2, 1)
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0
data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0
data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][0] = 1 / 3.0
grad_expected_3d[:][:][1] = 1 / 2.0
grad_expected_3d[:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 2 - Test 4D input with 3D mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0
data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0
data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0
data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0
data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0
data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[0][:][:][0] = 1 / 3.0
grad_expected_4d[0][:][:][1] = 1 / 2.0
grad_expected_4d[0][:][:][2] = 1 / 1.0
grad_expected_4d[1][:][:][0] = 1 / 3.0
grad_expected_4d[1][:][:][1] = 1 / 2.0
grad_expected_4d[1][:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
# Part 3 - Test 3D input with scalar mean/std
shape_3d = (3, 28, 28)
mean = 1.0
std = 2.0
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][:] = (data_expected_3d[:][:][:] - 1.0) / 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 4 - Test 4D input with scalar mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[:][:][:][:] = (data_expected_4d[:][:][:][:] - 1.0) / 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[:][:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
@with_seed()
def test_index_array():
def test_index_array_default():
for shape in [(10,), (7, 5, 29), (5, 7, 11, 13, 17, 19)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_dim():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(())
expected = np.zeros((0,))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones((0, 0, 0))
expected = np.zeros((0, 0, 0, 3))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
def test_index_array_select_axes():
shape = (5, 7, 11, 13, 17, 19)
for axes in [(3,), (4, 1), (5, 1, 3), (-1,), (-5, -1, -3)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=axes)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)[..., axes]
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_select_axes_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=(2, 1))
input_array = np.ones((0, 0, 0, 0))
expected = np.zeros((0, 0, 2))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
test_index_array_default()
test_index_array_default_zero_dim()
test_index_array_default_zero_size()
test_index_array_select_axes()
test_index_array_select_axes_zero_size()
@with_seed()
def test_scalar_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=())
assertRaises(MXNetError, mx.nd.ones, shape=())
with mx.np_shape():
data_mx = mx.nd.ones(shape=())
data_np = np.ones((), dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
@with_seed()
def test_zero_size_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=(0, 1, 3, 0))
assertRaises(MXNetError, mx.nd.ones, shape=(0, 1, 3, 0))
with mx.np_shape():
data_mx = mx.nd.ones(shape=(0, 1, 0, 4))
data_np = np.ones(shape=data_mx.shape, dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
@with_seed()
def test_concat_with_zero_size_tensor():
with mx.np_shape():
data1 = mx.nd.ones((0, 8, 12))
data2 = mx.nd.ones((3, 8, 12))
data3 = mx.nd.ones((0, 8, 12))
ret = mx.nd.Concat(data1, data2, data3, dim=0)
assert ret.shape == (3, 8, 12)
data1 = mx.nd.ones((0, 3, 10))
data2 = mx.nd.ones((0, 4, 10))
data3 = mx.nd.ones((0, 5, 10))
ret = mx.nd.Concat(data1, data2, data3, dim=1)
assert ret.shape == (0, 12, 10)
@with_seed()
def test_np_shape_decorator():
@mx.use_np_shape
def check_scalar_one():
"""Generate scalar one tensor"""
return mx.nd.ones(shape=())
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
for active in [True, False]:
with mx.np_shape(active=active):
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
@mx.use_np_shape
def check_concat(shape1, shape2, axis):
data1 = mx.nd.ones(shape1)
data2 = mx.nd.ones(shape2)
ret = mx.nd.Concat(data1, data2, dim=axis)
expected_ret = np.concatenate((data1.asnumpy(), data2.asnumpy()), axis=axis)
assert ret.shape == expected_ret.shape
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
for active in [True, False]:
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
@with_seed()
def test_add_n():
data_shape = (2, 2)
input_num = 5
data = [mx.nd.random.uniform(shape=data_shape) for i in range(input_num)]
rslt = mx.nd.zeros(shape=data_shape)
for i in range(input_num):
rslt += data[i]
add_n_rslt = mx.nd.add_n(*data, out=data[0])
assert_almost_equal(rslt.asnumpy(), add_n_rslt.asnumpy(), atol=1e-5)
def test_get_all_registered_operators():
ops = get_all_registered_operators()
ok_(isinstance(ops, list))
ok_(len(ops) > 0)
ok_('Activation' in ops)
def test_get_operator_arguments():
operator_arguments = get_operator_arguments('Activation')
ok_(isinstance(operator_arguments, OperatorArguments))
ok_(operator_arguments.names == ['data', 'act_type'])
ok_(operator_arguments.types
== ['NDArray-or-Symbol', "{'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required"])
ok_(operator_arguments.narg == 2)
def test_transpose_infer_shape_back():
o1 = mx.sym.ones(shape=[2,3])
o2 = mx.sym.ones(shape=[-1,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b.bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
def test_transpose_infer_shape_mixed():
o1 = mx.sym.ones(shape=[2,-1])
o2 = mx.sym.ones(shape=[3,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b.bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
if __name__ == '__main__':
import nose
nose.runmodule()
|
tcp_proxy_encoded.py
|
#!/usr/bin/env python3
import os
import json
import socket
import threading
from selectors import DefaultSelector, EVENT_READ
# Port the proxy listens on locally
LOCAL_PORT = 7088
# Remote server and port to connect to; change this to your own remote server address
REMOTE_ADDR = "hachinasp.duckdns.org"
REMOTE_PORT = 7088
def xor_encode(bstring):
    """A simple XOR encoding: applying it twice returns the original value."""
MASK = 0x55
ret = bytearray( bstring )
for i in range(len(ret)):
ret[i] ^= MASK
return ret
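# A quick illustration of the property noted in the docstring (my own example,
# not part of the original script): XOR-ing with the same mask twice is the
# identity, so the peer decodes by running the exact same function.
#   >>> xor_encode(xor_encode(b"hello")) == bytearray(b"hello")
#   True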
def proxy_process_encoded(sock1, sock2):
    """Forward data between two sockets: anything received on one side is XOR-encoded and sent to the other."""
sel = DefaultSelector()
sel.register(sock1, EVENT_READ)
sel.register(sock2, EVENT_READ)
while True:
events = sel.select()
for (key,ev) in events:
try:
data_in = key.fileobj.recv(8192)
except ConnectionResetError as e:
print(key.fileobj, "\nreset receive!")
sock1.close()
sock2.close()
return
if data_in:
if key.fileobj==sock1:
sock2.send(xor_encode(data_in))
else:
sock1.send(xor_encode(data_in))
else:
sock1.close()
sock2.close()
return
def tcp_proxy(sock_in, addr):
"""新的代理请求连接时,进行相关处理"""
print("新的连接: %s:%s..." % addr, flush=True)
# 建立远程连接
sock_remote = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock_remote.settimeout(15)
try:
sock_remote.connect((REMOTE_ADDR, REMOTE_PORT))
except Exception as e:
print(e, flush=True)
print( "Error when connect to", (REMOTE_ADDR, REMOTE_PORT), flush=True )
sock_in.close()
return
    # Relay data between the local and remote connections
proxy_process_encoded( sock_in, sock_remote )
def start_server():
"""主服务函数"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("0.0.0.0", LOCAL_PORT))
s.listen()
print("等待客户端连接...", flush=True)
while True:
sock, addr = s.accept()
t = threading.Thread(target=tcp_proxy, args=(sock, addr))
t.start()
if __name__ == "__main__":
os.system("iptables -A INPUT -p tcp --sport {} --tcp-flags RST RST -j DROP".format(REMOTE_PORT))
start_server()
|
app.py
|
# packages that need to be pip installed
import praw
from psaw import PushshiftAPI
# packages that come with python
import traceback
from multiprocessing import Process, Value
from time import sleep, time
# other files
import config
import database
from setInterval import setInterval
rows = []
reddit = praw.Reddit(client_id=config.client_id,
client_secret=config.client_secret,
username=config.username,
password=config.password,
user_agent=config.user_agent)
api = PushshiftAPI(reddit)
@setInterval(1800)
def delete_comment():
try:
for comment in reddit.redditor('RepostCheckerBot').comments.new(limit=50):
if comment.score < -1:
                with open('fails.txt', 'a') as f:
                    f.write(str(comment.body))
comment.delete()
except Exception as e:
print(e)
print(repr(e))
if '503' in str(e):
print('503 from server')
if '504' in str(e):
print('504 from server')
if '401' in str(e):
print('401 from server')
else:
            with open('errs.txt', 'a') as f:
                f.write('{}\n'.format(traceback.format_exc()))
# the main function
class FindPosts(Process):
def __init__(self, sub_settings):
# Constructor.
Process.__init__(self)
self.sub_settings = sub_settings
self.v = Value('i', 0)
def run(self):
Process(target=self.find_top_posts).start()
self.findNewPosts()
def find_top_posts(self):
subreddit = reddit.subreddit(self.sub_settings[0])
print(self.sub_settings)
new = False
first_time = True
print('Starting searching...')
while True:
try:
post = 0
# first get 50 posts from the top of the subreddit
for submission in api.search_submissions(subreddit=subreddit):
while True:
if (self.v.value != 0) or first_time:
try:
x = self.v.value
except IndexError as e:
if 'deque index out of range' not in str(e):
raise IndexError(e)
if first_time or (x is not None and x == 2):
first_time = False
top = True
hot = False
post += 1
result = database.is_logged(
submission.url,
submission.media,
submission.selftext,
submission.permalink,
submission.created_utc,
top,
hot,
new,
self.sub_settings,
reddit,
)
if result != [['delete', -1, -1, -1, -1, -1]] and (result == [] or submission.created_utc != result[0][2]):
rows.append(database.add_post(
submission.created_utc,
submission.url,
submission.media,
submission.permalink,
submission.selftext,
submission.author,
submission.title,
top,
hot,
new,
self.sub_settings[0],
self.sub_settings[8]
))
print('{} --> Added {}'.format(
post,
submission.permalink,
))
self.v.value = 1
break
except Exception as e:
print(traceback.format_exc())
if '503' in str(e):
print('503 from server')
if '401' in str(e):
print('401 from server')
else:
                    with open('errs.txt', 'a') as f:
                        f.write(traceback.format_exc())
def findNewPosts(self):
subreddit = reddit.subreddit(self.sub_settings[0])
top = False
hot = False
new = True
limit_val = self.sub_settings[6]
while True:
try:
post = 0
# then get 1000 posts from new of the subreddit
for submission in api.search_submissions(subreddit=subreddit, limit=limit_val):
while True:
if self.v.value != 0:
try:
x = self.v.value
except IndexError as e:
if 'deque index out of range' not in str(e):
raise IndexError(e)
if x is not None and x == 1:
post += 1
result = database.is_logged(
submission.url,
submission.media,
submission.selftext,
submission.permalink,
submission.created_utc,
top,
hot,
new,
self.sub_settings,
reddit,
)
if result != [['delete', -1, -1, -1, -1, -1]] and (result == [] or submission.created_utc != result[0][2]):
rows.append(database.add_post(
submission.created_utc,
submission.url,
submission.media,
submission.permalink,
submission.selftext,
submission.author,
submission.title,
top,
hot,
new,
self.sub_settings[0],
self.sub_settings[8],
))
print('{} --> Added {}'.format(
post,
submission.permalink,
))
if result != [] and result != [['delete', -1, -1, -1, -1, -1]]:
print('reported')
# report and make a comment
submission.report('REPOST ALERT')
cntr = 0
table = ''
for i in result:
table = '{}{}|[{}](https://reddit.com{})|{}|{}%|{}\n'.format(
table,
str(cntr),
i[5],
i[0],
i[1],
str(i[3]),
i[4],
)
cntr += 1
full_text = 'I have detected that this may be a repost: \n'+ \
'\nNum|Post|Date|Match|Author\n:--:|:--:|:--:|:--:|:--:\n{}'.format(table) + \
'\n*Beep Boop* I am a bot | [Source](https://github.com/xXAligatorXx/repostChecker)' + \
'| Contact u/XXAligatorXx for inquiries | The bot will delete its message at -2 score'
do_this = True
while do_this:
try:
submission.reply(full_text)
do_this = False
except:
do_this = True
self.v.value = 2
break
limit_val = 10
except Exception as e:
print(traceback.format_exc())
if '503' in str(e):
print('503 from server')
if '401' in str(e):
print('401 from server')
else:
                    with open('errs.txt', 'a') as f:
                        f.write(traceback.format_exc())
thread_count = 0
threads = []
for i in config.sub_settings:
if i is not None:
database.init_database(i[0], i[8])
threads.append(FindPosts(i))
if i[1] is not None or i[2] is not None or i[3] is not None:
database.delete_old_loop(i)
threads[thread_count].start()
thread_count += 1
delete_comment()
for i in range(0, len(threads)):
threads[i].join()
|
config_manager.py
|
import fnmatch
import logging
import os
import sys
import threading
import time
from configparser import (ConfigParser, DuplicateSectionError,
DuplicateOptionError, InterpolationError,
ParsingError)
from datetime import datetime
from types import FrameType
from typing import List, Optional, Callable
import pika
from pika.adapters.blocking_connection import BlockingChannel
from pika.exceptions import AMQPChannelError, AMQPConnectionError
from watchdog.events import FileSystemEvent
from watchdog.observers.polling import PollingObserver
from src.abstract.publisher_subscriber import \
QueuingPublisherSubscriberComponent
from src.message_broker.rabbitmq import RabbitMQApi
from src.utils import env
from src.utils import routing_key
from src.utils.constants.rabbitmq import (
CONFIG_EXCHANGE, HEALTH_CHECK_EXCHANGE, CONFIGS_MANAGER_HEARTBEAT_QUEUE,
PING_ROUTING_KEY, HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY, TOPIC)
from src.utils.constants.starters import RE_INITIALISE_SLEEPING_PERIOD
from src.utils.exceptions import (MessageWasNotDeliveredException,
ConnectionNotInitialisedException)
from .config_update_event_handler import ConfigFileEventHandler
from ..utils.logging import log_and_print
_FIRST_RUN_EVENT = 'first run'
class ConfigsManager(QueuingPublisherSubscriberComponent):
"""
    This class reads all configurations and sends them over to the "config"
    topic in RabbitMQ. Updated configs are sent as well.
"""
def __init__(self, name: str, logger: logging.Logger, config_directory: str,
rabbit_ip: str, file_patterns: Optional[List[str]] = None,
ignore_file_patterns: Optional[List[str]] = None,
ignore_directories: bool = True, case_sensitive: bool = False):
"""
Constructs the ConfigsManager instance
:param config_directory: The root config directory to watch.
This is searched recursively.
:param file_patterns: The file patterns in the directory to watch.
Defaults to all ini files
:param ignore_file_patterns: Any file patterns to ignore.
Defaults to None
:param ignore_directories: Whether changes in directories should be
ignored. Default: True
:param case_sensitive: Whether the patterns in `file_patterns` and
`ignore_file_patterns` are case sensitive. Defaults to False
"""
if not file_patterns:
file_patterns = ['*.ini']
self._name = name
self._config_directory = config_directory
self._file_patterns = file_patterns
self._watching = False
self._connected_to_rabbit = False
self._current_thread = None
logger.debug("Creating config RabbitMQ connection")
rabbitmq = RabbitMQApi(
logger.getChild("config_{}".format(RabbitMQApi.__name__)),
host=rabbit_ip)
super().__init__(logger, rabbitmq,
env.CONFIG_PUBLISHING_QUEUE_SIZE)
self._logger.debug("Creating heartbeat RabbitMQ connection")
self._heartbeat_rabbit = RabbitMQApi(
logger.getChild("heartbeat_{}".format(RabbitMQApi.__name__)),
host=rabbit_ip)
self._event_handler = ConfigFileEventHandler(
self._logger.getChild(ConfigFileEventHandler.__name__),
self._on_event_thrown,
file_patterns,
ignore_file_patterns,
ignore_directories,
case_sensitive
)
self._observer = PollingObserver()
self._observer.schedule(self._event_handler, config_directory,
recursive=True)
def __str__(self) -> str:
return self.name
@property
def name(self) -> str:
return self._name
def _initialise_rabbitmq(self) -> None:
while True:
try:
self._connect_to_rabbit()
self._logger.info("Connected to Rabbit")
self.rabbitmq.confirm_delivery()
self._logger.info("Enabled delivery confirmation on configs"
"RabbitMQ channel")
self.rabbitmq.exchange_declare(
CONFIG_EXCHANGE, TOPIC, False, True, False, False
)
self._logger.info("Declared %s exchange in Rabbit",
CONFIG_EXCHANGE)
self._heartbeat_rabbit.confirm_delivery()
self._logger.info("Enabled delivery confirmation on heartbeat"
"RabbitMQ channel")
self._heartbeat_rabbit.exchange_declare(
HEALTH_CHECK_EXCHANGE, TOPIC, False, True, False, False
)
self._logger.info("Declared %s exchange in Rabbit",
HEALTH_CHECK_EXCHANGE)
self._logger.info(
"Creating and binding queue '%s' to exchange '%s' with "
"routing key '%s", CONFIGS_MANAGER_HEARTBEAT_QUEUE,
HEALTH_CHECK_EXCHANGE, PING_ROUTING_KEY)
self._heartbeat_rabbit.queue_declare(
CONFIGS_MANAGER_HEARTBEAT_QUEUE, False, True, False, False)
self._logger.debug("Declared '%s' queue",
CONFIGS_MANAGER_HEARTBEAT_QUEUE)
self._heartbeat_rabbit.queue_bind(
CONFIGS_MANAGER_HEARTBEAT_QUEUE, HEALTH_CHECK_EXCHANGE,
PING_ROUTING_KEY)
self._logger.debug("Bound queue '%s' to exchange '%s'",
CONFIGS_MANAGER_HEARTBEAT_QUEUE,
HEALTH_CHECK_EXCHANGE)
# Pre-fetch count is set to 300
                prefetch_count = 300
self._heartbeat_rabbit.basic_qos(prefetch_count=prefetch_count)
self._logger.debug("Declaring consuming intentions")
self._heartbeat_rabbit.basic_consume(
CONFIGS_MANAGER_HEARTBEAT_QUEUE, self._process_ping, True,
False, None)
break
except (ConnectionNotInitialisedException,
AMQPConnectionError) as connection_error:
# Should be impossible, but since exchange_declare can throw
# it we shall ensure to log that the error passed through here
# too.
self._logger.error(
"Something went wrong that meant a connection was not made")
self._logger.error(connection_error)
raise connection_error
except AMQPChannelError:
# This error would have already been logged by the RabbitMQ
                # logger and handled by RabbitMQ. As a result we don't need to
                # do anything here, just retry.
time.sleep(RE_INITIALISE_SLEEPING_PERIOD)
def _connect_to_rabbit(self) -> None:
if not self._connected_to_rabbit:
self._logger.info("Connecting to the config RabbitMQ")
self.rabbitmq.connect_till_successful()
self._logger.info("Connected to config RabbitMQ")
self._logger.info("Connecting to the heartbeat RabbitMQ")
self._heartbeat_rabbit.connect_till_successful()
self._logger.info("Connected to heartbeat RabbitMQ")
self._connected_to_rabbit = True
else:
self._logger.info(
"Already connected to RabbitMQ, will not connect again")
def disconnect_from_rabbit(self) -> None:
if self._connected_to_rabbit:
self._logger.info("Disconnecting from the config RabbitMQ")
self.rabbitmq.disconnect_till_successful()
self._logger.info("Disconnected from the config RabbitMQ")
self._logger.info("Disconnecting from the heartbeat RabbitMQ")
self._heartbeat_rabbit.disconnect_till_successful()
self._logger.info("Disconnected from the heartbeat RabbitMQ")
self._connected_to_rabbit = False
else:
self._logger.info("Already disconnected from RabbitMQ")
def _send_heartbeat(self, data_to_send: dict) -> None:
self._logger.debug("Sending heartbeat to the %s exchange",
HEALTH_CHECK_EXCHANGE)
self._logger.debug("Sending %s", data_to_send)
self._heartbeat_rabbit.basic_publish_confirm(
exchange=HEALTH_CHECK_EXCHANGE,
routing_key=HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY, body=data_to_send,
is_body_dict=True, properties=pika.BasicProperties(delivery_mode=2),
mandatory=True)
self._logger.debug("Sent heartbeat to %s exchange",
HEALTH_CHECK_EXCHANGE)
def _process_ping(self, ch: BlockingChannel,
method: pika.spec.Basic.Deliver,
properties: pika.spec.BasicProperties,
body: bytes) -> None:
self._logger.debug("Received %s. Let's pong", body)
try:
heartbeat = {
'component_name': self.name,
'is_alive': self._observer.is_alive(),
'timestamp': datetime.now().timestamp(),
}
self._send_heartbeat(heartbeat)
except MessageWasNotDeliveredException as e:
# Log the message and do not raise it as heartbeats must be
# real-time
self._logger.error("Error when sending heartbeat")
self._logger.exception(e)
def _on_event_thrown(self, event: FileSystemEvent) -> None:
"""
When an event is thrown, it reads the config and sends it as a dict via
rabbitmq to the config exchange of type topic
with the routing key determined by the relative file path.
:param event: The event passed by watchdog
:return None
"""
self._logger.debug("Event thrown: %s", event)
self._logger.info("Detected a config %s in %s", event.event_type,
event.src_path)
if event.event_type == "deleted":
self._logger.debug("Creating empty dict")
config_dict = {}
else:
config = ConfigParser()
self._logger.debug("Reading configuration")
try:
config.read(event.src_path)
except (
DuplicateSectionError, DuplicateOptionError,
InterpolationError, ParsingError
) as e:
self._logger.error(e.message)
# When the config is invalid, we do nothing and discard this
# event.
return None
self._logger.debug("Config read successfully")
config_dict = {key: dict(config[key]) for key in config}
self._logger.debug("Config converted to dict: %s", config_dict)
# Since the watcher is configured to watch files in
# self._config_directory we only need check that (for get_routing_key)
config_folder = os.path.normpath(self._config_directory)
key = routing_key.get_routing_key(event.src_path, config_folder)
self._logger.debug("Sending config %s to RabbitMQ with routing key %s",
config_dict, key)
self._push_to_queue(config_dict, CONFIG_EXCHANGE, key)
@property
def config_directory(self) -> str:
return self._config_directory
@property
def watching(self) -> bool:
return self._watching
@property
def connected_to_rabbit(self) -> bool:
return self._connected_to_rabbit
def start(self) -> None:
"""
This method is used to start rabbit and the observer and begin watching
the config files. It also sends the configuration files for the first
time
:return None
"""
log_and_print("{} started.".format(self), self._logger)
self._initialise_rabbitmq()
"""
We want to start a thread that connects to rabbitmq and begins attempts
to send configs.
"""
self._create_and_start_sending_configs_thread()
def do_first_run_event(name: str) -> None:
event = FileSystemEvent(name)
event.event_type = _FIRST_RUN_EVENT
self._on_event_thrown(event)
self._logger.info("Throwing first run event for all config files")
self.foreach_config_file(do_first_run_event)
if not self._watching:
self._logger.info("Starting config file observer")
self._observer.start()
self._watching = True
else:
self._logger.info("File observer is already running")
self._logger.debug("Config file observer started")
self._connect_to_rabbit()
self._listen_for_data()
def _sending_configs_thread(self) -> None:
while True:
try:
if not self.publishing_queue.empty():
try:
self._send_data()
except MessageWasNotDeliveredException as e:
self.logger.exception(e)
except (ConnectionNotInitialisedException,
AMQPConnectionError) as e:
# If the connection is not initialised or there is a connection
# error, we need to restart the connection and try it again
self._logger.error("There has been a connection error")
self._logger.exception(e)
self._logger.info("Restarting the connection")
self._connected_to_rabbit = False
# Wait some time before reconnecting and then retrying
time.sleep(RE_INITIALISE_SLEEPING_PERIOD)
self._connect_to_rabbit()
self._logger.info("Connection restored, will attempt sending "
"the config.")
            except AMQPChannelError as e:
# This error would have already been logged by the RabbitMQ
# logger and handled by RabbitMQ. Since a new channel is
# created we need to re-initialise RabbitMQ
self._initialise_rabbitmq()
raise e
self.rabbitmq.connection.sleep(10)
def _create_and_start_sending_configs_thread(self) -> None:
try:
self._current_thread = threading.Thread(
target=self._sending_configs_thread)
self._current_thread.start()
except Exception as e:
self._logger.error("Failed to start sending configs thread!")
self._logger.exception(e)
raise e
def _terminate_and_stop_sending_configs_thread(self) -> None:
if self._current_thread is not None:
self._current_thread.join()
self._current_thread = None
def _listen_for_data(self) -> None:
self._logger.info("Starting the config ping listener")
self._heartbeat_rabbit.start_consuming()
def _on_terminate(self, signum: int, stack: FrameType) -> None:
"""
This method is used to stop the observer and join the threads
"""
log_and_print("{} is terminating. Connections with RabbitMQ will be "
"closed, and afterwards the process will exit."
.format(self), self._logger)
if self._watching:
self._logger.info("Stopping config file observer")
self._observer.stop()
self._observer.join()
self._watching = False
self._logger.debug("Config file observer stopped")
else:
self._logger.info("Config file observer already stopped")
self.disconnect_from_rabbit()
self._terminate_and_stop_sending_configs_thread()
log_and_print("{} terminated.".format(self), self._logger)
sys.exit()
def foreach_config_file(self, callback: Callable[[str], None]) -> None:
"""
Runs a function over all the files being watched by this class
        :param callback: The function to call for each file. Must accept a string for the
file path as {config_directory} + {file path}
:return: Nothing
"""
for root, dirs, files in os.walk(self.config_directory):
for name in files:
if any([fnmatch.fnmatch(name, pattern) for pattern in
self._file_patterns]):
callback(os.path.join(root, name))
|
HPLC_control.py
|
# This could become a mess...
# what needs to be done is switch the lamps on, which works over serial.
# the rest is just sending commands to the console, possibly also to another machine
# https://www.dataapex.com/documentation/Content/Help/110-technical-specifications/110.020-command-line-parameters/110.020-command-line-parameters.htm?Highlight=command%20line
import socket
import subprocess
from pathlib import Path
from threading import Thread
from time import sleep
from typing import Union
import tenacity
from flowchem.exceptions import InvalidConfiguration
try:
# noinspection PyUnresolvedReferences
from flowchem.components.devices.Knauer.Knauer_HPLC_NDA import Lamp_Command
HAS_KNAUER_COMMANDS = True
except ModuleNotFoundError:
HAS_KNAUER_COMMANDS = False
raise ModuleNotFoundError("You need to get the NDA communication from Knauer.")
# Todo should have a command constructor dataclass, would be more neat. For now, will do without to get it running asap
# TODO Very weird, when starting from synthesis, fractioning valve is blocked. no idea why, it's ip is not used.
class ClarityInterface:
def __init__(
self,
remote: bool = False,
host: str = None,
port: int = None,
path_to_executable: str = None,
instrument_number: int = 1,
):
if not HAS_KNAUER_COMMANDS:
raise InvalidConfiguration(
"Knauer Lamps unusable: no Knauer Commands available.\n"
"Contact your distributor to get the serial API documentation."
)
# just determine path to executable, and open socket if for remote usage
self.remote = remote
self.instrument = instrument_number
self.path_to_executable = path_to_executable
if self.remote:
self.interface = MessageSender(host, port)
self.command_executor = self.interface.open_socket_and_send
else:
self.command_executor = ClarityExecutioner.execute_command # type:ignore
# TODO would have to have some way to fail
@classmethod
def from_config(cls, config_dict: dict):
try:
pass
except:
pass
# if remote execute everything on other PC, else on this
# Todo doesn't make sense here, done other way
def execute_command(self, command_string):
if self.remote:
self.command_executor(command_string)
else:
self.command_executor(command_string, self.path_to_executable)
# bit displaced convenience function to switch on the lamps of hplc detector.
# TODO remove if published
def switch_lamp_on(self, address="192.168.10.111", port=10001):
"""
Has to be performed BEFORE starting clarity, otherwise sockets get blocked
Args:
address:
port:
Returns:
"""
# send the respective two commands and check return. Send to socket
message_sender = MessageSender(address, port)
message_sender.open_socket_and_send(Lamp_Command.deut_lamp_on)
sleep(1)
message_sender.open_socket_and_send(Lamp_Command.hal_lamp_on)
sleep(15)
# define relevant strings
def open_clarity_chrom(
self, user: str, config_file: str, password: str = None, start_method: str = ""
):
"""
        start_method: supply the path to the method to start with; this is important for a soft column start
        config_file: if you want to start with a specific instrument configuration, specify the location of the config file here
"""
if not password:
self.execute_command(
f"i={self.instrument} cfg={config_file} u={user} {start_method}"
)
else:
self.execute_command(
f"i={self.instrument} cfg={config_file} u={user} p={password} {start_method}"
)
sleep(20)
# TODO should be OS agnostic
def slow_flowrate_ramp(self, path: str, method_list: tuple = ()):
"""
path: path where the methods are located
method list
"""
for current_method in method_list:
self.execute_command(f"i={self.instrument} {path}\\{current_method}")
            # Not elegant, but sending and setting a method takes at least 10 s;
            # this only has to run during platform startup.
sleep(20)
def load_file(self, path_to_file: str):
"""has to be done to open project, then method. Take care to select 'Send Method to Instrument' option in Method
Sending Options dialog in System Configuration."""
self.execute_command(f"i={self.instrument} {path_to_file}")
sleep(10)
def set_sample_name(self, sample_name):
"""Sets the sample name for the next single run"""
self.execute_command(f"i={self.instrument} set_sample_name={sample_name}")
sleep(1)
def run(self):
"""Runs the instrument. Care should be taken to activate automatic data export on HPLC. (can be done via command,
but that only makes it more complicated). Takes at least 2 sec until run starts"""
self.execute_command(f"run={self.instrument}")
def exit(self):
"""Exit Clarity Chrom"""
self.execute_command("exit")
sleep(10)
class MessageSender:
def __init__(self, host, port):
self.host = host
self.port = port
# encode('utf-8')
@tenacity.retry(
stop=tenacity.stop_after_attempt(5), wait=tenacity.wait_fixed(2), reraise=True
)
def open_socket_and_send(self, message: str):
s = socket.socket()
s.connect((self.host, self.port))
s.sendall(message.encode("utf-8"))
s.close()
class ClarityExecutioner:
"""This needs to run on the computer having claritychrom installed, except for one uses the same PC. However,
going via socket and localhost would also work, but seems a bit cumbersome.
open up server socket. Everything coming in will be prepended with claritychrom.exe (if it is not already)"""
command_prepend = "claritychrom.exe"
def __init__(self, port, allowed_client="192.168.10.20", host_ip="192.168.10.11"):
self.port = port
self.allowed_client = allowed_client
self.host_ip = host_ip
# think that should also go in thread, otherwise blocks
self.server_socket = self.open_server()
self.executioner = Thread(target=self.get_commands_and_execute, daemon=False)
print("a")
self.executioner.start()
print("b")
def open_server(self):
s = socket.socket()
s.bind((self.host_ip, self.port))
s.listen(5)
return s
def accept_new_connection(self):
client_socket, address = self.server_socket.accept()
if not address[0] == self.allowed_client:
client_socket.close()
print(f"nice try {client_socket, address}")
else:
# if below code is executed, that means the sender is connected
print(f"[+] {address} is connected.")
# in unicode
request = client_socket.recv(1024).decode("utf-8")
client_socket.close()
print(request)
return request
# TODO: instrument number has to go into command execution
def execute_command(
self,
command: str,
folder_of_executable: Union[Path, str] = r"C:\claritychrom\bin\\",
):
prefix = "claritychrom.exe"
# sanitize input a bit
if command.split(" ")[0] != prefix:
command = folder_of_executable + prefix + " " + command # type:ignore
print(command)
try:
            subprocess.run(command, shell=True, capture_output=False, timeout=3)
except subprocess.TimeoutExpired:
print("Damn, Subprocess")
def get_commands_and_execute(self):
while True:
request = self.accept_new_connection()
self.execute_command(request)
sleep(1)
print("listening")
# TODO: the dsk or k switches for opening with a specific desktop could also be helpful.
# TODO Export results can be specified -> exports result, rewrite to a nicer interface
if __name__ == "__main__":
computer_w_Clarity = False
if computer_w_Clarity:
analyser = ClarityExecutioner(10014)
else:
commander = ClarityInterface(
remote=True, host="192.168.10.11", port=10014, instrument_number=2
)
commander.exit()
commander.switch_lamp_on() # address and port hardcoded
commander.open_clarity_chrom(
"admin",
config_file=r"C:\ClarityChrom\Cfg\automated_exp.cfg ",
start_method=r"D:\Data2q\sugar-optimizer\autostartup_analysis\autostartup_005_Sugar-c18_shortened.MET",
)
commander.slow_flowrate_ramp(
r"D:\Data2q\sugar-optimizer\autostartup_analysis",
method_list=(
"autostartup_005_Sugar-c18_shortened.MET",
"autostartup_01_Sugar-c18_shortened.MET",
"autostartup_015_Sugar-c18_shortened.MET",
"autostartup_02_Sugar-c18_shortened.MET",
"autostartup_025_Sugar-c18_shortened.MET",
"autostartup_03_Sugar-c18_shortened.MET",
"autostartup_035_Sugar-c18_shortened.MET",
"autostartup_04_Sugar-c18_shortened.MET",
"autostartup_045_Sugar-c18_shortened.MET",
"autostartup_05_Sugar-c18_shortened.MET",
),
)
commander.load_file(
r"D:\Data2q\sugar-optimizer\autostartup_analysis\auto_Sugar-c18_shortened.MET"
)
# commander.load_file("opendedicatedproject") # open a project for measurements
commander.set_sample_name("test123")
commander.run()
|
_progress.py
|
from __future__ import division, absolute_import
import sys
import threading
import time
from timeit import default_timer
def format_time(t):
"""Format seconds into a human readable form.
>>> format_time(10.4)
'10.4s'
>>> format_time(1000.4)
'16min 40.4s'
"""
m, s = divmod(t, 60)
h, m = divmod(m, 60)
if h:
return '{0:2.0f}hr {1:2.0f}min {2:4.1f}s'.format(h, m, s)
elif m:
return '{0:2.0f}min {1:4.1f}s'.format(m, s)
else:
return '{0:4.1f}s'.format(s)
class progressbar(object):
"""A simple progressbar for iterables.
Displays a progress bar showing progress through an iterable.
Parameters
----------
iterable : iterable
The object to iterate over.
width : int, optional
Width of the bar in characters.
enabled : bool, optional
Whether to log progress. Useful for turning off progress reports
without changing your code. Default is True.
file : file, optional
Where to log progress. Default is ``sys.stdout``.
Example
-------
>>> with progressbar(iterable) as itbl: # doctest: +SKIP
... for i in itbl:
... do_stuff(i)
[########################################] | 100% Completed | 5.2 s
"""
def __init__(self, iterable, width=40, enabled=True, file=None):
self._iterable = iterable
self._ndone = 0
self._ntotal = len(iterable) + 1 # wait for exit to finish
self._width = width
self._enabled = enabled
self._file = sys.stdout if file is None else file
def __enter__(self):
if self._enabled:
self._start_time = default_timer()
# Start background thread
self._running = True
self._timer = threading.Thread(target=self._timer_func)
self._timer.daemon = True
self._timer.start()
return self
def __exit__(self, type, value, traceback):
if self._enabled:
self._running = False
self._timer.join()
if type is None: # Finished if no exception
self._ndone += 1
self._update_bar()
self._file.write('\n')
self._file.flush()
def __iter__(self):
for i in self._iterable:
self._ndone += 1
yield i
def _timer_func(self):
while self._running:
self._update_bar()
time.sleep(0.1)
def _update_bar(self):
elapsed = default_timer() - self._start_time
frac = (self._ndone / self._ntotal) if self._ntotal else 1
bar = '#' * int(self._width * frac)
percent = int(100 * frac)
elapsed = format_time(elapsed)
msg = '\r[{0:<{1}}] | {2}% Completed | {3}'.format(bar, self._width,
percent, elapsed)
try:
self._file.write(msg)
self._file.flush()
except ValueError:
pass
|
take_images.py
|
import base64
import io
import threading
import time
from time import sleep
import socketio
from picamera import PiCamera
from picamera.array import PiRGBArray
sio = socketio.Client()
sio.connect('http://localhost:5000')
with PiCamera() as camera:
    # Set the camera's resolution to 960x640 @ 15fps and give it a moment
    # to measure exposure etc.
    # camera.rotation = 180
camera.resolution = (960, 640)
camera.brightness = 50
camera.framerate = 15
camera.shutter_speed = 100000
camera.exposure_mode = "off"
camera.rotation = 0
rawCapture = PiRGBArray(camera, size=(960, 640))
time.sleep(0.1)
    for frame in camera.capture_continuous(rawCapture, format='rgb', use_video_port=True):  # PiRGBArray expects an unencoded RGB format
image = frame.array
rawCapture.truncate(0)
print("Picture")
b64_bytes = base64.b64encode(image)
b64_str = b64_bytes.decode()
        sio.emit('pictureSet', b64_str)
sleep(0.02)
break
sio.disconnect()
class imageToWebserverThread:
def __init__(self, buffer: io.BytesIO):
self.buffer = buffer
self._stopped = False
self.thread1 = threading.Thread(target=self.read_thread)
self.thread1.start()
    def read_thread(self):
while not self._stopped:
if self.buffer.readable():
pass
def stop(self):
self._stopped = True
self.thread1.join()
|
test_webpack.py
|
import json
import os
import time
from subprocess import call
from threading import Thread
import django
from django.conf import settings
from django.test import RequestFactory, TestCase
from django.views.generic.base import TemplateView
from django_jinja.builtins import DEFAULT_EXTENSIONS
from unittest2 import skipIf
from webpack_loader.exceptions import (
WebpackError,
WebpackLoaderBadStatsError,
WebpackLoaderTimeoutError,
WebpackBundleLookupError
)
from webpack_loader.utils import get_loader
BUNDLE_PATH = os.path.join(settings.BASE_DIR, 'assets/bundles/')
DEFAULT_CONFIG = 'DEFAULT'
class LoaderTestCase(TestCase):
def setUp(self):
self.factory = RequestFactory()
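    # Helper: run webpack with the given config file; the optional `wait` delays
    # the build and is used by test_request_blocking to simulate slow compilation.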
def compile_bundles(self, config, wait=None):
if wait:
time.sleep(wait)
call(['./node_modules/.bin/webpack', '--config', config])
@skipIf(django.VERSION < (1, 7),
'not supported in this django version')
def test_config_check(self):
from webpack_loader.apps import webpack_cfg_check
from webpack_loader.errors import BAD_CONFIG_ERROR
with self.settings(WEBPACK_LOADER={
'BUNDLE_DIR_NAME': 'bundles/',
'STATS_FILE': 'webpack-stats.json',
}):
errors = webpack_cfg_check(None)
expected_errors = [BAD_CONFIG_ERROR]
self.assertEqual(errors, expected_errors)
with self.settings(WEBPACK_LOADER={
'DEFAULT': {}
}):
errors = webpack_cfg_check(None)
expected_errors = []
self.assertEqual(errors, expected_errors)
def test_simple_and_css_extract(self):
self.compile_bundles('webpack.config.simple.js')
assets = get_loader(DEFAULT_CONFIG).get_assets()
self.assertEqual(assets['status'], 'done')
self.assertIn('chunks', assets)
chunks = assets['chunks']
self.assertIn('main', chunks)
self.assertEqual(len(chunks), 1)
files = assets['assets']
self.assertEqual(files['main.css']['path'], os.path.join(settings.BASE_DIR, 'assets/bundles/main.css'))
self.assertEqual(files['main.js']['path'], os.path.join(settings.BASE_DIR, 'assets/bundles/main.js'))
def test_js_gzip_extract(self):
self.compile_bundles('webpack.config.gzipTest.js')
assets = get_loader(DEFAULT_CONFIG).get_assets()
self.assertEqual(assets['status'], 'done')
self.assertIn('chunks', assets)
chunks = assets['chunks']
self.assertIn('main', chunks)
self.assertEqual(len(chunks), 1)
files = assets['assets']
self.assertEqual(files['main.css']['path'], os.path.join(settings.BASE_DIR, 'assets/bundles/main.css'))
self.assertEqual(files['main.js.gz']['path'], os.path.join(settings.BASE_DIR, 'assets/bundles/main.js.gz'))
def test_static_url(self):
self.compile_bundles('webpack.config.publicPath.js')
assets = get_loader(DEFAULT_CONFIG).get_assets()
self.assertEqual(assets['status'], 'done')
self.assertEqual(assets['publicPath'], 'http://custom-static-host.com/')
def test_code_spliting(self):
self.compile_bundles('webpack.config.split.js')
assets = get_loader(DEFAULT_CONFIG).get_assets()
self.assertEqual(assets['status'], 'done')
self.assertIn('chunks', assets)
chunks = assets['chunks']
self.assertIn('main', chunks)
        self.assertEqual(len(chunks), 1)
files = assets['assets']
self.assertEqual(files['main.js']['path'], os.path.join(settings.BASE_DIR, 'assets/bundles/main.js'))
self.assertEqual(files['vendors.js']['path'], os.path.join(settings.BASE_DIR, 'assets/bundles/vendors.js'))
def test_templatetags(self):
self.compile_bundles('webpack.config.simple.js')
self.compile_bundles('webpack.config.app2.js')
view = TemplateView.as_view(template_name='home.html')
request = self.factory.get('/')
result = view(request)
self.assertIn('<link type="text/css" href="/static/bundles/main.css" rel="stylesheet" />', result.rendered_content)
self.assertIn('<script type="text/javascript" src="/static/bundles/main.js" async charset="UTF-8"></script>', result.rendered_content)
self.assertIn('<link type="text/css" href="/static/bundles/app2.css" rel="stylesheet" />', result.rendered_content)
self.assertIn('<script type="text/javascript" src="/static/bundles/app2.js" ></script>', result.rendered_content)
self.assertIn('<img src="/static/my-image.png"/>', result.rendered_content)
view = TemplateView.as_view(template_name='only_files.html')
result = view(request)
self.assertIn("var contentCss = '/static/bundles/main.css'", result.rendered_content)
self.assertIn("var contentJS = '/static/bundles/main.js'", result.rendered_content)
self.compile_bundles('webpack.config.publicPath.js')
view = TemplateView.as_view(template_name='home.html')
request = self.factory.get('/')
result = view(request)
self.assertIn('<img src="http://custom-static-host.com/my-image.png"/>', result.rendered_content)
def test_jinja2(self):
self.compile_bundles('webpack.config.simple.js')
self.compile_bundles('webpack.config.app2.js')
view = TemplateView.as_view(template_name='home.jinja')
if django.VERSION >= (1, 8):
settings = {
'TEMPLATES': [
{
"BACKEND": "django_jinja.backend.Jinja2",
"APP_DIRS": True,
"OPTIONS": {
"match_extension": ".jinja",
"extensions": DEFAULT_EXTENSIONS + [
"webpack_loader.contrib.jinja2ext.WebpackExtension",
]
}
},
]
}
else:
settings = {
'TEMPLATE_LOADERS': (
'django_jinja.loaders.FileSystemLoader',
'django_jinja.loaders.AppLoader',
),
}
with self.settings(**settings):
request = self.factory.get('/')
result = view(request)
self.assertIn('<link type="text/css" href="/static/bundles/main.css" rel="stylesheet" />', result.rendered_content)
self.assertIn('<script type="text/javascript" src="/static/bundles/main.js" async charset="UTF-8"></script>', result.rendered_content)
def test_reporting_errors(self):
self.compile_bundles('webpack.config.error.js')
try:
get_loader(DEFAULT_CONFIG).get_bundle('main')
except WebpackError as e:
self.assertIn("Can't resolve 'the-library-that-did-not-exist'", str(e))
def test_missing_bundle(self):
missing_bundle_name = 'missing_bundle'
self.compile_bundles('webpack.config.simple.js')
try:
get_loader(DEFAULT_CONFIG).get_bundle(missing_bundle_name)
except WebpackBundleLookupError as e:
self.assertIn('Cannot resolve bundle {0}'.format(missing_bundle_name), str(e))
def test_missing_stats_file(self):
stats_file = settings.WEBPACK_LOADER[DEFAULT_CONFIG]['STATS_FILE']
if os.path.exists(stats_file):
os.remove(stats_file)
try:
get_loader(DEFAULT_CONFIG).get_assets()
except IOError as e:
expected = (
'Error reading {0}. Are you sure webpack has generated the '
'file and the path is correct?'
).format(stats_file)
self.assertIn(expected, str(e))
def test_timeouts(self):
with self.settings(DEBUG=True):
with open(
settings.WEBPACK_LOADER[DEFAULT_CONFIG]['STATS_FILE'], 'w'
) as stats_file:
stats_file.write(json.dumps({'status': 'compile'}))
loader = get_loader(DEFAULT_CONFIG)
loader.config['TIMEOUT'] = 0.1
with self.assertRaises(WebpackLoaderTimeoutError):
loader.get_bundle('main')
def test_bad_status_in_production(self):
with open(
settings.WEBPACK_LOADER[DEFAULT_CONFIG]['STATS_FILE'], 'w'
) as stats_file:
stats_file.write(json.dumps({'status': 'unexpected-status'}))
try:
get_loader(DEFAULT_CONFIG).get_bundle('main')
except WebpackLoaderBadStatsError as e:
self.assertIn((
"The stats file does not contain valid data. Make sure "
"webpack-bundle-tracker plugin is enabled and try to run"
" webpack again."
), str(e))
def test_request_blocking(self):
# FIXME: This will work 99% time but there is no guarantee with the
# 4 second thing. Need a better way to detect if request was blocked on
# not.
wait_for = 4
view = TemplateView.as_view(template_name='home.html')
with self.settings(DEBUG=True):
            with open(settings.WEBPACK_LOADER[DEFAULT_CONFIG]['STATS_FILE'], 'w') as stats_file:
                stats_file.write(json.dumps({'status': 'compile'}))
then = time.time()
request = self.factory.get('/')
result = view(request)
t = Thread(target=self.compile_bundles, args=('webpack.config.simple.js', wait_for))
t2 = Thread(target=self.compile_bundles, args=('webpack.config.app2.js', wait_for))
t.start()
t2.start()
            result.rendered_content  # force template rendering; blocks while the stats file still reports 'compile'
elapsed = time.time() - then
t.join()
t2.join()
self.assertTrue(elapsed >= wait_for)
with self.settings(DEBUG=False):
self.compile_bundles('webpack.config.simple.js')
self.compile_bundles('webpack.config.app2.js')
then = time.time()
request = self.factory.get('/')
result = view(request)
result.rendered_content
elapsed = time.time() - then
self.assertTrue(elapsed < wait_for)
|
telemetry.py
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import platform
import Queue
import re
import sys
import threading
from collections import deque
from os import getenv, sep
from os.path import join
from time import sleep, time
from traceback import format_exc
import click
import requests
from platformio import __version__, app, exception, util
class TelemetryBase(object):
def __init__(self):
self._params = {}
def __getitem__(self, name):
return self._params.get(name, None)
def __setitem__(self, name, value):
self._params[name] = value
def __delitem__(self, name):
if name in self._params:
del self._params[name]
def send(self, hittype):
raise NotImplementedError()
class MeasurementProtocol(TelemetryBase):
TID = "UA-1768265-9"
PARAMS_MAP = {
"screen_name": "cd",
"event_category": "ec",
"event_action": "ea",
"event_label": "el",
"event_value": "ev"
}
def __init__(self):
TelemetryBase.__init__(self)
self['v'] = 1
self['tid'] = self.TID
self['cid'] = app.get_cid()
self['sr'] = "%dx%d" % click.get_terminal_size()
self._prefill_screen_name()
self._prefill_appinfo()
self._prefill_custom_data()
def __getitem__(self, name):
if name in self.PARAMS_MAP:
name = self.PARAMS_MAP[name]
return TelemetryBase.__getitem__(self, name)
def __setitem__(self, name, value):
if name in self.PARAMS_MAP:
name = self.PARAMS_MAP[name]
TelemetryBase.__setitem__(self, name, value)
def _prefill_appinfo(self):
self['av'] = __version__
# gather dependent packages
dpdata = []
dpdata.append("PlatformIO/%s" % __version__)
if app.get_session_var("caller_id"):
dpdata.append("Caller/%s" % app.get_session_var("caller_id"))
if getenv("PLATFORMIO_IDE"):
dpdata.append("IDE/%s" % getenv("PLATFORMIO_IDE"))
self['an'] = " ".join(dpdata)
def _prefill_custom_data(self):
def _filter_args(items):
result = []
stop = False
for item in items:
item = str(item).lower()
result.append(item)
if stop:
break
if item == "account":
stop = True
return result
caller_id = str(app.get_session_var("caller_id"))
self['cd1'] = util.get_systype()
self['cd2'] = "Python/%s %s" % (platform.python_version(),
platform.platform())
# self['cd3'] = " ".join(_filter_args(sys.argv[1:]))
self['cd4'] = 1 if (not util.is_ci()
and (caller_id or not util.is_container())) else 0
if caller_id:
self['cd5'] = caller_id.lower()
def _prefill_screen_name(self):
def _first_arg_from_list(args_, list_):
for _arg in args_:
if _arg in list_:
return _arg
return None
if not app.get_session_var("command_ctx"):
return
ctx_args = app.get_session_var("command_ctx").args
args = [str(s).lower() for s in ctx_args if not str(s).startswith("-")]
if not args:
return
cmd_path = args[:1]
if args[0] in ("platform", "platforms", "serialports", "device",
"settings", "account"):
cmd_path = args[:2]
if args[0] == "lib" and len(args) > 1:
lib_subcmds = ("builtin", "install", "list", "register", "search",
"show", "stats", "uninstall", "update")
sub_cmd = _first_arg_from_list(args[1:], lib_subcmds)
if sub_cmd:
cmd_path.append(sub_cmd)
elif args[0] == "remote" and len(args) > 1:
remote_subcmds = ("agent", "device", "run", "test")
sub_cmd = _first_arg_from_list(args[1:], remote_subcmds)
if sub_cmd:
cmd_path.append(sub_cmd)
if len(args) > 2 and sub_cmd in ("agent", "device"):
remote2_subcmds = ("list", "start", "monitor")
sub_cmd = _first_arg_from_list(args[2:], remote2_subcmds)
if sub_cmd:
cmd_path.append(sub_cmd)
self['screen_name'] = " ".join([p.title() for p in cmd_path])
@staticmethod
def _ignore_hit():
if not app.get_setting("enable_telemetry"):
return True
if app.get_session_var("caller_id") and \
all(c in sys.argv for c in ("run", "idedata")):
return True
return False
def send(self, hittype):
if self._ignore_hit():
return
self['t'] = hittype
# correct queue time
if "qt" in self._params and isinstance(self['qt'], float):
self['qt'] = int((time() - self['qt']) * 1000)
MPDataPusher().push(self._params)
@util.singleton
class MPDataPusher(object):
MAX_WORKERS = 5
def __init__(self):
self._queue = Queue.LifoQueue()
self._failedque = deque()
self._http_session = requests.Session()
self._http_offline = False
self._workers = []
def push(self, item):
# if network is off-line
if self._http_offline:
if "qt" not in item:
item['qt'] = time()
self._failedque.append(item)
return
self._queue.put(item)
self._tune_workers()
def in_wait(self):
return self._queue.unfinished_tasks
def get_items(self):
items = list(self._failedque)
try:
while True:
items.append(self._queue.get_nowait())
except Queue.Empty:
pass
return items
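    # Prune dead worker threads and spawn enough new daemon workers to cover
    # the queued items, capped at MAX_WORKERS.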
def _tune_workers(self):
for i, w in enumerate(self._workers):
if not w.is_alive():
del self._workers[i]
need_nums = min(self._queue.qsize(), self.MAX_WORKERS)
active_nums = len(self._workers)
if need_nums <= active_nums:
return
for i in range(need_nums - active_nums):
t = threading.Thread(target=self._worker)
t.daemon = True
t.start()
self._workers.append(t)
def _worker(self):
while True:
try:
item = self._queue.get()
_item = item.copy()
if "qt" not in _item:
_item['qt'] = time()
self._failedque.append(_item)
if self._send_data(item):
self._failedque.remove(_item)
self._queue.task_done()
except: # pylint: disable=W0702
pass
def _send_data(self, data):
if self._http_offline:
return False
try:
r = self._http_session.post(
"https://ssl.google-analytics.com/collect",
data=data,
headers=util.get_request_defheaders(),
timeout=1)
r.raise_for_status()
return True
except requests.exceptions.HTTPError as e:
            # treat 4xx client errors (e.g. Bad Request) as non-retriable
            if 400 <= e.response.status_code < 500:
return True
except: # pylint: disable=W0702
pass
self._http_offline = True
return False
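# Module-level entry points: on_command() resends backed-up reports and records
# a screenview (plus CI info), on_event()/on_run_environment() record events,
# on_exception() records errors and crashes, and the atexit _finalize() waits
# briefly for pending hits before backing up whatever could not be sent.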
def on_command():
resend_backuped_reports()
mp = MeasurementProtocol()
mp.send("screenview")
if util.is_ci():
measure_ci()
def measure_ci():
event = {"category": "CI", "action": "NoName", "label": None}
envmap = {
"APPVEYOR": {
"label": getenv("APPVEYOR_REPO_NAME")
},
"CIRCLECI": {
"label":
"%s/%s" % (getenv("CIRCLE_PROJECT_USERNAME"),
getenv("CIRCLE_PROJECT_REPONAME"))
},
"TRAVIS": {
"label": getenv("TRAVIS_REPO_SLUG")
},
"SHIPPABLE": {
"label": getenv("REPO_NAME")
},
"DRONE": {
"label": getenv("DRONE_REPO_SLUG")
}
}
for key, value in envmap.iteritems():
if getenv(key, "").lower() != "true":
continue
event.update({"action": key, "label": value['label']})
on_event(**event)
def on_run_environment(options, targets):
opts = [
"%s=%s" % (opt, value.replace("\n", ", ") if "\n" in value else value)
for opt, value in sorted(options.items())
]
targets = [t.title() for t in targets or ["run"]]
on_event("Env", " ".join(targets), "&".join(opts))
def on_event(category, action, label=None, value=None, screen_name=None):
mp = MeasurementProtocol()
mp['event_category'] = category[:150]
mp['event_action'] = action[:500]
if label:
mp['event_label'] = label[:500]
if value:
mp['event_value'] = int(value)
if screen_name:
mp['screen_name'] = screen_name[:2048]
mp.send("event")
def on_exception(e):
def _cleanup_description(text):
text = text.replace("Traceback (most recent call last):", "")
text = re.sub(
r'File "([^"]+)"',
lambda m: join(*m.group(1).split(sep)[-2:]),
text,
flags=re.M)
text = re.sub(r"\s+", " ", text, flags=re.M)
return text.strip()
skip_conditions = [
isinstance(e, cls)
for cls in (IOError, exception.ReturnErrorCode,
exception.AbortedByUser, exception.NotGlobalLibDir,
exception.InternetIsOffline,
exception.NotPlatformIOProject,
exception.UserSideException)
]
try:
skip_conditions.append("[API] Account: " in str(e))
except UnicodeEncodeError as ue:
e = ue
if any(skip_conditions):
return
is_crash = any([
not isinstance(e, exception.PlatformioException),
"Error" in e.__class__.__name__
])
mp = MeasurementProtocol()
description = _cleanup_description(format_exc() if is_crash else str(e))
mp['exd'] = ("%s: %s" % (type(e).__name__, description))[:2048]
mp['exf'] = 1 if is_crash else 0
mp.send("exception")
@atexit.register
def _finalize():
timeout = 1000 # msec
elapsed = 0
try:
while elapsed < timeout:
if not MPDataPusher().in_wait():
break
sleep(0.2)
elapsed += 200
backup_reports(MPDataPusher().get_items())
except KeyboardInterrupt:
pass
def backup_reports(items):
if not items:
return
KEEP_MAX_REPORTS = 100
tm = app.get_state_item("telemetry", {})
if "backup" not in tm:
tm['backup'] = []
for params in items:
# skip static options
for key in params.keys():
if key in ("v", "tid", "cid", "cd1", "cd2", "sr", "an"):
del params[key]
# store time in UNIX format
if "qt" not in params:
params['qt'] = time()
elif not isinstance(params['qt'], float):
params['qt'] = time() - (params['qt'] / 1000)
tm['backup'].append(params)
tm['backup'] = tm['backup'][KEEP_MAX_REPORTS * -1:]
app.set_state_item("telemetry", tm)
def resend_backuped_reports():
tm = app.get_state_item("telemetry", {})
if "backup" not in tm or not tm['backup']:
return False
for report in tm['backup']:
mp = MeasurementProtocol()
for key, value in report.items():
mp[key] = value
mp.send(report['t'])
# clean
tm['backup'] = []
app.set_state_item("telemetry", tm)
return True
|
multi.py
|
import gym
import multiprocessing
import threading
import numpy as np
import os
import shutil
import matplotlib.pyplot as plt
import tensorflow as tf
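# NOTE: this script targets TensorFlow 1.x (tf.placeholder, tf.contrib, tf.train
# optimizers) and assumes the custom gym environment 'EpidemicMultiEnv-v0' is
# registered and that the population .npy files exist under ./data/.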
# PARAMETERS
OUTPUT_GRAPH = True # save logs
RENDER = True # render one worker
LOG_DIR = './log' # save location for logs
N_WORKERS = multiprocessing.cpu_count() # number of workers
MAX_EP_STEP = 200 # maximum number of steps per episode
MAX_GLOBAL_EP = 2000 # total number of episodes
GLOBAL_NET_SCOPE = 'Global_Net'
UPDATE_GLOBAL_ITER = 10 # sets how often the global net is updated
GAMMA = 0.90 # discount factor
ENTROPY_BETA = 0.01 # entropy factor
LR_A = 0.0001 # learning rate for actor
LR_C = 0.001 # learning rate for critic
population = [
np.load("./data/seocho.npy"),
np.load("./data/daechi.npy"),
np.load("./data/dogok.npy"),
np.load("./data/yangjae.npy"),
np.load("./data/sunreung.npy"),
np.load("./data/nambu.npy")
]
env = gym.make('EpidemicMultiEnv-v0')
env.env.__init__(200, population)
agent_num = env.env.agent_num
num_episodes = 300
# time.sleep(100)
r = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
env.reset()
# if RENDER: # uncomment if rendering does not work
# env.render()
N_S = env.observation_space.shape[0] # number of states
N_A = env.action_space.shape[0] # number of actions
A_BOUND = [env.action_space.low, env.action_space.high] # action bounds
# Network for the Actor Critic
class ACNet(object):
def __init__(self, scope, sess, globalAC=None):
self.sess = sess
self.actor_optimizer = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA') # optimizer for the actor
self.critic_optimizer = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC') # optimizer for the critic
if scope == GLOBAL_NET_SCOPE: # get global network
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S') # state
self.a_params, self.c_params = self._build_net(scope)[-2:] # parameters of actor and critic net
else: # local net, calculate losses
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S') # state
self.a_his = tf.placeholder(tf.float32, [None, N_A], 'A') # action
self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget') # v_target value
mu, sigma, self.v, self.a_params, self.c_params = self._build_net(
scope) # get mu and sigma of estimated action from neural net
td = tf.subtract(self.v_target, self.v, name='TD_error')
with tf.name_scope('c_loss'):
self.c_loss = tf.reduce_mean(tf.square(td))
with tf.name_scope('wrap_a_out'):
mu, sigma = mu * A_BOUND[1], sigma + 1e-4
normal_dist = tf.contrib.distributions.Normal(mu, sigma)
with tf.name_scope('a_loss'):
log_prob = normal_dist.log_prob(self.a_his)
exp_v = log_prob * td
entropy = normal_dist.entropy() # encourage exploration
self.exp_v = ENTROPY_BETA * entropy + exp_v
self.a_loss = tf.reduce_mean(-self.exp_v)
with tf.name_scope('choose_a'): # use local params to choose action
self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=0), A_BOUND[0],
A_BOUND[1]) # sample a action from distribution
with tf.name_scope('local_grad'):
self.a_grads = tf.gradients(self.a_loss,
self.a_params) # calculate gradients for the network weights
self.c_grads = tf.gradients(self.c_loss, self.c_params)
with tf.name_scope('sync'): # update local and global network weights
with tf.name_scope('pull'):
self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
with tf.name_scope('push'):
self.update_a_op = self.actor_optimizer.apply_gradients(zip(self.a_grads, globalAC.a_params))
self.update_c_op = self.critic_optimizer.apply_gradients(zip(self.c_grads, globalAC.c_params))
def _build_net(self, scope): # neural network structure of the actor and critic
w_init = tf.random_normal_initializer(0., .1)
with tf.variable_scope('actor'):
l_a = tf.layers.dense(self.s, 200, tf.nn.relu6, kernel_initializer=w_init, name='la')
mu = tf.layers.dense(l_a, N_A, tf.nn.tanh, kernel_initializer=w_init, name='mu') # estimated action value
sigma = tf.layers.dense(l_a, N_A, tf.nn.softplus, kernel_initializer=w_init,
name='sigma') # estimated variance
with tf.variable_scope('critic'):
l_c = tf.layers.dense(self.s, 100, tf.nn.relu6, kernel_initializer=w_init, name='lc')
v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v') # estimated value for state
a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
return mu, sigma, v, a_params, c_params
def update_global(self, feed_dict): # run by a local
self.sess.run([self.update_a_op, self.update_c_op], feed_dict) # local grads applies to global net
def pull_global(self): # run by a local
self.sess.run([self.pull_a_params_op, self.pull_c_params_op])
def choose_action(self, s): # run by a local
s = s[np.newaxis, :]
return self.sess.run(self.A, {self.s: s})[0]
# worker class that inits its own environment, trains on it and uploads weights to the global net
class Worker(object):
def __init__(self, name, globalAC, sess):
self.env = gym.make('EpidemicMultiEnv-v0').unwrapped # make environment for each worker
self.name = name
self.AC = ACNet(name, sess, globalAC) # create ACNet for each worker
self.sess = sess
def work(self):
global global_rewards, global_episodes
total_step = 1
buffer_s, buffer_a, buffer_r = [], [], []
while not coord.should_stop() and global_episodes < MAX_GLOBAL_EP:
s = self.env.reset()
ep_r = 0
for ep_t in range(MAX_EP_STEP):
if self.name == 'W_0' and RENDER:
self.env.render()
a = self.AC.choose_action(s) # estimate stochastic action based on policy
s_, r, done, info = self.env.step(a) # make step in environment
done = True if ep_t == MAX_EP_STEP - 1 else False
ep_r += r
# save actions, states and rewards in buffer
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append((r + 8) / 8) # normalize reward
if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
if done:
v_s_ = 0 # terminal
else:
v_s_ = self.sess.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]
buffer_v_target = []
for r in buffer_r[::-1]: # reverse buffer r
v_s_ = r + GAMMA * v_s_
buffer_v_target.append(v_s_)
buffer_v_target.reverse()
buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(
buffer_v_target)
feed_dict = {
self.AC.s: buffer_s,
self.AC.a_his: buffer_a,
self.AC.v_target: buffer_v_target,
}
self.AC.update_global(feed_dict) # actual training step, update global ACNet
buffer_s, buffer_a, buffer_r = [], [], []
self.AC.pull_global() # get global parameters to local ACNet
s = s_
total_step += 1
if done:
if len(global_rewards) < 5: # record running episode reward
global_rewards.append(ep_r)
else:
global_rewards.append(ep_r)
global_rewards[-1] = (np.mean(global_rewards[-5:])) # smoothing
print(
self.name,
"Ep:", global_episodes,
"| Ep_r: %i" % global_rewards[-1],
)
global_episodes += 1
break
if __name__ == "__main__":
global_rewards = []
global_episodes = 0
sess = tf.Session()
with tf.device("/cpu:0"):
global_ac = ACNet(GLOBAL_NET_SCOPE, sess) # we only need its params
workers = []
# Create workers
for i in range(N_WORKERS):
i_name = 'W_%i' % i # worker name
workers.append(Worker(i_name, global_ac, sess))
coord = tf.train.Coordinator()
sess.run(tf.global_variables_initializer())
if OUTPUT_GRAPH: # write log file
if os.path.exists(LOG_DIR):
shutil.rmtree(LOG_DIR)
tf.summary.FileWriter(LOG_DIR, sess.graph)
worker_threads = []
for worker in workers: # start workers
job = lambda: worker.work()
t = threading.Thread(target=job)
t.start()
worker_threads.append(t)
coord.join(worker_threads) # wait for termination of workers
plt.plot(np.arange(len(global_rewards)), global_rewards) # plot rewards
plt.xlabel('step')
plt.ylabel('total moving reward')
plt.show()
|
MonochromatorGUI.py
|
import os
os.environ['KIVY_IMAGE'] = 'pil,sdl2'
import serial
import threading
import re
import time
from serial.tools import list_ports
from uart import uart
from kivy.properties import StringProperty
from kivy.uix.gridlayout import GridLayout
from kivy.uix.widget import Widget
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.button import Button
from kivy.app import App
from kivy.uix.textinput import TextInput
from kivy.uix.label import Label
from kivy.uix.slider import Slider
from kivy.uix.dropdown import DropDown
from kivy.graphics import Color, Rectangle
class ColorLabel(Label):
def on_size(self, *args):
self.canvas.before.clear()
with self.canvas.before:
Color(1, 1, 1, 0.7)
Rectangle(pos=self.pos, size=self.size)
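# TextInput that accepts only digits and at most one decimal point.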
class FloatInput(TextInput):
pat = re.compile('[^0-9]')
def insert_text(self, substring, from_undo=False):
pat = self.pat
if '.' in self.text:
s = re.sub(pat, '', substring)
else:
s = '.'.join([re.sub(pat, '', s) for s in substring.split('.', 1)])
return super(FloatInput, self).insert_text(s, from_undo=from_undo)
class MyApp(App):
#------------------GLOBALS---------------------
#--------SET Globals startup variables------------
connection_flag = False
slider_value = 0
kill_event = threading.Event()
    current_wavelength_value = StringProperty("[nm]")
#-------------------------------------------------
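    # The instrument is driven by fixed-width 12-character UART commands, as used
    # below: "xgo_to%06d" (go to a wavelength, value apparently in thousandths of
    # a nm), "xgetpo000000" (query position), "xaucal000000" (auto-calibration),
    # and "xgo_up..."/"xgo_dw..." step commands; replies whose characters 1-5 read
    # "xSETx" carry the current wavelength in their last six characters (see check()).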
def check(self,cmd):
if len(cmd)==12:
if cmd[1:-6]=="xSETx":
self.current_wavelength_value=cmd[6:]
#----------FUNCTIONS-------------
def read(self,kill_event, x):
self.kill_event.clear()
print('reading thread run')
tmp = ''
raw = []
while not self.kill_event.is_set():
buf = self.ser.read_byte()
if type(buf) is int:
if buf != ord('\n') and buf != ord('\r'):
tmp += chr(buf)
raw.append(buf)
elif len(raw) > 0:
print(tmp)
tmp = ''
raw = []
else:
if tmp != '' and tmp != '\n' and tmp != '\r' and len(raw) > 0:
print(tmp)
tmp = ''
raw = []
print("kill thread")
def COM_select(self,x):
self.demand_close()
print(x)
self.kill_event.clear()
self.ser = uart(baudrate=11520, name=x)
self.connection_flag=True
thread = threading.Thread(target=self.read, args=(self.kill_event, "task"))
thread.start()
def demand_close(self,*instance):
if(self.connection_flag==True):
self.kill_event.set()
time.sleep(0.02)
self.ser.close_connection()
self.connection_flag=False
#--------------------------------
def on_text(self,instance, value):
self.wavelength_value=value
    def data_send(self,instance):
if(self.connection_flag==True):
if(self.wavelength_value):
value=int(float(self.wavelength_value)*1000)
self.ser.writeword("xgo_to%06d"%value)
def calibration(self,*instance):
if(self.connection_flag==True):
self.ser.writeword("xaucal000000")
else:
print("\n\rCan't send calibration request: no connection open\n\r")
def OnGetWavelengthValue(self,instance):
if(self.connection_flag==True):
self.ser.writeword("xgetpo000000")
def Go_Up(self,instance):
if(self.connection_flag==True):
if(self.slider_value==1):
self.ser.writeword("xgo_up001000")
elif(self.slider_value==2):
self.ser.writeword("xgo_up000100")
elif(self.slider_value==3):
self.ser.writeword("xgo_up000010")
elif(self.slider_value==4):
self.ser.writeword("xgo_up000001")
else:
self.ser.writeword("xgo_up001000")
def Go_Down(self,instance):
if(self.connection_flag==True):
if(self.slider_value==1):
self.ser.writeword("xgo_dw001000")
elif(self.slider_value==2):
self.ser.writeword("xgo_dw000100")
elif(self.slider_value==3):
self.ser.writeword("xgo_dw000010")
elif(self.slider_value==4):
self.ser.writeword("xgo_dw000001")
else:
self.ser.writeword("xgo_dw001000")
#--------------------------------------
def build(self):
#--------------------------------
dropdown = DropDown()
for port in list_ports.comports():
btn = Button(text='%s' % (port.description), size_hint_y=None, height=44, width=150)
btn.bind(on_release=lambda btn: dropdown.select(btn.text))
dropdown.add_widget(btn)
dropdown.bind(on_select=lambda instance, x: setattr(mainbutton, 'text', x))
dropdown.bind(on_select=lambda instance, x: self.COM_select(x))
#--------------------------------
data_input = FloatInput(size_hint=(1, 0.66))
data_input.bind(text=self.on_text)
#------------------------------
send_data=Button(text='Go to [nm]:', width=350,size_hint=(1, 1))
send_data.bind(on_press=self.data_send)
#-----------------------------
        Get_Data_btn=Button(text='Check Position', size_hint=(0.5, 0.66666),pos_hint={'x': 0, 'center_y': 0.6666666})
Get_Data_Lab=ColorLabel(text=self.current_wavelength_value, markup=True, size_hint=(0.5, .66),pos_hint={'x': 0, 'center_y': 0.666666})
Get_Data_btn.bind(on_press=self.OnGetWavelengthValue)
#-----------------------------
mainbutton = Button(text='Select COM port',size_hint=(1, 1))
mainbutton.bind(on_release=dropdown.open)
#----------------------------
con_close = Button(text='COM port close',size_hint=(1, 1))
con_close.bind(on_press=self.demand_close)
#----------------------------
slider_label=Label(text="Move by: nanometer", markup=True, pos_hint={'x': 0, 'center_y': 1.5}, size_hint=(1, 1))
#----------------------------
def OnSliderValueChange(instance,value):
self.slider_value=value
if(value==1):
slider_label.text ="Move by: nanometer"
elif(value==2):
slider_label.text ="Move by: tenth of nanometer"
elif(value==3):
slider_label.text ="Move by: hundreth of nanometer"
elif(value==4):
slider_label.text ="Move by: thousandth of nanometer"
data_slider=Slider(min=1, max=4,step=1,value=1, orientation='horizontal', pos_hint={'x': 0, 'center_y': 1}, size_hint=(1, 1))
data_slider.bind(value=OnSliderValueChange)
#-----------------------------
auto_cal=Button(text='AutoCalibration', size_hint=(1, 1))
auto_cal.bind(on_press=self.calibration)
#-----------------------------
step_down=Button(text='Step Down', size_hint=(0.5, .6666),pos_hint={'x': 0, 'center_y': 1.})
step_down.bind(on_press=self.Go_Down)
step_up=Button(text='Step Up', size_hint=(.5, .6666),pos_hint={'x': 0.5, 'center_y': 1.})
step_up.bind(on_press=self.Go_Up)
#------------------------------
main_layout = GridLayout(cols=2)
controls_layout = BoxLayout(orientation='vertical')
controls_layout.add_widget(send_data)
controls_layout.add_widget(auto_cal)
controls_layout.add_widget(con_close)
controls_layout.add_widget(mainbutton)
inputs_layout = BoxLayout(orientation='vertical')
inputs_layout.add_widget(data_input)
getdata_layout = BoxLayout(orientation='horizontal')
getdata_layout.add_widget(Get_Data_Lab)
getdata_layout.add_widget(Get_Data_btn)
inputs_layout.add_widget(getdata_layout)
manual_layout = GridLayout(rows=2)
manual_sublayout1=FloatLayout()
manual_sublayout1.add_widget(slider_label)
manual_sublayout1.add_widget(data_slider)
manual_sublayout2=FloatLayout()
manual_sublayout2.add_widget(step_down)
manual_sublayout2.add_widget(step_up)
manual_layout.add_widget(manual_sublayout1)
manual_layout.add_widget(manual_sublayout2)
inputs_layout.add_widget(manual_layout)
main_layout.add_widget(controls_layout)
main_layout.add_widget(inputs_layout)
return main_layout
def on_stop(self):
self.demand_close()
MyApp().run()
|
doh-fork-async.py
|
#!/usr/bin/env python3
import asyncio, aiohttp, aioprocessing
import random, struct
import argparse, logging
# Handle command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--listen-address', default='127.0.0.1',
help='address to listen on for DNS over HTTPS requests (default: %(default)s)')
parser.add_argument('-p', '--listen-port', type=int, default=53,
help='port to listen on for DNS over HTTPS requests (default: %(default)s)')
parser.add_argument('-u', '--upstreams', nargs='+', default=['https://1.1.1.1/dns-query', 'https://1.0.0.1/dns-query'],
help='upstream servers to forward DNS queries and requests to (default: %(default)s)')
parser.add_argument('-t', '--tcp', action='store_true', default=False,
help='serve TCP based queries and requests along with UDP (default: %(default)s)')
args = parser.parse_args()
host = args.listen_address
port = args.listen_port
upstreams = args.upstreams
headers = {'accept': 'application/dns-message', 'content-type': 'application/dns-message'}
conns = []
request_queue = aioprocessing.AioQueue()
response_queue = aioprocessing.AioQueue()
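# The listening process accepts UDP/TCP DNS queries and hands them to a small
# pool of forwarder processes through these shared queues; the forwarders relay
# the queries upstream over HTTPS and push the responses back.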
def main():
# Setup logging
logging.basicConfig(level='INFO', format='[%(levelname)s] %(message)s')
# Setup event loop
loop = asyncio.get_event_loop()
# Setup UDP server
logging.info('Starting UDP server listening on: %s#%d' % (host, port))
udp_listen = loop.create_datagram_endpoint(UdpDohProtocol, local_addr = (host, port))
udp, protocol = loop.run_until_complete(udp_listen)
# Setup TCP server
if args.tcp:
logging.info('Starting TCP server listening on %s#%d' % (host, port))
tcp_listen = loop.create_server(TcpDohProtocol, host, port)
tcp = loop.run_until_complete(tcp_listen)
# # Connect to upstream servers
# for upstream in upstreams:
# logging.info('Connecting to upstream server: %s' % (upstream))
# conns.append(loop.run_until_complete(upstream_connect()))
# Serve forever
try:
for _ in range(3):
aioprocessing.AioProcess(target=forwarder, daemon=True).start()
loop.run_forever()
except (KeyboardInterrupt, SystemExit):
pass
# # Close upstream connections
# for conn in conns:
# loop.run_until_complete(upstream_close(conn))
# Close listening servers and event loop
udp.close()
if args.tcp:
tcp.close()
loop.close()
def forwarder():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
conns = []
# Connect to upstream servers
for upstream in upstreams:
logging.info('Connecting to upstream server: %s' % (upstream))
conns.append(loop.run_until_complete(upstream_connect()))
asyncio.ensure_future(forward_loop(conns))
try:
loop.run_forever()
except (KeyboardInterrupt, SystemExit):
pass
# Close upstream connections
for conn in conns:
loop.run_until_complete(upstream_close(conn))
async def forward_loop(conns):
while True:
# Receive requests from the listener
data, addr = await request_queue.coro_get()
# Schedule packet forwarding
asyncio.ensure_future(forward(data, addr, conns))
async def forward(data, addr, conns):
# Select upstream server to forward to
index = random.randrange(len(upstreams))
# Await upstream forwarding coroutine
data = await upstream_forward(upstreams[index], data, conns[index])
# Send response to the listener
await response_queue.coro_put((data, addr))
class UdpDohProtocol(asyncio.DatagramProtocol):
"""
DNS over HTTPS UDP protocol to use with asyncio.
"""
def connection_made(self, transport):
self.transport = transport
def datagram_received(self, data, addr):
# Schedule packet forwarding
asyncio.ensure_future(self.forward(data, addr))
def error_received(self, exc):
logging.warning('Minor transport error')
async def forward(self, data, addr):
# Send request to forwarder
await request_queue.coro_put((data, addr))
# Receive response from forwarder
data, addr = await response_queue.coro_get()
# Send response to the client
self.transport.sendto(data, addr)
class TcpDohProtocol(asyncio.Protocol):
"""
DNS over HTTPS TCP protocol to use with asyncio.
"""
def connection_made(self, transport):
self.transport = transport
def data_received(self, data):
# Schedule packet forwarding
asyncio.ensure_future(self.forward(data))
def eof_received(self):
if self.transport.can_write_eof():
self.transport.write_eof()
def connection_lost(self, exc):
self.transport.close()
async def forward(self, data):
# Send request to forwarder (remove 16-bit length prefix)
await request_queue.coro_put((data[2:], None))
# Receive response from forwarder
data, _ = await response_queue.coro_get()
# Send response to the client (add 16-bit length prefix)
self.transport.write(struct.pack('! H', len(data)) + data)
async def upstream_connect():
"""
Create an upstream connection that will later be bound to a url.
Returns:
A aiohttp session object
"""
# Create connection with default DNS message headers
return aiohttp.ClientSession(headers=headers)
async def upstream_forward(url, data, conn):
"""
Send a DNS request over HTTPS using POST method.
Params:
url - url to forward queries to
data - normal DNS packet data to forward
conn - HTTPS connection to upstream DNS server
Returns:
A normal DNS response packet from upstream server
Notes:
Using DNS over HTTPS POST format as described here:
https://tools.ietf.org/html/draft-ietf-doh-dns-over-https-12
https://developers.cloudflare.com/1.1.1.1/dns-over-https/wireformat/
"""
disconnected = False
# Await upstream response
while True:
try:
# Attempt to query the upstream server asynchronously
async with conn.post(url, data=data) as response:
                if disconnected:
logging.info('Reconnected to upstream server: %s' % (url))
disconnected = False
if response.status == 200:
return await response.read()
# Log abnormal HTTP status codes
logging.warning('%s (%d): IN %s, OUT %s' % (url, response.status, data, await response.read()))
# Log connection errors (aiohttp should attempt to reconnect on next request)
except aiohttp.ClientConnectionError:
logging.error('Connection error with upstream server: %s' % (url))
disconnected = True
async def upstream_close(conn):
"""
Close an upstream connection.
Params:
conn - aiohttp session object to close
"""
await conn.close()
if __name__ == '__main__':
main()
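# Example invocation (binding port 53 typically requires elevated privileges):
#     sudo python3 doh-fork-async.py --listen-address 127.0.0.1 --listen-port 53 --tcp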
|