sample_generator.py
from collections import deque
import threading
import randomgen
from utils.sampling.perlin import create_perlin_noise, calc_fade
class SampleGenerator:
"""
Multi-threaded util that runs in the background and precalculates noise samples. To be used in with statement!
Not a python "generator". Sorry about the name.
"""
def __init__(self, shape, n_threads=1, queue_lengths=40):
self.shape = shape
self.n_threads = n_threads
self.queue_lengths = queue_lengths
self.queue_normal = deque()
self.queue_perlin = deque()
self.perlin_fade = calc_fade(shape[0])
self.perlin_color = True
        # Manually reimplemented Queue locking to cover two deques instead of one.
self.lock = threading.Lock()
self.cv_not_full = threading.Condition(self.lock) # Producer condition
self.cv_not_empty = threading.Condition(self.lock) # Consumer condition
self.is_running = True
self.threads = []
for thread_id in range(n_threads):
thread = threading.Thread(target=self._thread_fun, args=(thread_id,))
thread.start()
self.threads.append(thread)
def __enter__(self):
# Threads already started at __init__
return self
def __exit__(self, exc_type, exc_value, traceback):
# Stop all the threads.
for thread in self.threads:
thread.do_run = False
self.is_running = False
# Stop producers.
with self.cv_not_full:
self.cv_not_full.notify_all()
# Stop consumers, if any.
        # Usually, consumers run on the same thread that created (and destroys) this object, so none will be waiting at this point.
        # If, however, a consumer runs on a background thread, it could still be waiting, and it will then receive an
        # InterruptedError here.
with self.cv_not_empty:
self.cv_not_empty.notify_all()
for thread in self.threads:
thread.join()
print("SampleGenerator: all threads stopped.")
def _thread_fun(self, thread_id):
        # Create a thread-specific RNG.
rng = randomgen.RandomGenerator(randomgen.Xoroshiro128(seed=20 + thread_id))
rnd_normal = None
rnd_perlin = None
        t = threading.current_thread()
while getattr(t, 'do_run', True):
            # Prepare one sample of each pattern.
if rnd_normal is None:
rnd_normal = rng.standard_normal(size=self.shape, dtype='float64')
if rnd_perlin is None:
rnd_perlin = create_perlin_noise(color=self.perlin_color, batch_size=1, normalize=True, precalc_fade=self.perlin_fade)[0]
# Lock and put them into the queues.
with self.cv_not_full:
if len(self.queue_normal) >= self.queue_lengths and len(self.queue_perlin) >= self.queue_lengths:
self.cv_not_full.wait()
# Fill one or both queues.
if len(self.queue_normal) < self.queue_lengths:
self.queue_normal.append(rnd_normal)
rnd_normal = None
if len(self.queue_perlin) < self.queue_lengths:
self.queue_perlin.append(rnd_perlin)
rnd_perlin = None
self.cv_not_empty.notify_all()
def get_normal(self):
"""
Returns a std-normal noise vector - not normalized!
"""
with self.cv_not_empty:
while len(self.queue_normal) == 0:
self.cv_not_empty.wait()
if not self.is_running:
raise InterruptedError("Trying to consume an item, but the SampleGenerator was already shut down!")
retval = self.queue_normal.popleft()
self.cv_not_full.notify()
return retval
def get_perlin(self):
"""
Returns a perlin noise vector, normalized to L2=1
"""
with self.cv_not_empty:
while len(self.queue_perlin) == 0:
self.cv_not_empty.wait()
if not self.is_running:
raise InterruptedError("Trying to consume an item, but the SampleGenerator was already shut down!")
retval = self.queue_perlin.popleft()
self.cv_not_full.notify()
return retval
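# Usage sketch (illustrative, not part of the original module): the class is
# designed to be driven through a `with` block so that __exit__ always stops the
# producer threads. The shape and iteration count below are assumed values.
#
#   with SampleGenerator(shape=(64, 64, 3), n_threads=2) as gen:
#       for _ in range(10):
#           normal_sample = gen.get_normal()   # precalculated std-normal noise
#           perlin_sample = gen.get_perlin()   # precalculated, L2-normalized
#   # leaving the with-block joins all producer threads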
tcp.py
# -*- coding: utf-8 -*-
'''
TCP transport classes
Wire protocol: "len(payload) msgpack({'head': SOMEHEADER, 'body': SOMEBODY})"
'''
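# Illustrative sketch of the wire protocol described above (not part of the
# module): each message is a msgpack map with 'head' and 'body', prefixed by the
# payload length. The 4-byte big-endian length prefix is an assumption here;
# the real framing lives in salt.transport.frame.
#
#   import msgpack, struct
#   payload = msgpack.dumps({'head': {'mid': 1}, 'body': b'hello'})
#   frame = struct.pack('!I', len(payload)) + payload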
# Import Python Libs
from __future__ import absolute_import
import logging
import msgpack
import socket
import os
import weakref
import time
import traceback
import errno
# Import Salt Libs
import salt.crypt
import salt.utils
import salt.utils.verify
import salt.utils.event
import salt.utils.async
import salt.utils.files
import salt.payload
import salt.exceptions
import salt.transport.frame
import salt.transport.ipc
import salt.transport.client
import salt.transport.server
import salt.transport.mixins.auth
import salt.ext.six as six
from salt.ext.six.moves import queue # pylint: disable=import-error
from salt.exceptions import SaltReqTimeoutError, SaltClientError
from salt.transport import iter_transport_opts
# Import Tornado Libs
import tornado
import tornado.tcpserver
import tornado.gen
import tornado.concurrent
import tornado.tcpclient
import tornado.netutil
# pylint: disable=import-error,no-name-in-module
if six.PY2:
import urlparse
else:
import urllib.parse as urlparse
# pylint: enable=import-error,no-name-in-module
# Import third party libs
try:
from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
from Crypto.Cipher import PKCS1_OAEP
if six.PY3 and salt.utils.is_windows():
USE_LOAD_BALANCER = True
else:
USE_LOAD_BALANCER = False
if USE_LOAD_BALANCER:
import threading
import multiprocessing
import errno
import tornado.util
from salt.utils.process import SignalHandlingMultiprocessingProcess
log = logging.getLogger(__name__)
def _set_tcp_keepalive(sock, opts):
'''
Ensure that TCP keepalives are set for the socket.
'''
if hasattr(socket, 'SO_KEEPALIVE'):
if opts.get('tcp_keepalive', False):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if hasattr(socket, 'SOL_TCP'):
if hasattr(socket, 'TCP_KEEPIDLE'):
tcp_keepalive_idle = opts.get('tcp_keepalive_idle', -1)
if tcp_keepalive_idle > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPIDLE,
int(tcp_keepalive_idle))
if hasattr(socket, 'TCP_KEEPCNT'):
tcp_keepalive_cnt = opts.get('tcp_keepalive_cnt', -1)
if tcp_keepalive_cnt > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPCNT,
int(tcp_keepalive_cnt))
if hasattr(socket, 'TCP_KEEPINTVL'):
tcp_keepalive_intvl = opts.get('tcp_keepalive_intvl', -1)
if tcp_keepalive_intvl > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPINTVL,
int(tcp_keepalive_intvl))
if hasattr(socket, 'SIO_KEEPALIVE_VALS'):
# Windows doesn't support TCP_KEEPIDLE, TCP_KEEPCNT, nor
# TCP_KEEPINTVL. Instead, it has its own proprietary
# SIO_KEEPALIVE_VALS.
tcp_keepalive_idle = opts.get('tcp_keepalive_idle', -1)
tcp_keepalive_intvl = opts.get('tcp_keepalive_intvl', -1)
# Windows doesn't support changing something equivalent to
# TCP_KEEPCNT.
if tcp_keepalive_idle > 0 or tcp_keepalive_intvl > 0:
# Windows defaults may be found by using the link below.
# Search for 'KeepAliveTime' and 'KeepAliveInterval'.
# https://technet.microsoft.com/en-us/library/bb726981.aspx#EDAA
# If one value is set and the other isn't, we still need
# to send both values to SIO_KEEPALIVE_VALS and they both
# need to be valid. So in that case, use the Windows
# default.
if tcp_keepalive_idle <= 0:
tcp_keepalive_idle = 7200
if tcp_keepalive_intvl <= 0:
tcp_keepalive_intvl = 1
# The values expected are in milliseconds, so multiply by
# 1000.
sock.ioctl(socket.SIO_KEEPALIVE_VALS, (
1, int(tcp_keepalive_idle * 1000),
int(tcp_keepalive_intvl * 1000)))
else:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 0)
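# Example opts for _set_tcp_keepalive() above (values are illustrative, the keys
# are the ones the helper actually reads):
#
#   opts = {'tcp_keepalive': True,        # enable SO_KEEPALIVE
#           'tcp_keepalive_idle': 300,    # seconds of idle before the first probe
#           'tcp_keepalive_cnt': 3,       # dropped probes before the connection dies
#           'tcp_keepalive_intvl': 30}    # seconds between probes
#   sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   _set_tcp_keepalive(sock, opts)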
if USE_LOAD_BALANCER:
class LoadBalancerServer(SignalHandlingMultiprocessingProcess):
'''
Raw TCP server which runs in its own process and will listen
for incoming connections. Each incoming connection will be
sent via multiprocessing queue to the workers.
Since the queue is shared amongst workers, only one worker will
handle a given connection.
'''
# TODO: opts!
# Based on default used in tornado.netutil.bind_sockets()
backlog = 128
def __init__(self, opts, socket_queue, log_queue=None):
super(LoadBalancerServer, self).__init__(log_queue=log_queue)
self.opts = opts
self.socket_queue = socket_queue
self._socket = None
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on
# Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['opts'],
state['socket_queue'],
log_queue=state['log_queue']
)
def __getstate__(self):
return {'opts': self.opts,
'socket_queue': self.socket_queue,
'log_queue': self.log_queue}
def close(self):
if self._socket is not None:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
self._socket = None
def __del__(self):
self.close()
def run(self):
'''
Start the load balancer
'''
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(1)
self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
self._socket.listen(self.backlog)
while True:
try:
# Wait for a connection to occur since the socket is
# blocking.
connection, address = self._socket.accept()
# Wait for a free slot to be available to put
# the connection into.
# Sockets are picklable on Windows in Python 3.
self.socket_queue.put((connection, address), True, None)
except socket.error as e:
# ECONNABORTED indicates that there was a connection
# but it was closed while still in the accept queue.
# (observed on FreeBSD).
if tornado.util.errno_from_exception(e) == errno.ECONNABORTED:
continue
raise
# TODO: move serial down into message library
class AsyncTCPReqChannel(salt.transport.client.ReqChannel):
'''
Encapsulate sending routines to tcp.
Note: this class returns a singleton
'''
# This class is only a singleton per minion/master pair
# mapping of io_loop -> {key -> channel}
instance_map = weakref.WeakKeyDictionary()
def __new__(cls, opts, **kwargs):
'''
Only create one instance of channel per __key()
'''
# do we have any mapping for this io_loop
io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
if io_loop not in cls.instance_map:
cls.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = cls.instance_map[io_loop]
key = cls.__key(opts, **kwargs)
obj = loop_instance_map.get(key)
if obj is None:
log.debug('Initializing new AsyncTCPReqChannel for {0}'.format(key))
# we need to make a local variable for this, as we are going to store
# it in a WeakValueDictionary-- which will remove the item if no one
# references it-- this forces a reference while we return to the caller
obj = object.__new__(cls)
obj.__singleton_init__(opts, **kwargs)
loop_instance_map[key] = obj
else:
log.debug('Re-using AsyncTCPReqChannel for {0}'.format(key))
return obj
@classmethod
def __key(cls, opts, **kwargs):
if 'master_uri' in kwargs:
opts['master_uri'] = kwargs['master_uri']
return (opts['pki_dir'], # where the keys are stored
opts['id'], # minion ID
opts['master_uri'],
kwargs.get('crypt', 'aes'), # TODO: use the same channel for crypt
)
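    # Illustrative note (not in the original source): because __new__ keys
    # instances by (pki_dir, id, master_uri, crypt) per io_loop, constructing
    # the channel twice with equal opts is expected to return the same object:
    #
    #   a = AsyncTCPReqChannel(opts, crypt='aes')
    #   b = AsyncTCPReqChannel(opts, crypt='aes')
    #   assert a is b  # same io_loop and same __key() -> same singleton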
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, **kwargs):
pass
# an init for the singleton instance to call
def __singleton_init__(self, opts, **kwargs):
self.opts = dict(opts)
self.serial = salt.payload.Serial(self.opts)
# crypt defaults to 'aes'
self.crypt = kwargs.get('crypt', 'aes')
self.io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
if self.crypt != 'clear':
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
resolver = kwargs.get('resolver')
parse = urlparse.urlparse(self.opts['master_uri'])
host, port = parse.netloc.rsplit(':', 1)
self.master_addr = (host, int(port))
self._closing = False
self.message_client = SaltMessageClientPool(self.opts,
args=(self.opts, host, int(port),),
kwargs={'io_loop': self.io_loop, 'resolver': resolver})
def close(self):
if self._closing:
return
self._closing = True
self.message_client.close()
def __del__(self):
self.close()
def _package_load(self, load):
return {
'enc': self.crypt,
'load': load,
}
@tornado.gen.coroutine
def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
if not self.auth.authenticated:
yield self.auth.authenticate()
ret = yield self.message_client.send(self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout)
key = self.auth.get_keys()
cipher = PKCS1_OAEP.new(key)
aes = cipher.decrypt(ret['key'])
pcrypt = salt.crypt.Crypticle(self.opts, aes)
data = pcrypt.loads(ret[dictkey])
if six.PY3:
data = salt.transport.frame.decode_embedded_strs(data)
raise tornado.gen.Return(data)
@tornado.gen.coroutine
def _crypted_transfer(self, load, tries=3, timeout=60):
'''
In case of authentication errors, try to renegotiate authentication
and retry the method.
        Otherwise we can fail too early, e.g. when the master restarts during
        a minion state execution call.
        '''
@tornado.gen.coroutine
def _do_transfer():
data = yield self.message_client.send(self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
)
            # We may not always get data back: a salt-call ret submission, for
            # example, is blind communication; we do not subscribe to return
            # events, we just upload the results to the master.
if data:
data = self.auth.crypticle.loads(data)
if six.PY3:
data = salt.transport.frame.decode_embedded_strs(data)
raise tornado.gen.Return(data)
if not self.auth.authenticated:
yield self.auth.authenticate()
try:
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
except salt.crypt.AuthenticationError:
yield self.auth.authenticate()
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def _uncrypted_transfer(self, load, tries=3, timeout=60):
ret = yield self.message_client.send(self._package_load(load), timeout=timeout)
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def send(self, load, tries=3, timeout=60, raw=False):
'''
Send a request, return a future which will complete when we send the message
'''
try:
if self.crypt == 'clear':
ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
else:
ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout)
except tornado.iostream.StreamClosedError:
# Convert to 'SaltClientError' so that clients can handle this
# exception more appropriately.
raise SaltClientError('Connection to master lost')
raise tornado.gen.Return(ret)
class AsyncTCPPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel):
def __init__(self,
opts,
**kwargs):
self.opts = opts
self.serial = salt.payload.Serial(self.opts)
self.crypt = kwargs.get('crypt', 'aes')
self.io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
self.connected = False
self._closing = False
self._reconnected = False
self.event = salt.utils.event.get_event(
'minion',
opts=self.opts,
listen=False
)
def close(self):
if self._closing:
return
self._closing = True
if hasattr(self, 'message_client'):
self.message_client.close()
def __del__(self):
self.close()
def _package_load(self, load):
return {
'enc': self.crypt,
'load': load,
}
@tornado.gen.coroutine
def send_id(self, tok, force_auth):
'''
Send the minion id to the master so that the master may better
track the connection state of the minion.
In case of authentication errors, try to renegotiate authentication
and retry the method.
'''
load = {'id': self.opts['id'], 'tok': tok}
@tornado.gen.coroutine
def _do_transfer():
msg = self._package_load(self.auth.crypticle.dumps(load))
package = salt.transport.frame.frame_msg(msg, header=None)
yield self.message_client.write_to_stream(package)
raise tornado.gen.Return(True)
if force_auth or not self.auth.authenticated:
count = 0
while count <= self.opts['tcp_authentication_retries'] or self.opts['tcp_authentication_retries'] < 0:
try:
yield self.auth.authenticate()
break
except SaltClientError as exc:
log.debug(exc)
count += 1
try:
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
except salt.crypt.AuthenticationError:
yield self.auth.authenticate()
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def connect_callback(self, result):
if self._closing:
return
# Force re-auth on reconnect since the master
# may have been restarted
yield self.send_id(self.tok, self._reconnected)
self.connected = True
self.event.fire_event(
{'master': self.opts['master']},
'__master_connected'
)
if self._reconnected:
# On reconnects, fire a master event to notify that the minion is
# available.
if self.opts.get('__role') == 'syndic':
data = 'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
)
tag = salt.utils.event.tagify(
[self.opts['id'], 'start'],
'syndic'
)
else:
data = 'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
)
tag = salt.utils.event.tagify(
[self.opts['id'], 'start'],
'minion'
)
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': None,
'tok': self.tok,
'data': data,
'tag': tag}
req_channel = salt.utils.async.SyncWrapper(
AsyncTCPReqChannel, (self.opts,)
)
try:
req_channel.send(load, timeout=60)
except salt.exceptions.SaltReqTimeoutError:
log.info('fire_master failed: master could not be contacted. Request timed out.')
except Exception:
log.info('fire_master failed: {0}'.format(
traceback.format_exc())
)
else:
self._reconnected = True
def disconnect_callback(self):
if self._closing:
return
self.connected = False
self.event.fire_event(
{'master': self.opts['master']},
'__master_disconnected'
)
@tornado.gen.coroutine
def connect(self):
try:
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
self.tok = self.auth.gen_token('salt')
if not self.auth.authenticated:
yield self.auth.authenticate()
if self.auth.authenticated:
self.message_client = SaltMessageClientPool(
self.opts,
args=(self.opts, self.opts['master_ip'], int(self.auth.creds['publish_port']),),
kwargs={'io_loop': self.io_loop,
'connect_callback': self.connect_callback,
'disconnect_callback': self.disconnect_callback})
yield self.message_client.connect() # wait for the client to be connected
self.connected = True
# TODO: better exception handling...
except KeyboardInterrupt:
raise
except Exception as exc:
if '-|RETRY|-' not in str(exc):
raise SaltClientError('Unable to sign_in to master: {0}'.format(exc)) # TODO: better error message
def on_recv(self, callback):
'''
Register an on_recv callback
'''
if callback is None:
return self.message_client.on_recv(callback)
@tornado.gen.coroutine
def wrap_callback(body):
if not isinstance(body, dict):
# TODO: For some reason we need to decode here for things
# to work. Fix this.
body = msgpack.loads(body)
if six.PY3:
body = salt.transport.frame.decode_embedded_strs(body)
ret = yield self._decode_payload(body)
callback(ret)
return self.message_client.on_recv(wrap_callback)
class TCPReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel):
# TODO: opts!
backlog = 5
def __init__(self, opts):
salt.transport.server.ReqServerChannel.__init__(self, opts)
self._socket = None
@property
def socket(self):
return self._socket
def close(self):
if self._socket is not None:
try:
self._socket.shutdown(socket.SHUT_RDWR)
except socket.error as exc:
if exc.errno == errno.ENOTCONN:
# We may try to shutdown a socket which is already disconnected.
# Ignore this condition and continue.
pass
else:
raise exc
self._socket.close()
self._socket = None
if hasattr(self.req_server, 'stop'):
try:
self.req_server.stop()
except Exception as exc:
log.exception('TCPReqServerChannel close generated an exception: %s', str(exc))
def __del__(self):
self.close()
def pre_fork(self, process_manager):
'''
Pre-fork we need to create the zmq router device
'''
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
if USE_LOAD_BALANCER:
self.socket_queue = multiprocessing.Queue()
process_manager.add_process(
LoadBalancerServer, args=(self.opts, self.socket_queue)
)
elif not salt.utils.is_windows():
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(0)
self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
def post_fork(self, payload_handler, io_loop):
'''
After forking we need to create all of the local sockets to listen to the
router
payload_handler: function to call with your payloads
'''
self.payload_handler = payload_handler
self.io_loop = io_loop
self.serial = salt.payload.Serial(self.opts)
with salt.utils.async.current_ioloop(self.io_loop):
if USE_LOAD_BALANCER:
self.req_server = LoadBalancerWorker(self.socket_queue,
self.handle_message,
ssl_options=self.opts.get('ssl'))
else:
if salt.utils.is_windows():
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(0)
self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
self.req_server = SaltMessageServer(self.handle_message,
ssl_options=self.opts.get('ssl'))
self.req_server.add_socket(self._socket)
self._socket.listen(self.backlog)
salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop)
@tornado.gen.coroutine
def handle_message(self, stream, header, payload):
'''
        Handle incoming messages from underlying tcp streams
'''
try:
try:
payload = self._decode_payload(payload)
except Exception:
stream.write(salt.transport.frame.frame_msg('bad load', header=header))
raise tornado.gen.Return()
# TODO helper functions to normalize payload?
if not isinstance(payload, dict) or not isinstance(payload.get('load'), dict):
yield stream.write(salt.transport.frame.frame_msg(
'payload and load must be a dict', header=header))
raise tornado.gen.Return()
try:
id_ = payload['load'].get('id', '')
if '\0' in id_:
log.error('Payload contains an id with a null byte: %s', payload)
                    stream.write(self.serial.dumps('bad load: id contains a null byte'))
raise tornado.gen.Return()
except TypeError:
log.error('Payload contains non-string id: %s', payload)
                stream.write(self.serial.dumps('bad load: id {0} is not a string'.format(id_)))
raise tornado.gen.Return()
# intercept the "_auth" commands, since the main daemon shouldn't know
# anything about our key auth
if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth':
yield stream.write(salt.transport.frame.frame_msg(
self._auth(payload['load']), header=header))
raise tornado.gen.Return()
# TODO: test
try:
ret, req_opts = yield self.payload_handler(payload)
except Exception as e:
# always attempt to return an error to the minion
stream.write('Some exception handling minion payload')
log.error('Some exception handling a payload from minion', exc_info=True)
stream.close()
raise tornado.gen.Return()
req_fun = req_opts.get('fun', 'send')
if req_fun == 'send_clear':
stream.write(salt.transport.frame.frame_msg(ret, header=header))
elif req_fun == 'send':
stream.write(salt.transport.frame.frame_msg(self.crypticle.dumps(ret), header=header))
elif req_fun == 'send_private':
stream.write(salt.transport.frame.frame_msg(self._encrypt_private(ret,
req_opts['key'],
req_opts['tgt'],
), header=header))
else:
log.error('Unknown req_fun {0}'.format(req_fun))
# always attempt to return an error to the minion
stream.write('Server-side exception handling payload')
stream.close()
except tornado.gen.Return:
raise
except tornado.iostream.StreamClosedError:
# Stream was closed. This could happen if the remote side
# closed the connection on its end (eg in a timeout or shutdown
# situation).
log.error('Connection was unexpectedly closed', exc_info=True)
except Exception as exc: # pylint: disable=broad-except
# Absorb any other exceptions
log.error('Unexpected exception occurred: {0}'.format(exc), exc_info=True)
raise tornado.gen.Return()
class SaltMessageServer(tornado.tcpserver.TCPServer, object):
'''
Raw TCP server which will receive all of the TCP streams and re-assemble
messages that are sent through to us
'''
def __init__(self, message_handler, *args, **kwargs):
super(SaltMessageServer, self).__init__(*args, **kwargs)
self.io_loop = tornado.ioloop.IOLoop.current()
self.clients = []
self.message_handler = message_handler
@tornado.gen.coroutine
def handle_stream(self, stream, address):
'''
Handle incoming streams and add messages to the incoming queue
'''
log.trace('Req client {0} connected'.format(address))
self.clients.append((stream, address))
unpacker = msgpack.Unpacker()
try:
while True:
wire_bytes = yield stream.read_bytes(4096, partial=True)
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
if six.PY3:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
header = framed_msg['head']
self.io_loop.spawn_callback(self.message_handler, stream, header, framed_msg['body'])
except tornado.iostream.StreamClosedError:
log.trace('req client disconnected {0}'.format(address))
self.clients.remove((stream, address))
except Exception as e:
log.trace('other master-side exception: {0}'.format(e))
self.clients.remove((stream, address))
stream.close()
def shutdown(self):
'''
Shutdown the whole server
'''
for item in self.clients:
client, address = item
client.close()
self.clients.remove(item)
if USE_LOAD_BALANCER:
class LoadBalancerWorker(SaltMessageServer):
'''
This will receive TCP connections from 'LoadBalancerServer' via
a multiprocessing queue.
Since the queue is shared amongst workers, only one worker will handle
a given connection.
'''
def __init__(self, socket_queue, message_handler, *args, **kwargs):
super(LoadBalancerWorker, self).__init__(
message_handler, *args, **kwargs)
self.socket_queue = socket_queue
self._stop = threading.Event()
self.thread = threading.Thread(target=self.socket_queue_thread)
self.thread.start()
def stop(self):
self._stop.set()
self.thread.join()
def socket_queue_thread(self):
try:
while True:
try:
client_socket, address = self.socket_queue.get(True, 1)
except queue.Empty:
if self._stop.is_set():
break
continue
# 'self.io_loop' initialized in super class
# 'tornado.tcpserver.TCPServer'.
# 'self._handle_connection' defined in same super class.
self.io_loop.spawn_callback(
self._handle_connection, client_socket, address)
except (KeyboardInterrupt, SystemExit):
pass
class TCPClientKeepAlive(tornado.tcpclient.TCPClient):
'''
Override _create_stream() in TCPClient to enable keep alive support.
'''
def __init__(self, opts, resolver=None):
self.opts = opts
super(TCPClientKeepAlive, self).__init__(resolver=resolver)
def _create_stream(self, max_buffer_size, af, addr, **kwargs): # pylint: disable=unused-argument
'''
Override _create_stream() in TCPClient.
Tornado 4.5 added the kwargs 'source_ip' and 'source_port'.
Due to this, use **kwargs to swallow these and any future
kwargs to maintain compatibility.
'''
# Always connect in plaintext; we'll convert to ssl if necessary
# after one connection has completed.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_set_tcp_keepalive(sock, self.opts)
stream = tornado.iostream.IOStream(
sock,
max_buffer_size=max_buffer_size)
if tornado.version_info < (5,):
return stream.connect(addr)
return stream, stream.connect(addr)
class SaltMessageClientPool(salt.transport.MessageClientPool):
'''
Wrapper class of SaltMessageClient to avoid blocking waiting while writing data to socket.
'''
def __init__(self, opts, args=None, kwargs=None):
super(SaltMessageClientPool, self).__init__(SaltMessageClient, opts, args=args, kwargs=kwargs)
def __del__(self):
self.close()
def close(self):
for message_client in self.message_clients:
message_client.close()
self.message_clients = []
@tornado.gen.coroutine
def connect(self):
futures = []
for message_client in self.message_clients:
futures.append(message_client.connect())
for future in futures:
yield future
raise tornado.gen.Return(None)
def on_recv(self, *args, **kwargs):
for message_client in self.message_clients:
message_client.on_recv(*args, **kwargs)
def send(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0].send(*args, **kwargs)
def write_to_stream(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0]._stream.write(*args, **kwargs)
# TODO consolidate with IPCClient
# TODO: limit in-flight messages.
# TODO: singleton? Something to not re-create the tcp connection so much
class SaltMessageClient(object):
'''
Low-level message sending client
'''
def __init__(self, opts, host, port, io_loop=None, resolver=None,
connect_callback=None, disconnect_callback=None):
self.opts = opts
self.host = host
self.port = port
self.connect_callback = connect_callback
self.disconnect_callback = disconnect_callback
self.io_loop = io_loop or tornado.ioloop.IOLoop.current()
with salt.utils.async.current_ioloop(self.io_loop):
self._tcp_client = TCPClientKeepAlive(opts, resolver=resolver)
self._mid = 1
self._max_messages = int((1 << 31) - 2) # number of IDs before we wrap
# TODO: max queue size
self.send_queue = [] # queue of messages to be sent
self.send_future_map = {} # mapping of request_id -> Future
self.send_timeout_map = {} # request_id -> timeout_callback
self._read_until_future = None
self._on_recv = None
self._closing = False
self._connecting_future = self.connect()
self._stream_return_future = tornado.concurrent.Future()
self.io_loop.spawn_callback(self._stream_return)
# TODO: timeout inflight sessions
def close(self):
if self._closing:
return
self._closing = True
if hasattr(self, '_stream') and not self._stream.closed():
# If _stream_return() hasn't completed, it means the IO
# Loop is stopped (such as when using
# 'salt.utils.async.SyncWrapper'). Ensure that
# _stream_return() completes by restarting the IO Loop.
# This will prevent potential errors on shutdown.
try:
orig_loop = tornado.ioloop.IOLoop.current()
self.io_loop.make_current()
self._stream.close()
if self._read_until_future is not None:
# This will prevent this message from showing up:
# '[ERROR ] Future exception was never retrieved:
# StreamClosedError'
# This happens because the logic is always waiting to read
# the next message and the associated read future is marked
# 'StreamClosedError' when the stream is closed.
if self._read_until_future.done():
self._read_until_future.exception()
elif self.io_loop != tornado.ioloop.IOLoop.current(instance=False):
self.io_loop.add_future(
self._stream_return_future,
lambda future: self.io_loop.stop()
)
self.io_loop.start()
finally:
orig_loop.make_current()
self._tcp_client.close()
# Clear callback references to allow the object that they belong to
# to be deleted.
self.connect_callback = None
self.disconnect_callback = None
def __del__(self):
self.close()
def connect(self):
'''
Ask for this client to reconnect to the origin
'''
if hasattr(self, '_connecting_future') and not self._connecting_future.done():
future = self._connecting_future
else:
future = tornado.concurrent.Future()
self._connecting_future = future
self.io_loop.add_callback(self._connect)
# Add the callback only when a new future is created
if self.connect_callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(self.connect_callback, response)
future.add_done_callback(handle_future)
return future
# TODO: tcp backoff opts
@tornado.gen.coroutine
def _connect(self):
'''
Try to connect for the rest of time!
'''
while True:
if self._closing:
break
try:
with salt.utils.async.current_ioloop(self.io_loop):
self._stream = yield self._tcp_client.connect(self.host,
self.port,
ssl_options=self.opts.get('ssl'))
self._connecting_future.set_result(True)
break
except Exception as e:
yield tornado.gen.sleep(1) # TODO: backoff
#self._connecting_future.set_exception(e)
@tornado.gen.coroutine
def _stream_return(self):
try:
while not self._closing and (
not self._connecting_future.done() or
self._connecting_future.result() is not True):
yield self._connecting_future
unpacker = msgpack.Unpacker()
while not self._closing:
try:
self._read_until_future = self._stream.read_bytes(4096, partial=True)
wire_bytes = yield self._read_until_future
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
if six.PY3:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
header = framed_msg['head']
body = framed_msg['body']
message_id = header.get('mid')
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_result(body)
self.remove_message_timeout(message_id)
else:
if self._on_recv is not None:
self.io_loop.spawn_callback(self._on_recv, header, body)
else:
log.error('Got response for message_id {0} that we are not tracking'.format(message_id))
except tornado.iostream.StreamClosedError as e:
log.debug('tcp stream to {0}:{1} closed, unable to recv'.format(self.host, self.port))
for future in six.itervalues(self.send_future_map):
future.set_exception(e)
self.send_future_map = {}
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
except TypeError:
# This is an invalid transport
if 'detect_mode' in self.opts:
log.info('There was an error trying to use TCP transport; '
'attempting to fallback to another transport')
else:
raise SaltClientError
except Exception as e:
log.error('Exception parsing response', exc_info=True)
for future in six.itervalues(self.send_future_map):
future.set_exception(e)
self.send_future_map = {}
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
finally:
self._stream_return_future.set_result(True)
@tornado.gen.coroutine
def _stream_send(self):
while not self._connecting_future.done() or self._connecting_future.result() is not True:
yield self._connecting_future
while len(self.send_queue) > 0:
message_id, item = self.send_queue[0]
try:
yield self._stream.write(item)
del self.send_queue[0]
            # If the connection is dead, let's fail this send and make sure we
            # attempt to reconnect.
except tornado.iostream.StreamClosedError as e:
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_exception(e)
self.remove_message_timeout(message_id)
del self.send_queue[0]
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
def _message_id(self):
wrap = False
while self._mid in self.send_future_map:
if self._mid >= self._max_messages:
if wrap:
# this shouldn't ever happen, but just in case
raise Exception('Unable to find available messageid')
self._mid = 1
wrap = True
else:
self._mid += 1
return self._mid
# TODO: return a message object which takes care of multiplexing?
def on_recv(self, callback):
'''
Register a callback for received messages (that we didn't initiate)
'''
if callback is None:
self._on_recv = callback
else:
def wrap_recv(header, body):
callback(body)
self._on_recv = wrap_recv
def remove_message_timeout(self, message_id):
if message_id not in self.send_timeout_map:
return
timeout = self.send_timeout_map.pop(message_id)
self.io_loop.remove_timeout(timeout)
def timeout_message(self, message_id):
if message_id in self.send_timeout_map:
del self.send_timeout_map[message_id]
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_exception(
SaltReqTimeoutError('Message timed out')
)
def send(self, msg, timeout=None, callback=None, raw=False):
'''
Send given message, and return a future
'''
message_id = self._message_id()
header = {'mid': message_id}
future = tornado.concurrent.Future()
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
# Add this future to the mapping
self.send_future_map[message_id] = future
if self.opts.get('detect_mode') is True:
timeout = 1
if timeout is not None:
send_timeout = self.io_loop.call_later(timeout, self.timeout_message, message_id)
self.send_timeout_map[message_id] = send_timeout
# if we don't have a send queue, we need to spawn the callback to do the sending
if len(self.send_queue) == 0:
self.io_loop.spawn_callback(self._stream_send)
self.send_queue.append((message_id, salt.transport.frame.frame_msg(msg, header=header)))
return future
class Subscriber(object):
'''
Client object for use with the TCP publisher server
'''
def __init__(self, stream, address):
self.stream = stream
self.address = address
self._closing = False
self._read_until_future = None
self.id_ = None
def close(self):
if self._closing:
return
self._closing = True
if not self.stream.closed():
self.stream.close()
if self._read_until_future is not None and self._read_until_future.done():
# This will prevent this message from showing up:
# '[ERROR ] Future exception was never retrieved:
# StreamClosedError'
# This happens because the logic is always waiting to read
# the next message and the associated read future is marked
# 'StreamClosedError' when the stream is closed.
self._read_until_future.exception()
def __del__(self):
self.close()
class PubServer(tornado.tcpserver.TCPServer, object):
'''
TCP publisher
'''
def __init__(self, opts, io_loop=None):
super(PubServer, self).__init__(ssl_options=opts.get('ssl'))
self.io_loop = io_loop
self.opts = opts
self._closing = False
self.clients = set()
self.aes_funcs = salt.master.AESFuncs(self.opts)
self.present = {}
self.presence_events = False
if self.opts.get('presence_events', False):
tcp_only = True
for transport, _ in iter_transport_opts(self.opts):
if transport != 'tcp':
tcp_only = False
if tcp_only:
# Only when the transport is TCP only, the presence events will
# be handled here. Otherwise, it will be handled in the
# 'Maintenance' process.
self.presence_events = True
if self.presence_events:
self.event = salt.utils.event.get_event(
'master',
opts=self.opts,
listen=False
)
def close(self):
if self._closing:
return
self._closing = True
def __del__(self):
self.close()
def _add_client_present(self, client):
id_ = client.id_
if id_ in self.present:
clients = self.present[id_]
clients.add(client)
else:
self.present[id_] = set([client])
if self.presence_events:
data = {'new': [id_],
'lost': []}
self.event.fire_event(
data,
salt.utils.event.tagify('change', 'presence')
)
data = {'present': list(self.present.keys())}
self.event.fire_event(
data,
salt.utils.event.tagify('present', 'presence')
)
def _remove_client_present(self, client):
id_ = client.id_
if id_ is None or id_ not in self.present:
# This is possible if _remove_client_present() is invoked
# before the minion's id is validated.
return
clients = self.present[id_]
if client not in clients:
# Since _remove_client_present() is potentially called from
# _stream_read() and/or publish_payload(), it is possible for
# it to be called twice, in which case we will get here.
# This is not an abnormal case, so no logging is required.
return
clients.remove(client)
if len(clients) == 0:
del self.present[id_]
if self.presence_events:
data = {'new': [],
'lost': [id_]}
self.event.fire_event(
data,
salt.utils.event.tagify('change', 'presence')
)
data = {'present': list(self.present.keys())}
self.event.fire_event(
data,
salt.utils.event.tagify('present', 'presence')
)
@tornado.gen.coroutine
def _stream_read(self, client):
unpacker = msgpack.Unpacker()
while not self._closing:
try:
client._read_until_future = client.stream.read_bytes(4096, partial=True)
wire_bytes = yield client._read_until_future
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
if six.PY3:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
body = framed_msg['body']
if body['enc'] != 'aes':
# We only accept 'aes' encoded messages for 'id'
continue
crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
load = crypticle.loads(body['load'])
if six.PY3:
load = salt.transport.frame.decode_embedded_strs(load)
if not self.aes_funcs.verify_minion(load['id'], load['tok']):
continue
client.id_ = load['id']
self._add_client_present(client)
except tornado.iostream.StreamClosedError as e:
log.debug('tcp stream to {0} closed, unable to recv'.format(client.address))
client.close()
self._remove_client_present(client)
self.clients.discard(client)
break
except Exception as e:
log.error('Exception parsing response', exc_info=True)
continue
def handle_stream(self, stream, address):
log.trace('Subscriber at {0} connected'.format(address))
client = Subscriber(stream, address)
self.clients.add(client)
self.io_loop.spawn_callback(self._stream_read, client)
# TODO: ACK the publish through IPC
@tornado.gen.coroutine
def publish_payload(self, package, _):
log.debug('TCP PubServer sending payload: {0}'.format(package))
payload = salt.transport.frame.frame_msg(package['payload'])
to_remove = []
if 'topic_lst' in package:
topic_lst = package['topic_lst']
for topic in topic_lst:
if topic in self.present:
# This will rarely be a list of more than 1 item. It will
# be more than 1 item if the minion disconnects from the
# master in an unclean manner (eg cable yank), then
# restarts and the master is yet to detect the disconnect
# via TCP keep-alive.
for client in self.present[topic]:
try:
# Write the packed str
f = client.stream.write(payload)
self.io_loop.add_future(f, lambda f: True)
except tornado.iostream.StreamClosedError:
to_remove.append(client)
else:
log.debug('Publish target {0} not connected'.format(topic))
else:
for client in self.clients:
try:
# Write the packed str
f = client.stream.write(payload)
self.io_loop.add_future(f, lambda f: True)
except tornado.iostream.StreamClosedError:
to_remove.append(client)
for client in to_remove:
log.debug('Subscriber at {0} has disconnected from publisher'.format(client.address))
client.close()
self._remove_client_present(client)
self.clients.discard(client)
log.trace('TCP PubServer finished publishing payload')
class TCPPubServerChannel(salt.transport.server.PubServerChannel):
# TODO: opts!
# Based on default used in tornado.netutil.bind_sockets()
backlog = 128
def __init__(self, opts):
self.opts = opts
self.serial = salt.payload.Serial(self.opts) # TODO: in init?
self.io_loop = None
def __setstate__(self, state):
salt.master.SMaster.secrets = state['secrets']
self.__init__(state['opts'])
def __getstate__(self):
return {'opts': self.opts,
'secrets': salt.master.SMaster.secrets}
def _publish_daemon(self, log_queue=None):
'''
Bind to the interface specified in the configuration file
'''
salt.utils.appendproctitle(self.__class__.__name__)
if log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(log_queue)
salt.log.setup.setup_multiprocessing_logging(log_queue)
# Check if io_loop was set outside
if self.io_loop is None:
self.io_loop = tornado.ioloop.IOLoop.current()
# Spin up the publisher
pub_server = PubServer(self.opts, io_loop=self.io_loop)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(sock, self.opts)
sock.setblocking(0)
sock.bind((self.opts['interface'], int(self.opts['publish_port'])))
sock.listen(self.backlog)
# pub_server will take ownership of the socket
pub_server.add_socket(sock)
# Set up Salt IPC server
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = int(self.opts.get('tcp_master_publish_pull', 4514))
else:
pull_uri = os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
pull_sock = salt.transport.ipc.IPCMessageServer(
pull_uri,
io_loop=self.io_loop,
payload_handler=pub_server.publish_payload,
)
# Securely create socket
log.info('Starting the Salt Puller on {0}'.format(pull_uri))
with salt.utils.files.set_umask(0o177):
pull_sock.start()
# run forever
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
salt.log.setup.shutdown_multiprocessing_logging()
def pre_fork(self, process_manager):
'''
Do anything necessary pre-fork. Since this is on the master side this will
primarily be used to create IPC channels and create our daemon process to
do the actual publishing
'''
kwargs = {}
if salt.utils.is_windows():
kwargs['log_queue'] = (
salt.log.setup.get_multiprocessing_logging_queue()
)
process_manager.add_process(self._publish_daemon, kwargs=kwargs)
def publish(self, load):
'''
Publish "load" to minions
'''
payload = {'enc': 'aes'}
crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
payload['load'] = crypticle.dumps(load)
if self.opts['sign_pub_messages']:
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
log.debug("Signing data packet")
payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
# Use the Salt IPC server
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = int(self.opts.get('tcp_master_publish_pull', 4514))
else:
pull_uri = os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
# TODO: switch to the actual async interface
#pub_sock = salt.transport.ipc.IPCMessageClient(self.opts, io_loop=self.io_loop)
pub_sock = salt.utils.async.SyncWrapper(
salt.transport.ipc.IPCMessageClient,
(pull_uri,)
)
pub_sock.connect()
int_payload = {'payload': self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
if load['tgt_type'] == 'list':
int_payload['topic_lst'] = load['tgt']
# Send it over IPC!
pub_sock.send(int_payload)
run.py
import argparse, re, os
from .product import version
import importlib.util
import threading
def register_watch(module, filepath):
spec = importlib.util.spec_from_file_location(module, filepath)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
watches = []
for (name, item) in module.__dict__.items():
if name.startswith('watch'):
watches.append(item)
return watches
def tagExpressionGen(argstr):
tagRules = []
for part in argstr:
        # Contains single quotes, so it is a tag expression
if "'" in part:
rule = re.sub(r"'.+?'", lambda m: f'tagmatch({m.group(0)})', part)
tagRules.append(f'({rule})')
        # Otherwise it is a plain tag name
else:
rule = f"tagmatch('{part}')"
tagRules.append(f'{rule}')
return ' or '.join(tagRules)
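# Illustrative behavior of tagExpressionGen (assumed inputs, outputs derived
# from the regex above):
#
#   tagExpressionGen(['smoke'])
#   # -> "tagmatch('smoke')"
#   tagExpressionGen(["'a' and not 'b'", 'c'])
#   # -> "(tagmatch('a') and not tagmatch('b')) or tagmatch('c')"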
def run():
parser = argparse.ArgumentParser()
    parser.add_argument('--version', action='version', version=f'hytest v{version}', help="show the version number")
    parser.add_argument('--new', metavar='project_dir', help="create a new project directory")
    parser.add_argument("case_dir", help="root directory of the test cases", nargs='?', default='cases')
    parser.add_argument("-L", "--loglevel", metavar='Level_Number', type=int, help="log level, 0: low - 1: high", default=0)
    parser.add_argument("--test", metavar='Case_Name', action='append', help="case-name filter, wildcards supported", default=[])
    parser.add_argument("--suite", metavar='Suite_Name', action='append', help="suite-name filter, wildcards supported", default=[])
    parser.add_argument("--tag", metavar='Tag_Expression', action='append', help="tag-name filter, wildcards supported", default=[])
    parser.add_argument("--tagnot", metavar='Tag_Expression', action='append', help="tag-name exclusion filter, wildcards supported", default=[])
    parser.add_argument("-A", "--argfile", metavar='Argument_File', help="argument file",
                        type=argparse.FileType('r', encoding='utf8'))
    parser.add_argument('-W', "--watch", nargs='?', help="multi-threaded watch")
args = parser.parse_args()
event = threading.Event()
if args.watch:
watch_list = register_watch('watch', args.watch)
for item in watch_list:
t = threading.Thread(target=item, args=(event,))
            t.daemon = True
t.start()
else:
watch_list = None
    # Some arguments may be stored in an argument file
if args.argfile:
fileArgs = [para for para in args.argfile.read().replace('\n', ' ').split() if para]
args = parser.parse_args(fileArgs, args)
    # Create a new project directory
if args.new:
projDir = args.new
if os.path.exists(projDir):
print(f'{projDir} already exists!')
exit(2)
os.makedirs(f'{projDir}/cases')
with open(f'{projDir}/cases/case1.py', 'w', encoding='utf8') as f:
            f.write('''class c1:
    name = 'case name - 0001'
    # test case steps
    def teststeps(self):...''')
exit()
    if not os.path.exists(args.case_dir):
        print(f'{args.case_dir} directory does not exist; the working directory is {os.getcwd()}')
        exit(2)  # 2 means there are no runnable test cases
    if not os.path.isdir(args.case_dir):
        print(f'{args.case_dir} is not a directory; the working directory is {os.getcwd()}')
        exit(2)  # 2 means there are no runnable test cases
    # Importing these also runs the logging-module initialization in log and registers the signal handlers
from .utils.log import LogLevel
from .utils.runner import Collector, Runner
LogLevel.level = args.loglevel
# print('loglevel',LogLevel.level)
    # e.g. --tag "'smoke test' and 'UITest' or (not 'quick' and 'fast')" --tag 白月 --tag 黑羽
tag_include_expr = tagExpressionGen(args.tag)
tag_exclude_expr = tagExpressionGen(args.tagnot)
# print(tag_include_expr)
# print(tag_exclude_expr)
print(f'''
* * * * * * * * * * * * * * * * * *
* hytest {version} www.byhy.net *
* * * * * * * * * * * * * * * * * *
'''
)
os.makedirs('log/imgs', exist_ok=True)
Collector.run(
casedir=args.case_dir,
suitename_filters=args.suite,
casename_filters=args.test,
tag_include_expr=tag_include_expr,
tag_exclude_expr=tag_exclude_expr,
)
    # Return codes: 0 means success, 1 means there were errors, 2 means there were no runnable cases, 3 means a web page / app crash could not be restored
    result = []  # too lazy to subclass Thread, so pass in a list to collect the result
t = threading.Thread(target=Runner.run, args=(result, event))
    t.daemon = True
t.start()
try:
        while t.is_alive():
            t.join(0.1)  # avoid a busy-wait while staying responsive to Ctrl-C
    except KeyboardInterrupt:
        print('Stopped by keyboard')
if watch_list:
event.clear()
if not result:
return 0
return result[0]
if __name__ == '__main__':
exit(run())
_connectivity_channel.py
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Affords a connectivity-state-listenable channel."""
import threading
import time
from grpc._adapter import _low
from grpc._adapter import _types
from grpc.beta import interfaces
from grpc.framework.foundation import callable_util
_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
'Exception calling channel subscription callback!')
_LOW_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY = {
state: connectivity
for state, connectivity in zip(_types.ConnectivityState,
interfaces.ChannelConnectivity)
}
class ConnectivityChannel(object):
def __init__(self, low_channel):
self._lock = threading.Lock()
self._low_channel = low_channel
self._polling = False
self._connectivity = None
self._try_to_connect = False
self._callbacks_and_connectivities = []
self._delivering = False
def _deliveries(self, connectivity):
callbacks_needing_update = []
for callback_and_connectivity in self._callbacks_and_connectivities:
callback, callback_connectivity = callback_and_connectivity
if callback_connectivity is not connectivity:
callbacks_needing_update.append(callback)
callback_and_connectivity[1] = connectivity
return callbacks_needing_update
def _deliver(self, initial_connectivity, initial_callbacks):
connectivity = initial_connectivity
callbacks = initial_callbacks
while True:
for callback in callbacks:
callable_util.call_logging_exceptions(
callback, _CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE,
connectivity)
with self._lock:
callbacks = self._deliveries(self._connectivity)
if callbacks:
connectivity = self._connectivity
else:
self._delivering = False
return
def _spawn_delivery(self, connectivity, callbacks):
delivering_thread = threading.Thread(
target=self._deliver, args=(connectivity, callbacks,))
delivering_thread.start()
self._delivering = True
# TODO(issue 3064): Don't poll.
def _poll_connectivity(self, low_channel, initial_try_to_connect):
try_to_connect = initial_try_to_connect
low_connectivity = low_channel.check_connectivity_state(try_to_connect)
with self._lock:
self._connectivity = _LOW_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
low_connectivity]
callbacks = tuple(
callback
for callback, unused_but_known_to_be_none_connectivity in
self._callbacks_and_connectivities)
for callback_and_connectivity in self._callbacks_and_connectivities:
callback_and_connectivity[1] = self._connectivity
if callbacks:
self._spawn_delivery(self._connectivity, callbacks)
completion_queue = _low.CompletionQueue()
while True:
low_channel.watch_connectivity_state(low_connectivity,
time.time() + 0.2,
completion_queue, None)
event = completion_queue.next()
with self._lock:
if not self._callbacks_and_connectivities and not self._try_to_connect:
self._polling = False
self._connectivity = None
completion_queue.shutdown()
break
try_to_connect = self._try_to_connect
self._try_to_connect = False
if event.success or try_to_connect:
low_connectivity = low_channel.check_connectivity_state(
try_to_connect)
with self._lock:
self._connectivity = _LOW_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
low_connectivity]
if not self._delivering:
callbacks = self._deliveries(self._connectivity)
if callbacks:
self._spawn_delivery(self._connectivity, callbacks)
def subscribe(self, callback, try_to_connect):
with self._lock:
if not self._callbacks_and_connectivities and not self._polling:
polling_thread = threading.Thread(
target=self._poll_connectivity,
args=(self._low_channel, bool(try_to_connect)))
polling_thread.start()
self._polling = True
self._callbacks_and_connectivities.append([callback, None])
elif not self._delivering and self._connectivity is not None:
self._spawn_delivery(self._connectivity, (callback,))
self._try_to_connect |= bool(try_to_connect)
self._callbacks_and_connectivities.append(
[callback, self._connectivity])
else:
self._try_to_connect |= bool(try_to_connect)
self._callbacks_and_connectivities.append([callback, None])
def unsubscribe(self, callback):
with self._lock:
for index, (subscribed_callback, unused_connectivity
) in enumerate(self._callbacks_and_connectivities):
if callback == subscribed_callback:
self._callbacks_and_connectivities.pop(index)
break
def low_channel(self):
return self._low_channel
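# Usage sketch (illustrative, not part of the original module): subscribers
# receive the mapped connectivity state whenever it changes; the polling thread
# is started lazily by the first subscribe() call. `low_channel` is assumed to
# come from grpc._adapter._low.
#
#   def on_state(connectivity):
#       print('channel is now', connectivity)
#
#   conn_channel = ConnectivityChannel(low_channel)
#   conn_channel.subscribe(on_state, try_to_connect=True)
#   ...
#   conn_channel.unsubscribe(on_state)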
test_websocket_provider.py
import asyncio
from concurrent.futures import (
TimeoutError,
)
import pytest
from threading import (
Thread,
)
import websockets
from tests.utils import (
wait_for_ws,
)
from cpc_fusion import Web3
from cpc_fusion.exceptions import (
ValidationError,
)
from cpc_fusion.providers.websocket import (
WebsocketProvider,
)
@pytest.yield_fixture
def start_websocket_server(open_port):
event_loop = asyncio.new_event_loop()
def run_server():
async def empty_server(websocket, path):
data = await websocket.recv()
await asyncio.sleep(0.02)
await websocket.send(data)
server = websockets.serve(empty_server, '127.0.0.1', open_port, loop=event_loop)
event_loop.run_until_complete(server)
event_loop.run_forever()
thd = Thread(target=run_server)
thd.start()
try:
yield
finally:
event_loop.call_soon_threadsafe(event_loop.stop)
@pytest.fixture()
def w3(open_port, start_websocket_server):
# need new event loop as the one used by server is already running
event_loop = asyncio.new_event_loop()
endpoint_uri = 'ws://127.0.0.1:{}'.format(open_port)
event_loop.run_until_complete(wait_for_ws(endpoint_uri, event_loop))
provider = WebsocketProvider(endpoint_uri, websocket_timeout=0.01)
return Web3(provider)
def test_websocket_provider_timeout(w3):
with pytest.raises(TimeoutError):
w3.eth.accounts
def test_restricted_websocket_kwargs():
invalid_kwargs = {'uri': 'ws://127.0.0.1:8546'}
re_exc_message = r'.*found: {0}*'.format(set(invalid_kwargs.keys()))
with pytest.raises(ValidationError, match=re_exc_message):
WebsocketProvider(websocket_kwargs=invalid_kwargs)
twisterlib.py
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import math
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import hashlib
import threading
from datetime import datetime
from collections import OrderedDict
import queue
import time
import csv
import glob
import random
import xml.etree.ElementTree as ET
import logging
from pathlib import Path
from distutils.spawn import find_executable
import colorama
from colorama import Fore
import pickle
import platform
import yaml
import json
from multiprocessing import Lock, Process, Value
from typing import List
try:
# Use the C LibYAML parser if available, rather than the Python parser.
# It's much faster.
from yaml import CSafeLoader as SafeLoader
from yaml import CDumper as Dumper
except ImportError:
from yaml import SafeLoader, Dumper
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
try:
import pty
except ImportError as capture_error:
if os.name == "nt": # "nt" means that program is running on Windows OS
pass # "--device-serial-pty" option is not supported on Windows OS
else:
raise capture_error
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
# This is needed to load edt.pickle files.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts",
"python-devicetree", "src"))
from devicetree import edtlib # pylint: disable=unused-import
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
import scl
import expr_parser
logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)
class ExecutionCounter(object):
def __init__(self, total=0):
self._done = Value('i', 0)
self._passed = Value('i', 0)
self._skipped_configs = Value('i', 0)
self._skipped_runtime = Value('i', 0)
self._skipped_filter = Value('i', 0)
self._skipped_cases = Value('i', 0)
self._error = Value('i', 0)
self._failed = Value('i', 0)
self._total = Value('i', total)
self._cases = Value('i', 0)
self.lock = Lock()
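        # Counters are multiprocessing.Value objects so worker processes can
        # update them safely. Illustrative use (hypothetical):
        #   counter = ExecutionCounter(total=10)
        #   counter.passed += 1   # the property getter and setter each take
        #                         # the Value's own lock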
def summary(self):
logger.debug("--------------------------------")
logger.debug(f"Total Test suites: {self.total}")
logger.debug(f"Total Test cases: {self.cases}")
logger.debug(f"Skipped test cases: {self.skipped_cases}")
logger.debug(f"Completed Testsuites: {self.done}")
logger.debug(f"Passing Testsuites: {self.passed}")
logger.debug(f"Failing Testsuites: {self.failed}")
logger.debug(f"Skipped Testsuites: {self.skipped_configs}")
logger.debug(f"Skipped Testsuites (runtime): {self.skipped_runtime}")
logger.debug(f"Skipped Testsuites (filter): {self.skipped_filter}")
logger.debug(f"Errors: {self.error}")
logger.debug("--------------------------------")
@property
def cases(self):
with self._cases.get_lock():
return self._cases.value
@cases.setter
def cases(self, value):
with self._cases.get_lock():
self._cases.value = value
@property
def skipped_cases(self):
with self._skipped_cases.get_lock():
return self._skipped_cases.value
@skipped_cases.setter
def skipped_cases(self, value):
with self._skipped_cases.get_lock():
self._skipped_cases.value = value
@property
def error(self):
with self._error.get_lock():
return self._error.value
@error.setter
def error(self, value):
with self._error.get_lock():
self._error.value = value
@property
def done(self):
with self._done.get_lock():
return self._done.value
@done.setter
def done(self, value):
with self._done.get_lock():
self._done.value = value
@property
def passed(self):
with self._passed.get_lock():
return self._passed.value
@passed.setter
def passed(self, value):
with self._passed.get_lock():
self._passed.value = value
@property
def skipped_configs(self):
with self._skipped_configs.get_lock():
return self._skipped_configs.value
@skipped_configs.setter
def skipped_configs(self, value):
with self._skipped_configs.get_lock():
self._skipped_configs.value = value
@property
def skipped_filter(self):
with self._skipped_filter.get_lock():
return self._skipped_filter.value
@skipped_filter.setter
def skipped_filter(self, value):
with self._skipped_filter.get_lock():
self._skipped_filter.value = value
@property
def skipped_runtime(self):
with self._skipped_runtime.get_lock():
return self._skipped_runtime.value
@skipped_runtime.setter
def skipped_runtime(self, value):
with self._skipped_runtime.get_lock():
self._skipped_runtime.value = value
@property
def failed(self):
with self._failed.get_lock():
return self._failed.value
@failed.setter
def failed(self, value):
with self._failed.get_lock():
self._failed.value = value
@property
def total(self):
with self._total.get_lock():
return self._total.value
class CMakeCacheEntry:
'''Represents a CMake cache entry.
This class understands the type system in a CMakeCache.txt, and
converts the following cache types to Python types:
Cache Type Python type
---------- -------------------------------------------
FILEPATH str
PATH str
STRING str OR list of str (if ';' is in the value)
BOOL bool
INTERNAL str OR list of str (if ';' is in the value)
---------- -------------------------------------------
'''
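    # A minimal sketch of the conversion, assuming lines taken from a
    # CMakeCache.txt (see from_line() below):
    #   CMakeCacheEntry.from_line("FOO:STRING=bar;baz", 1).value == ['bar', 'baz']
    #   CMakeCacheEntry.from_line("BAR:BOOL=ON", 2).value == 1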
# Regular expression for a cache entry.
#
# CMake variable names can include escape characters, allowing a
# wider set of names than is easy to match with a regular
# expression. To be permissive here, use a non-greedy match up to
# the first colon (':'). This breaks if the variable name has a
# colon inside, but it's good enough.
CACHE_ENTRY = re.compile(
r'''(?P<name>.*?) # name
:(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
=(?P<value>.*) # value
''', re.X)
@classmethod
def _to_bool(cls, val):
# Convert a CMake BOOL string into a Python bool.
#
# "True if the constant is 1, ON, YES, TRUE, Y, or a
# non-zero number. False if the constant is 0, OFF, NO,
# FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
# the suffix -NOTFOUND. Named boolean constants are
# case-insensitive. If the argument is not one of these
# constants, it is treated as a variable."
#
# https://cmake.org/cmake/help/v3.0/command/if.html
val = val.upper()
if val in ('ON', 'YES', 'TRUE', 'Y'):
return 1
elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
return 0
elif val.endswith('-NOTFOUND'):
return 0
else:
try:
v = int(val)
return v != 0
except ValueError as exc:
raise ValueError('invalid bool {}'.format(val)) from exc
@classmethod
def from_line(cls, line, line_no):
# Comments can only occur at the beginning of a line.
# (The value of an entry could contain a comment character).
if line.startswith('//') or line.startswith('#'):
return None
# Whitespace-only lines do not contain cache entries.
if not line.strip():
return None
m = cls.CACHE_ENTRY.match(line)
if not m:
return None
name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
if type_ == 'BOOL':
try:
value = cls._to_bool(value)
except ValueError as exc:
args = exc.args + ('on line {}: {}'.format(line_no, line),)
raise ValueError(args) from exc
elif type_ in ['STRING', 'INTERNAL']:
# If the value is a CMake list (i.e. is a string which
# contains a ';'), convert to a Python list.
if ';' in value:
value = value.split(';')
return CMakeCacheEntry(name, value)
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
fmt = 'CMakeCacheEntry(name={}, value={})'
return fmt.format(self.name, self.value)
class CMakeCache:
'''Parses and represents a CMake cache file.'''
@staticmethod
def from_file(cache_file):
return CMakeCache(cache_file)
def __init__(self, cache_file):
self.cache_file = cache_file
self.load(cache_file)
def load(self, cache_file):
entries = []
with open(cache_file, 'r') as cache:
for line_no, line in enumerate(cache):
entry = CMakeCacheEntry.from_line(line, line_no)
if entry:
entries.append(entry)
self._entries = OrderedDict((e.name, e) for e in entries)
def get(self, name, default=None):
entry = self._entries.get(name)
if entry is not None:
return entry.value
else:
return default
def get_list(self, name, default=None):
if default is None:
default = []
entry = self._entries.get(name)
if entry is not None:
value = entry.value
if isinstance(value, list):
return value
elif isinstance(value, str):
return [value] if value else []
else:
msg = 'invalid value {} type {}'
raise RuntimeError(msg.format(value, type(value)))
else:
return default
def __contains__(self, name):
return name in self._entries
def __getitem__(self, name):
return self._entries[name].value
def __setitem__(self, name, entry):
if not isinstance(entry, CMakeCacheEntry):
msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
raise TypeError(msg.format(type(entry), entry))
self._entries[name] = entry
def __delitem__(self, name):
del self._entries[name]
def __iter__(self):
return iter(self._entries.values())
class TwisterException(Exception):
pass
class TwisterRuntimeError(TwisterException):
pass
class ConfigurationError(TwisterException):
def __init__(self, cfile, message):
TwisterException.__init__(self, cfile + ": " + message)
class BuildError(TwisterException):
pass
class ExecutionError(TwisterException):
pass
class HarnessImporter:
def __init__(self, name):
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
module = __import__("harness")
if name:
my_class = getattr(module, name)
else:
my_class = getattr(module, "Test")
self.instance = my_class()
class Handler:
def __init__(self, instance, type_str="build"):
"""Constructor
"""
self.state = "waiting"
self.run = False
self.type_str = type_str
self.binary = None
self.pid_fn = None
self.call_make_run = False
self.name = instance.name
self.instance = instance
self.timeout = math.ceil(instance.testsuite.timeout * instance.platform.timeout_multiplier)
self.sourcedir = instance.testsuite.source_dir
self.build_dir = instance.build_dir
self.log = os.path.join(self.build_dir, "handler.log")
self.returncode = 0
self.generator = None
self.generator_cmd = None
self.suite_name_check = True
self.args = []
self.terminated = False
def record(self, harness):
if harness.recording:
filename = os.path.join(self.build_dir, "recording.csv")
with open(filename, "at") as csvfile:
                # harness.fieldnames is the CSV header row, not a dialect
                cw = csv.writer(csvfile, lineterminator=os.linesep)
cw.writerow(harness.fieldnames)
for instance in harness.recording:
cw.writerow(instance)
def terminate(self, proc):
        # Encapsulate terminate functionality so we do it consistently wherever
        # we might want to terminate the proc. We need try_kill_process_by_pid
        # because of how both newer ninja (1.6.0 or greater) and .NET / renode
        # work. Newer ninja versions don't seem to pass SIGTERM down to the
        # children, so we need to use try_kill_process_by_pid.
for child in psutil.Process(proc.pid).children(recursive=True):
try:
os.kill(child.pid, signal.SIGTERM)
except ProcessLookupError:
pass
proc.terminate()
# sleep for a while before attempting to kill
time.sleep(0.5)
proc.kill()
self.terminated = True
def _verify_ztest_suite_name(self, harness_state, detected_suite_names, handler_time):
"""
        If test suite names were found in the test's C source code, verify
        that the suite names detected in the output correspond to the
        expected suite names (and not the reverse).
"""
expected_suite_names = self.instance.testsuite.ztest_suite_names
if not expected_suite_names or \
not harness_state == "passed":
return
if not detected_suite_names:
self._missing_suite_name(expected_suite_names, handler_time)
for detected_suite_name in detected_suite_names:
if detected_suite_name not in expected_suite_names:
self._missing_suite_name(expected_suite_names, handler_time)
break
def _missing_suite_name(self, expected_suite_names, handler_time):
"""
        Change the result of the performed test if a problem with a missing
        or improper suite name occurred.
"""
self.instance.status = "failed"
self.instance.execution_time = handler_time
for tc in self.instance.testcases:
tc.status = "failed"
self.instance.reason = f"Testsuite mismatch"
logger.debug("Test suite names were not printed or some of them in " \
"output do not correspond with expected: %s",
str(expected_suite_names))
def _final_handle_actions(self, harness, handler_time):
# only for Ztest tests:
harness_class_name = type(harness).__name__
if self.suite_name_check and harness_class_name == "Test":
self._verify_ztest_suite_name(harness.state, harness.detected_suite_names, handler_time)
if not harness.matched_run_id and harness.run_id_exists:
self.instance.status = "failed"
self.instance.execution_time = handler_time
self.instance.reason = "RunID mismatch"
for tc in self.instance.testcases:
tc.status = "failed"
self.record(harness)
class BinaryHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.call_west_flash = False
# Tool options
self.valgrind = False
self.lsan = False
self.asan = False
self.ubsan = False
self.coverage = False
self.seed = None
def try_kill_process_by_pid(self):
        if self.pid_fn:
            with open(self.pid_fn) as pid_file:
                pid = int(pid_file.read())
            os.unlink(self.pid_fn)
            self.pid_fn = None  # clear so we don't try to kill the binary twice
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
pass
def _output_reader(self, proc):
self.line = proc.stdout.readline()
def _output_handler(self, proc, harness):
if harness.is_pytest:
harness.handle(None)
return
log_out_fp = open(self.log, "wt")
timeout_extended = False
timeout_time = time.time() + self.timeout
while True:
this_timeout = timeout_time - time.time()
if this_timeout < 0:
break
reader_t = threading.Thread(target=self._output_reader, args=(proc,), daemon=True)
reader_t.start()
reader_t.join(this_timeout)
if not reader_t.is_alive():
line = self.line
logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
log_out_fp.write(line.decode('utf-8'))
log_out_fp.flush()
harness.handle(line.decode('utf-8').rstrip())
if harness.state:
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
else:
reader_t.join(0)
break
try:
# POSIX arch based ztests end on their own,
# so let's give it up to 100ms to do so
proc.wait(0.1)
except subprocess.TimeoutExpired:
self.terminate(proc)
log_out_fp.close()
def handle(self):
harness_name = self.instance.testsuite.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
if self.call_make_run:
command = [self.generator_cmd, "run"]
elif self.call_west_flash:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
else:
command = [self.binary]
run_valgrind = False
if self.valgrind:
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full",
"--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
"--log-file=" + self.build_dir + "/valgrind.log",
"--track-origins=yes",
] + command
run_valgrind = True
# Only valid for native_posix
if self.seed is not None:
command = command + ["--seed="+str(self.seed)]
logger.debug("Spawning process: " +
" ".join(shlex.quote(word) for word in command) + os.linesep +
"in directory: " + self.build_dir)
start_time = time.time()
env = os.environ.copy()
if self.asan:
env["ASAN_OPTIONS"] = "log_path=stdout:" + \
env.get("ASAN_OPTIONS", "")
if not self.lsan:
env["ASAN_OPTIONS"] += "detect_leaks=0"
if self.ubsan:
env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
env.get("UBSAN_OPTIONS", "")
with subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
t = threading.Thread(target=self._output_handler, args=(proc, harness,), daemon=True)
t.start()
t.join()
if t.is_alive():
self.terminate(proc)
t.join()
proc.wait()
self.returncode = proc.returncode
self.try_kill_process_by_pid()
handler_time = time.time() - start_time
if self.coverage:
subprocess.call(["GCOV_PREFIX=" + self.build_dir,
"gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)
# FIXME: This is needed when killing the simulator, the console is
# garbled and needs to be reset. Did not find a better way to do that.
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
if harness.is_pytest:
harness.pytest_run(self.log)
self.instance.execution_time = handler_time
if not self.terminated and self.returncode != 0:
self.instance.status = "failed"
if run_valgrind and self.returncode == 2:
self.instance.reason = "Valgrind error"
else:
# When a process is killed, the default handler returns 128 + SIGTERM
# so in that case the return code itself is not meaningful
self.instance.reason = "Failed"
elif harness.state:
self.instance.status = harness.state
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.instance.status = "failed"
self.instance.reason = "Timeout"
self.instance.add_missing_case_status("blocked", "Timeout")
self._final_handle_actions(harness, handler_time)
class DeviceHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.testplan = None
def monitor_serial(self, ser, halt_fileno, harness):
if harness.is_pytest:
harness.handle(None)
return
log_out_fp = open(self.log, "wt")
ser_fileno = ser.fileno()
readlist = [halt_fileno, ser_fileno]
if self.coverage:
# Set capture_coverage to True to indicate that right after
# test results we should get coverage data, otherwise we exit
# from the test.
harness.capture_coverage = True
ser.flush()
while ser.isOpen():
readable, _, _ = select.select(readlist, [], [], self.timeout)
if halt_fileno in readable:
logger.debug('halted')
ser.close()
break
if ser_fileno not in readable:
continue # Timeout.
serial_line = None
try:
serial_line = ser.readline()
except TypeError:
pass
# ignore SerialException which may happen during the serial device
# power off/on process.
except serial.SerialException:
pass
# Just because ser_fileno has data doesn't mean an entire line
# is available yet.
if serial_line:
sl = serial_line.decode('utf-8', 'ignore').lstrip()
logger.debug("DEVICE: {0}".format(sl.rstrip()))
log_out_fp.write(sl)
log_out_fp.flush()
harness.handle(sl.rstrip())
if harness.state:
if not harness.capture_coverage:
ser.close()
break
log_out_fp.close()
def device_is_available(self, instance):
device = instance.platform.name
fixture = instance.testsuite.harness_config.get("fixture")
for d in self.testplan.duts:
if fixture and fixture not in d.fixtures:
continue
if d.platform != device or (d.serial is None and d.serial_pty is None):
continue
            with d.lock:
                avail = False
                if d.available:
                    d.available = 0
                    d.counter += 1
                    avail = True
if avail:
return d
return None
def make_device_available(self, serial):
for d in self.testplan.duts:
if serial in [d.serial_pty, d.serial]:
d.available = 1
@staticmethod
def run_custom_script(script, timeout):
with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
stdout, stderr = proc.communicate(timeout=timeout)
logger.debug(stdout.decode())
if proc.returncode != 0:
logger.error(f"Custom script failure: {stderr.decode(errors='ignore')}")
except subprocess.TimeoutExpired:
proc.kill()
proc.communicate()
logger.error("{} timed out".format(script))
def handle(self):
runner = None
hardware = self.device_is_available(self.instance)
while not hardware:
logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
time.sleep(1)
hardware = self.device_is_available(self.instance)
runner = hardware.runner or self.testplan.west_runner
serial_pty = hardware.serial_pty
ser_pty_process = None
if serial_pty:
master, slave = pty.openpty()
try:
ser_pty_process = subprocess.Popen(re.split(',| ', serial_pty), stdout=master, stdin=master, stderr=master)
except subprocess.CalledProcessError as error:
logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output))
return
serial_device = os.ttyname(slave)
else:
serial_device = hardware.serial
logger.debug(f"Using serial device {serial_device} @ {hardware.baud} baud")
if (self.testplan.west_flash is not None) or runner:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
command_extra_args = []
# There are three ways this option is used.
# 1) bare: --west-flash
# This results in options.west_flash == []
# 2) with a value: --west-flash="--board-id=42"
# This results in options.west_flash == "--board-id=42"
# 3) Multiple values: --west-flash="--board-id=42,--erase"
            #    This results in options.west_flash == "--board-id=42,--erase"
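            # e.g. (illustrative) --west-flash="--board-id=42,--erase" ends up
            # invoking:
            #   west flash --skip-rebuild -d <build_dir> -- --board-id=42 --erase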
if self.testplan.west_flash and self.testplan.west_flash != []:
command_extra_args.extend(self.testplan.west_flash.split(','))
if runner:
command.append("--runner")
command.append(runner)
board_id = hardware.probe_id or hardware.id
product = hardware.product
if board_id is not None:
if runner == "pyocd":
command_extra_args.append("--board-id")
command_extra_args.append(board_id)
elif runner == "nrfjprog":
command_extra_args.append("--dev-id")
command_extra_args.append(board_id)
elif runner == "openocd" and product == "STM32 STLink":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "STLINK-V3":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "EDBG CMSIS-DAP":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("cmsis_dap_serial %s" % (board_id))
elif runner == "jlink":
command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
elif runner == "stm32cubeprogrammer":
command.append("--tool-opt=sn=%s" % (board_id))
elif runner == "intel_adsp":
command.append("--pty")
            # Receive parameters from a runner_params field
# of the specified hardware map file.
for d in self.testplan.duts:
if (d.platform == self.instance.platform.name) and d.runner_params:
for param in d.runner_params:
command.append(param)
if command_extra_args != []:
command.append('--')
command.extend(command_extra_args)
else:
command = [self.generator_cmd, "-C", self.build_dir, "flash"]
pre_script = hardware.pre_script
post_flash_script = hardware.post_flash_script
post_script = hardware.post_script
if pre_script:
self.run_custom_script(pre_script, 30)
try:
ser = serial.Serial(
serial_device,
baudrate=hardware.baud,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=self.timeout
)
except serial.SerialException as e:
self.instance.status = "failed"
self.instance.reason = "Serial Device Error"
logger.error("Serial device error: %s" % (str(e)))
self.instance.add_missing_case_status("blocked", "Serial Device Error")
if serial_pty and ser_pty_process:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
if serial_pty:
self.make_device_available(serial_pty)
else:
self.make_device_available(serial_device)
return
ser.flush()
        # ser.flush() alone is not enough to clear serial leftover from the
        # last case; an explicit readline() does it reliably
        old_timeout = ser.timeout
        ser.timeout = 1  # wait up to 1 s if there is no serial output
        # readlines() stops after roughly 1000 bytes of leftover lines
        leftover_lines = ser.readlines(1000)
for line in leftover_lines:
logger.debug(f"leftover log of previous test: {line}")
ser.timeout = old_timeout
harness_name = self.instance.testsuite.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
read_pipe, write_pipe = os.pipe()
start_time = time.time()
t = threading.Thread(target=self.monitor_serial, daemon=True,
args=(ser, read_pipe, harness))
t.start()
d_log = "{}/device.log".format(self.instance.build_dir)
logger.debug('Flash command: %s', command)
try:
stdout = stderr = None
with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
(stdout, stderr) = proc.communicate(timeout=60)
# ignore unencodable unicode chars
logger.debug(stdout.decode(errors = "ignore"))
if proc.returncode != 0:
self.instance.status = "error"
self.instance.reason = "Device issue (Flash error?)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
os.write(write_pipe, b'x') # halt the thread
except subprocess.TimeoutExpired:
logger.warning("Flash operation timed out.")
proc.kill()
(stdout, stderr) = proc.communicate()
self.instance.status = "error"
self.instance.reason = "Device issue (Timeout)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.CalledProcessError:
os.write(write_pipe, b'x') # halt the thread
if post_flash_script:
self.run_custom_script(post_flash_script, 30)
t.join(self.timeout)
if t.is_alive():
logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
if ser.isOpen():
ser.close()
if serial_pty:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
os.close(write_pipe)
os.close(read_pipe)
handler_time = time.time() - start_time
if harness.is_pytest:
harness.pytest_run(self.log)
self.instance.execution_time = handler_time
if harness.state:
self.instance.status = harness.state
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.instance.status = "error"
self.instance.reason = "No Console Output(Timeout)"
if self.instance.status == "error":
self.instance.add_missing_case_status("blocked", self.instance.reason)
self._final_handle_actions(harness, handler_time)
if post_script:
self.run_custom_script(post_script, 30)
if serial_pty:
self.make_device_available(serial_pty)
else:
self.make_device_available(serial_device)
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
We need to do this as once qemu starts, it runs forever until killed.
Test cases emit special messages to the console as they run, we check
for these to collect whether the test passed or failed.
"""
def __init__(self, instance, type_str):
"""Constructor
@param instance Test instance
"""
super().__init__(instance, type_str)
self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")
if "ignore_qemu_crash" in instance.testsuite.tags:
self.ignore_qemu_crash = True
self.ignore_unexpected_eof = True
else:
self.ignore_qemu_crash = False
self.ignore_unexpected_eof = False
@staticmethod
def _get_cpu_time(pid):
"""get process CPU time.
        The guest virtual time in QEMU icount mode isn't host time; it is
        maintained by counting guest instructions, so we use the QEMU
        process's execution time to approximate the time of the guest OS.
"""
proc = psutil.Process(pid)
cpu_time = proc.cpu_times()
return cpu_time.user + cpu_time.system
@staticmethod
def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness,
ignore_unexpected_eof=False):
fifo_in = fifo_fn + ".in"
fifo_out = fifo_fn + ".out"
# These in/out nodes are named from QEMU's perspective, not ours
if os.path.exists(fifo_in):
os.unlink(fifo_in)
os.mkfifo(fifo_in)
if os.path.exists(fifo_out):
os.unlink(fifo_out)
os.mkfifo(fifo_out)
# We don't do anything with out_fp but we need to open it for
# writing so that QEMU doesn't block, due to the way pipes work
out_fp = open(fifo_in, "wb")
# Disable internal buffering, we don't
# want read() or poll() to ever block if there is data in there
in_fp = open(fifo_out, "rb", buffering=0)
log_out_fp = open(logfile, "wt")
start_time = time.time()
timeout_time = start_time + timeout
p = select.poll()
p.register(in_fp, select.POLLIN)
out_state = None
line = ""
timeout_extended = False
pid = 0
if os.path.exists(pid_fn):
            with open(pid_fn) as pid_file:
                pid = int(pid_file.read())
while True:
this_timeout = int((timeout_time - time.time()) * 1000)
if this_timeout < 0 or not p.poll(this_timeout):
try:
if pid and this_timeout > 0:
                        # There's a possibility we polled nothing because the
                        # host scheduled too little CPU time to the QEMU
                        # process during p.poll(this_timeout)
cpu_time = QEMUHandler._get_cpu_time(pid)
if cpu_time < timeout and not out_state:
timeout_time = time.time() + (timeout - cpu_time)
continue
except ProcessLookupError:
out_state = "failed"
break
if not out_state:
out_state = "timeout"
break
if pid == 0 and os.path.exists(pid_fn):
                with open(pid_fn) as pid_file:
                    pid = int(pid_file.read())
if harness.is_pytest:
harness.handle(None)
out_state = harness.state
break
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
# Test is writing something weird, fail
out_state = "unexpected byte"
break
if c == "":
# EOF, this shouldn't happen unless QEMU crashes
if not ignore_unexpected_eof:
out_state = "unexpected eof"
break
line = line + c
if c != "\n":
continue
# line contains a full line of data output from QEMU
log_out_fp.write(line)
log_out_fp.flush()
line = line.strip()
logger.debug(f"QEMU ({pid}): {line}")
harness.handle(line)
if harness.state:
# if we have registered a fail make sure the state is not
# overridden by a false success message coming from the
# testsuite
if out_state not in ['failed', 'unexpected eof', 'unexpected byte']:
out_state = harness.state
# if we get some state, that means test is doing well, we reset
# the timeout and wait for 2 more seconds to catch anything
# printed late. We wait much longer if code
# coverage is enabled since dumping this information can
# take some time.
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
line = ""
if harness.is_pytest:
harness.pytest_run(logfile)
out_state = harness.state
handler_time = time.time() - start_time
logger.debug(f"QEMU ({pid}) complete ({out_state}) after {handler_time} seconds")
handler.instance.execution_time = handler_time
if out_state == "timeout":
handler.instance.status = "failed"
handler.instance.reason = "Timeout"
elif out_state == "failed":
handler.instance.status = "failed"
handler.instance.reason = "Failed"
elif out_state in ['unexpected eof', 'unexpected byte']:
handler.instance.status = "failed"
handler.instance.reason = out_state
else:
handler.instance.status = out_state
handler.instance.reason = "Unknown"
log_out_fp.close()
out_fp.close()
in_fp.close()
        if pid:
            try:
                os.kill(pid, signal.SIGTERM)
            except ProcessLookupError:
                # Oh well, as long as it's dead! User probably sent Ctrl-C
                pass
os.unlink(fifo_in)
os.unlink(fifo_out)
def handle(self):
self.results = {}
self.run = True
# We pass this to QEMU which looks for fifos with .in and .out
# suffixes.
self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
self.log_fn = self.log
harness_import = HarnessImporter(self.instance.testsuite.harness.capitalize())
harness = harness_import.instance
harness.configure(self.instance)
self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
args=(self, self.timeout, self.build_dir,
self.log_fn, self.fifo_fn,
self.pid_fn, self.results, harness,
self.ignore_unexpected_eof))
self.thread.daemon = True
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
self.thread.start()
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
logger.debug("Running %s (%s)" % (self.name, self.type_str))
command = [self.generator_cmd]
command += ["-C", self.build_dir, "run"]
is_timeout = False
qemu_pid = None
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
try:
proc.wait(self.timeout)
except subprocess.TimeoutExpired:
# sometimes QEMU can't handle SIGTERM signal correctly
# in that case kill -9 QEMU process directly and leave
# twister to judge testing result by console output
is_timeout = True
self.terminate(proc)
if harness.state == "passed":
self.returncode = 0
else:
self.returncode = proc.returncode
else:
if os.path.exists(self.pid_fn):
                    with open(self.pid_fn) as pid_file:
                        qemu_pid = int(pid_file.read())
logger.debug(f"No timeout, return code from QEMU ({qemu_pid}): {proc.returncode}")
self.returncode = proc.returncode
# Need to wait for harness to finish processing
# output from QEMU. Otherwise it might miss some
# error messages.
self.thread.join(0)
if self.thread.is_alive():
logger.debug("Timed out while monitoring QEMU output")
if os.path.exists(self.pid_fn):
            with open(self.pid_fn) as pid_file:
                qemu_pid = int(pid_file.read())
os.unlink(self.pid_fn)
logger.debug(f"return code from QEMU ({qemu_pid}): {self.returncode}")
if (self.returncode != 0 and not self.ignore_qemu_crash) or not harness.state:
self.instance.status = "failed"
if is_timeout:
self.instance.reason = "Timeout"
else:
self.instance.reason = "Exited with {}".format(self.returncode)
self.instance.add_missing_case_status("blocked")
self._final_handle_actions(harness, 0)
def get_fifo(self):
return self.fifo_fn
class SizeCalculator:
alloc_sections = [
"bss",
"noinit",
"app_bss",
"app_noinit",
"ccm_bss",
"ccm_noinit"
]
rw_sections = [
"datas",
"initlevel",
"exceptions",
"initshell",
"_static_thread_data_area",
"k_timer_area",
"k_mem_slab_area",
"k_mem_pool_area",
"sw_isr_table",
"k_sem_area",
"k_mutex_area",
"app_shmem_regions",
"_k_fifo_area",
"_k_lifo_area",
"k_stack_area",
"k_msgq_area",
"k_mbox_area",
"k_pipe_area",
"net_if_area",
"net_if_dev_area",
"net_l2_area",
"net_l2_data",
"k_queue_area",
"_net_buf_pool_area",
"app_datas",
"kobject_data",
"mmu_tables",
"app_pad",
"priv_stacks",
"ccm_data",
"usb_descriptor",
"usb_data", "usb_bos_desc",
"uart_mux",
'log_backends_sections',
'log_dynamic_sections',
'log_const_sections',
"app_smem",
'shell_root_cmds_sections',
'log_const_sections',
"font_entry_sections",
"priv_stacks_noinit",
"_GCOV_BSS_SECTION_NAME",
"gcov",
"nocache",
"devices",
"k_heap_area",
]
# These get copied into RAM only on non-XIP
ro_sections = [
"rom_start",
"text",
"ctors",
"init_array",
"reset",
"z_object_assignment_area",
"rodata",
"net_l2",
"vector",
"sw_isr_table",
"settings_handler_static_area",
"bt_l2cap_fixed_chan_area",
"bt_l2cap_br_fixed_chan_area",
"bt_gatt_service_static_area",
"vectors",
"net_socket_register_area",
"net_ppp_proto",
"shell_area",
"tracing_backend_area",
"ppp_protocol_handler_area",
]
def __init__(self, filename, extra_sections):
"""Constructor
@param filename Path to the output binary
The <filename> is parsed by objdump to determine section sizes
"""
# Make sure this is an ELF binary
with open(filename, "rb") as f:
magic = f.read(4)
try:
if magic != b'\x7fELF':
raise TwisterRuntimeError("%s is not an ELF binary" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
# Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
        # GREP cannot be used as it returns an error if the symbol is not
# found.
is_xip_command = "nm " + filename + \
" | awk '/CONFIG_XIP/ { print $3 }'"
is_xip_output = subprocess.check_output(
is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
"utf-8").strip()
try:
if is_xip_output.endswith("no symbols"):
raise TwisterRuntimeError("%s has no symbol information" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
self.is_xip = (len(is_xip_output) != 0)
self.filename = filename
self.sections = []
self.rom_size = 0
self.ram_size = 0
self.extra_sections = extra_sections
self._calculate_sizes()
def get_ram_size(self):
"""Get the amount of RAM the application will use up on the device
@return amount of RAM, in bytes
"""
return self.ram_size
def get_rom_size(self):
"""Get the size of the data that this application uses on device's flash
@return amount of ROM, in bytes
"""
return self.rom_size
def unrecognized_sections(self):
"""Get a list of sections inside the binary that weren't recognized
@return list of unrecognized section names
"""
slist = []
for v in self.sections:
if not v["recognized"]:
slist.append(v["name"])
return slist
def _calculate_sizes(self):
""" Calculate RAM and ROM usage by section """
objdump_command = "objdump -h " + self.filename
objdump_output = subprocess.check_output(
objdump_command, shell=True).decode("utf-8").splitlines()
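        # A typical "objdump -h" row looks like (illustrative):
        #   Idx Name  Size      VMA       LMA       File off  Algn
        #    1  text  00002f30  00000000  00000000  000000b4  2**2
        # so words[2] is the size, words[3] the VMA and words[4] the LMA.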
for line in objdump_output:
words = line.split()
            if not words:  # Skip blank lines
continue
index = words[0]
if not index[0].isdigit(): # Skip lines that do not start
continue # with a digit
name = words[1] # Skip lines with section names
if name[0] == '.': # starting with '.'
continue
# TODO this doesn't actually reflect the size in flash or RAM as
# it doesn't include linker-imposed padding between sections.
# It is close though.
size = int(words[2], 16)
if size == 0:
continue
load_addr = int(words[4], 16)
virt_addr = int(words[3], 16)
# Add section to memory use totals (for both non-XIP and XIP scenarios)
# Unrecognized section names are not included in the calculations.
recognized = True
if name in SizeCalculator.alloc_sections:
self.ram_size += size
stype = "alloc"
elif name in SizeCalculator.rw_sections:
self.ram_size += size
self.rom_size += size
stype = "rw"
elif name in SizeCalculator.ro_sections:
self.rom_size += size
if not self.is_xip:
self.ram_size += size
stype = "ro"
else:
stype = "unknown"
if name not in self.extra_sections:
recognized = False
self.sections.append({"name": name, "load_addr": load_addr,
"size": size, "virt_addr": virt_addr,
"type": stype, "recognized": recognized})
class TwisterConfigParser:
"""Class to read test case files with semantic checking
"""
def __init__(self, filename, schema):
"""Instantiate a new TwisterConfigParser object
@param filename Source .yaml file to read
"""
self.data = {}
self.schema = schema
self.filename = filename
self.tests = {}
self.common = {}
def load(self):
self.data = scl.yaml_load_verify(self.filename, self.schema)
if 'tests' in self.data:
self.tests = self.data['tests']
if 'common' in self.data:
self.common = self.data['common']
def _cast_value(self, value, typestr):
if isinstance(value, str):
v = value.strip()
if typestr == "str":
return v
elif typestr == "float":
return float(value)
elif typestr == "int":
return int(value)
elif typestr == "bool":
return value
elif typestr.startswith("list") and isinstance(value, list):
return value
elif typestr.startswith("list") and isinstance(value, str):
vs = v.split()
if len(typestr) > 4 and typestr[4] == ":":
return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
else:
return vs
elif typestr.startswith("set"):
vs = v.split()
if len(typestr) > 3 and typestr[3] == ":":
return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
else:
return set(vs)
elif typestr.startswith("map"):
return value
else:
raise ConfigurationError(
self.filename, "unknown type '%s'" % value)
def get_test(self, name, valid_keys):
"""Get a dictionary representing the keys/values within a test
@param name The test in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
for this test. Each key in this dictionary is a key that could
be specified, if a key is given in the .yaml file which isn't in
here, it will generate an error. Each value in this dictionary
is another dictionary containing metadata:
"default" - Default value if not given
"type" - Data type to convert the text value to. Simple types
supported are "str", "float", "int", "bool" which will get
converted to respective Python data types. "set" and "list"
may also be specified which will split the value by
whitespace (but keep the elements as strings). finally,
"list:<type>" and "set:<type>" may be given which will
perform a type conversion after splitting the value up.
"required" - If true, raise an error if not defined. If false
and "default" isn't specified, a type conversion will be
done on an empty string
@return A dictionary containing the test key-value pairs with
type conversion and default values filled in per valid_keys
"""
d = {}
for k, v in self.common.items():
d[k] = v
for k, v in self.tests[name].items():
if k in d:
if isinstance(d[k], str):
# By default, we just concatenate string values of keys
# which appear both in "common" and per-test sections,
                    # but some keys are handled in an ad hoc way based on
                    # their semantics.
if k == "filter":
d[k] = "(%s) and (%s)" % (d[k], v)
else:
d[k] += " " + v
else:
d[k] = v
for k, kinfo in valid_keys.items():
if k not in d:
if "required" in kinfo:
required = kinfo["required"]
else:
required = False
if required:
raise ConfigurationError(
self.filename,
"missing required value for '%s' in test '%s'" %
(k, name))
else:
if "default" in kinfo:
default = kinfo["default"]
else:
default = self._cast_value("", kinfo["type"])
d[k] = default
else:
try:
d[k] = self._cast_value(d[k], kinfo["type"])
except ValueError:
raise ConfigurationError(
self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
(kinfo["type"], d[k], k, name))
return d
class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "platform-schema.yaml"))
def __init__(self):
"""Constructor.
"""
self.name = ""
self.twister = True
# if no RAM size is specified by the board, take a default of 128K
self.ram = 128
self.timeout_multiplier = 1.0
self.ignore_tags = []
self.only_tags = []
self.default = False
# if no flash size is specified by the board, take a default of 512K
self.flash = 512
self.supported = set()
self.arch = ""
self.type = "na"
self.simulation = "na"
self.supported_toolchains = []
self.env = []
self.env_satisfied = True
self.filter_data = dict()
def load(self, platform_file):
scp = TwisterConfigParser(platform_file, self.platform_schema)
scp.load()
data = scp.data
self.name = data['identifier']
self.twister = data.get("twister", True)
# if no RAM size is specified by the board, take a default of 128K
self.ram = data.get("ram", 128)
testing = data.get("testing", {})
self.timeout_multiplier = testing.get("timeout_multiplier", 1.0)
self.ignore_tags = testing.get("ignore_tags", [])
self.only_tags = testing.get("only_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = data.get("flash", 512)
self.supported = set()
for supp_feature in data.get("supported", []):
for item in supp_feature.split(":"):
self.supported.add(item)
self.arch = data['arch']
self.type = data.get('type', "na")
self.simulation = data.get('simulation', "na")
self.supported_toolchains = data.get("toolchain", [])
self.env = data.get("env", [])
self.env_satisfied = True
for env in self.env:
if not os.environ.get(env, None):
self.env_satisfied = False
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
__test__ = False
class ScanPathResult:
"""Result of the TestSuite.scan_path function call.
Attributes:
matches A list of test cases
warnings A string containing one or more
warnings to display
has_registered_test_suites Whether or not the path contained any
calls to the ztest_register_test_suite
macro.
has_run_registered_test_suites Whether or not the path contained at
least one call to
ztest_run_registered_test_suites.
has_test_main Whether or not the path contains a
definition of test_main(void)
ztest_suite_names Names of found ztest suites
"""
def __init__(self,
matches: List[str] = None,
warnings: str = None,
has_registered_test_suites: bool = False,
has_run_registered_test_suites: bool = False,
has_test_main: bool = False,
                 ztest_suite_names: List[str] = None):
self.matches = matches
self.warnings = warnings
self.has_registered_test_suites = has_registered_test_suites
self.has_run_registered_test_suites = has_run_registered_test_suites
self.has_test_main = has_test_main
        # guard against the shared-mutable-default pitfall
        self.ztest_suite_names = ztest_suite_names if ztest_suite_names is not None else []
def __eq__(self, other):
if not isinstance(other, ScanPathResult):
return False
return (sorted(self.matches) == sorted(other.matches) and
self.warnings == other.warnings and
(self.has_registered_test_suites ==
other.has_registered_test_suites) and
(self.has_run_registered_test_suites ==
other.has_run_registered_test_suites) and
self.has_test_main == other.has_test_main and
(sorted(self.ztest_suite_names) ==
sorted(other.ztest_suite_names)))
class TestCase(DisablePyTestCollectionMixin):
def __init__(self, name=None, testsuite=None):
self.duration = 0
self.name = name
self.status = None
self.reason = None
self.testsuite = testsuite
self.output = ""
self.freeform = False
def __lt__(self, other):
return self.name < other.name
def __repr__(self):
return "<TestCase %s with %s>" % (self.name, self.status)
def __str__(self):
return self.name
class TestSuite(DisablePyTestCollectionMixin):
"""Class representing a test application
"""
def __init__(self, testsuite_root, workdir, name):
"""TestSuite constructor.
This gets called by TestPlan as it finds and reads test yaml files.
        Multiple TestSuite instances may be generated from a single testcase.yaml,
        each one corresponding to an entry within that file.
We need to have a unique name for every single test case. Since
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testsuite_root os.path.abspath() of one of the --testcase-root
@param workdir Sub-directory of testsuite_root where the
.yaml test configuration file was found
@param name Name of this test case, corresponding to the entry name
            in the test case configuration file. For many test cases that just
            define one test, this can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.yaml defines multiple tests
"""
self.source_dir = ""
self.yamlfile = ""
self.testcases = []
self.name = self.get_unique(testsuite_root, workdir, name)
self.id = name
self.type = None
self.tags = set()
self.extra_args = None
self.extra_configs = None
self.arch_allow = None
self.arch_exclude = None
self.skip = False
self.platform_exclude = None
self.platform_allow = None
self.platform_type = []
self.toolchain_exclude = None
self.toolchain_allow = None
self.ts_filter = None
self.timeout = 60
self.harness = ""
self.harness_config = {}
self.build_only = True
self.build_on_all = False
self.slow = False
self.min_ram = -1
self.depends_on = None
self.min_flash = -1
self.extra_sections = None
self.integration_platforms = []
self.ztest_suite_names = []
def add_testcase(self, name, freeform=False):
tc = TestCase(name=name, testsuite=self)
tc.freeform = freeform
self.testcases.append(tc)
@staticmethod
def get_unique(testsuite_root, workdir, name):
canonical_testsuite_root = os.path.realpath(testsuite_root)
if Path(canonical_zephyr_base) in Path(canonical_testsuite_root).parents:
# This is in ZEPHYR_BASE, so include path in name for uniqueness
# FIXME: We should not depend on path of test for unique names.
relative_tc_root = os.path.relpath(canonical_testsuite_root,
start=canonical_zephyr_base)
else:
relative_tc_root = ""
# workdir can be "."
unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
check = name.split(".")
if len(check) < 2:
raise TwisterException(f"""bad test name '{name}' in {testsuite_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
"""
)
return unique
def scan_file(self, inf_name):
regular_suite_regex = re.compile(
# do not match until end-of-line, otherwise we won't allow
            # the testcase regex used later to catch the ones that are
            # declared on the same line, as we only search starting at the
            # end of this match
br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
registered_suite_regex = re.compile(
br"^\s*ztest_register_test_suite"
br"\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
new_suite_regex = re.compile(
br"^\s*ZTEST_SUITE\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
# Checks if the file contains a definition of "void test_main(void)"
# Since ztest provides a plain test_main implementation it is OK to:
# 1. register test suites and not call the run function iff the test
# doesn't have a custom test_main.
# 2. register test suites and a custom test_main definition iff the test
# also calls ztest_run_registered_test_suites.
test_main_regex = re.compile(
br"^\s*void\s+test_main\(void\)",
re.MULTILINE)
registered_suite_run_regex = re.compile(
br"^\s*ztest_run_registered_test_suites\("
br"(\*+|&)?(?P<state_identifier>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
warnings = None
has_registered_test_suites = False
has_run_registered_test_suites = False
has_test_main = False
with open(inf_name) as inf:
if os.name == 'nt':
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
else:
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
'offset': 0}
with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
regular_suite_regex_matches = \
[m for m in regular_suite_regex.finditer(main_c)]
registered_suite_regex_matches = \
[m for m in registered_suite_regex.finditer(main_c)]
new_suite_regex_matches = \
[m for m in new_suite_regex.finditer(main_c)]
if registered_suite_regex_matches:
has_registered_test_suites = True
if registered_suite_run_regex.search(main_c):
has_run_registered_test_suites = True
if test_main_regex.search(main_c):
has_test_main = True
if regular_suite_regex_matches:
ztest_suite_names = \
self._extract_ztest_suite_names(regular_suite_regex_matches)
testcase_names, warnings = \
self._find_regular_ztest_testcases(main_c, regular_suite_regex_matches, has_registered_test_suites)
elif registered_suite_regex_matches:
ztest_suite_names = \
self._extract_ztest_suite_names(registered_suite_regex_matches)
testcase_names, warnings = \
self._find_regular_ztest_testcases(main_c, registered_suite_regex_matches, has_registered_test_suites)
elif new_suite_regex_matches:
ztest_suite_names = \
self._extract_ztest_suite_names(new_suite_regex_matches)
testcase_names, warnings = \
self._find_new_ztest_testcases(main_c)
else:
                    # can't find ztest_test_suite; this file may just be a
                    # client that includes ztest.h
ztest_suite_names = []
testcase_names, warnings = None, None
return ScanPathResult(
matches=testcase_names,
warnings=warnings,
has_registered_test_suites=has_registered_test_suites,
has_run_registered_test_suites=has_run_registered_test_suites,
has_test_main=has_test_main,
ztest_suite_names=ztest_suite_names)
@staticmethod
def _extract_ztest_suite_names(suite_regex_matches):
ztest_suite_names = \
[m.group("suite_name") for m in suite_regex_matches]
ztest_suite_names = \
[name.decode("UTF-8") for name in ztest_suite_names]
return ztest_suite_names
def _find_regular_ztest_testcases(self, search_area, suite_regex_matches, is_registered_test_suite):
"""
Find regular ztest testcases like "ztest_unit_test" or similar. Return
        testcase names and any warnings that were found.
"""
testcase_regex = re.compile(
br"""^\s* # empty space at the beginning is ok
# catch the case where it is declared in the same sentence, e.g:
#
# ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
# ztest_register_test_suite(n, p, ztest_user_unit_test(TESTNAME),
(?:ztest_
(?:test_suite\(|register_test_suite\([a-zA-Z0-9_]+\s*,\s*)
[a-zA-Z0-9_]+\s*,\s*
)?
# Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?
# Consume the argument that becomes the extra testcase
\(\s*(?P<testcase_name>[a-zA-Z0-9_]+)
# _setup_teardown() variant has two extra arguments that we ignore
(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?
\s*\)""",
# We don't check how it finishes; we don't care
re.MULTILINE | re.VERBOSE)
achtung_regex = re.compile(
br"(#ifdef|#endif)",
re.MULTILINE)
search_start, search_end = \
self._get_search_area_boundary(search_area, suite_regex_matches, is_registered_test_suite)
limited_search_area = search_area[search_start:search_end]
testcase_names, warnings = \
self._find_ztest_testcases(limited_search_area, testcase_regex)
achtung_matches = re.findall(achtung_regex, limited_search_area)
if achtung_matches and warnings is None:
achtung = ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True))
warnings = f"found invalid {achtung} in ztest_test_suite()"
return testcase_names, warnings
@staticmethod
def _get_search_area_boundary(search_area, suite_regex_matches, is_registered_test_suite):
"""
Get search area boundary based on "ztest_test_suite(...)",
"ztest_register_test_suite(...)" or "ztest_run_test_suite(...)"
functions occurrence.
"""
suite_run_regex = re.compile(
br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
search_start = suite_regex_matches[0].end()
suite_run_match = suite_run_regex.search(search_area)
if suite_run_match:
search_end = suite_run_match.start()
elif not suite_run_match and not is_registered_test_suite:
raise ValueError("can't find ztest_run_test_suite")
else:
search_end = re.compile(br"\);", re.MULTILINE) \
.search(search_area, search_start) \
.end()
return search_start, search_end
def _find_new_ztest_testcases(self, search_area):
"""
Find regular ztest testcases like "ZTEST" or "ZTEST_F". Return
testcases' names and eventually found warnings.
"""
testcase_regex = re.compile(
br"^\s*(?:ZTEST|ZTEST_F)\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,"
br"\s*(?P<testcase_name>[a-zA-Z0-9_]+)\s*",
re.MULTILINE)
return self._find_ztest_testcases(search_area, testcase_regex)
@staticmethod
def _find_ztest_testcases(search_area, testcase_regex):
"""
Parse search area and try to find testcases defined in testcase_regex
        argument. Return testcase names and any warnings that were found.
"""
testcase_regex_matches = \
[m for m in testcase_regex.finditer(search_area)]
testcase_names = \
[m.group("testcase_name") for m in testcase_regex_matches]
testcase_names = [name.decode("UTF-8") for name in testcase_names]
warnings = None
for testcase_name in testcase_names:
if not testcase_name.startswith("test_"):
warnings = "Found a test that does not start with test_"
testcase_names = \
[tc_name.replace("test_", "", 1) for tc_name in testcase_names]
return testcase_names, warnings
def scan_path(self, path):
subcases = []
has_registered_test_suites = False
has_run_registered_test_suites = False
has_test_main = False
ztest_suite_names = []
src_dir_path = self._find_src_dir_path(path)
for filename in glob.glob(os.path.join(src_dir_path, "*.c*")):
try:
result: ScanPathResult = self.scan_file(filename)
if result.warnings:
logger.error("%s: %s" % (filename, result.warnings))
raise TwisterRuntimeError(
"%s: %s" % (filename, result.warnings))
if result.matches:
subcases += result.matches
if result.has_registered_test_suites:
has_registered_test_suites = True
if result.has_run_registered_test_suites:
has_run_registered_test_suites = True
if result.has_test_main:
has_test_main = True
if result.ztest_suite_names:
ztest_suite_names += result.ztest_suite_names
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
for filename in glob.glob(os.path.join(path, "*.c")):
try:
result: ScanPathResult = self.scan_file(filename)
if result.warnings:
logger.error("%s: %s" % (filename, result.warnings))
if result.matches:
subcases += result.matches
if result.ztest_suite_names:
ztest_suite_names += result.ztest_suite_names
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
if (has_registered_test_suites and has_test_main and
not has_run_registered_test_suites):
warning = \
"Found call to 'ztest_register_test_suite()' but no "\
"call to 'ztest_run_registered_test_suites()'"
logger.error(warning)
raise TwisterRuntimeError(warning)
return subcases, ztest_suite_names
def parse_subcases(self, test_path):
subcases, ztest_suite_names = self.scan_path(test_path)
# if testcases are provided as part of the yaml, skip this step.
if not self.testcases:
# only add each testcase once
for sub in set(subcases):
name = "{}.{}".format(self.id, sub)
self.add_testcase(name)
if not subcases:
self.add_testcase(self.id, freeform=True)
self.ztest_suite_names = ztest_suite_names
@staticmethod
def _find_src_dir_path(test_dir_path):
"""
        Try to find the src directory with the test source code. Sometimes,
        for optimization reasons, it is placed in the parent directory.
"""
src_dir_name = "src"
src_dir_path = os.path.join(test_dir_path, src_dir_name)
if os.path.isdir(src_dir_path):
return src_dir_path
src_dir_path = os.path.join(test_dir_path, "..", src_dir_name)
if os.path.isdir(src_dir_path):
return src_dir_path
return ""
def __str__(self):
return self.name
class TestInstance(DisablePyTestCollectionMixin):
"""Class representing the execution of a particular TestSuite on a platform
@param test The TestSuite object we want to build/execute
@param platform Platform object that we want to build and run against
@param base_outdir Base directory for all test results. The actual
out directory used is <outdir>/<platform>/<test case name>
"""
def __init__(self, testsuite, platform, outdir):
self.testsuite = testsuite
self.platform = platform
self.status = None
self.filters = []
self.reason = "Unknown"
self.filter_type = None
self.metrics = dict()
self.handler = None
self.outdir = outdir
self.execution_time = 0
self.name = os.path.join(platform.name, testsuite.name)
self.run_id = self._get_run_id()
self.build_dir = os.path.join(outdir, platform.name, testsuite.name)
self.run = False
self.testcases = []
self.init_cases()
    # FIXME: Works around an issue with copying testcase objects from the
    # testsuite; needs a better solution.
def init_cases(self):
for c in self.testsuite.testcases:
self.add_testcase(c.name, freeform=c.freeform)
def _get_run_id(self):
""" generate run id from instance unique identifier and a random
number"""
hash_object = hashlib.md5(self.name.encode())
random_str = f"{random.getrandbits(64)}".encode()
hash_object.update(random_str)
return hash_object.hexdigest()
def add_filter(self, reason, filter_type):
        self.filters.append({'type': filter_type, 'reason': reason})
self.status = "filtered"
self.reason = reason
self.filter_type = filter_type
def add_missing_case_status(self, status, reason=None):
for case in self.testcases:
if not case.status:
case.status = status
if reason:
case.reason = reason
else:
case.reason = self.reason
def __getstate__(self):
d = self.__dict__.copy()
return d
def __setstate__(self, d):
self.__dict__.update(d)
def __lt__(self, other):
return self.name < other.name
def set_case_status_by_name(self, name, status, reason=None):
tc = self.get_case_or_create(name)
tc.status = status
if reason:
tc.reason = reason
return tc
def add_testcase(self, name, freeform=False):
tc = TestCase(name=name)
tc.freeform = freeform
self.testcases.append(tc)
return tc
def get_case_by_name(self, name):
for c in self.testcases:
if c.name == name:
return c
return None
def get_case_or_create(self, name):
for c in self.testcases:
if c.name == name:
return c
logger.debug(f"Could not find a matching testcase for {name}")
tc = TestCase(name=name)
self.testcases.append(tc)
return tc
@staticmethod
def testsuite_runnable(testsuite, fixtures):
can_run = False
# console harness allows us to run the test and capture data.
        if testsuite.harness in ['console', 'ztest', 'pytest']:
can_run = True
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = testsuite.harness_config.get('fixture')
if fixture:
can_run = (fixture in fixtures)
elif testsuite.harness:
can_run = False
else:
can_run = True
return can_run
# Global testsuite parameters
def check_runnable(self, enable_slow=False, filter='buildable', fixtures=[]):
        # Right now we only support building on Windows; running tests is
        # still a work in progress.
if os.name == 'nt':
return False
# we asked for build-only on the command line
if self.testsuite.build_only:
return False
# Do not run slow tests:
skip_slow = self.testsuite.slow and not enable_slow
if skip_slow:
return False
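        # A target is ready to run if this is a unit test, a native platform,
        # one of the supported simulators, or if the caller explicitly asked
        # for 'runnable' filtering. The simulator-specific checks below also
        # verify that the required simulator binary is installed.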
target_ready = bool(self.testsuite.type == "unit" or \
self.platform.type == "native" or \
self.platform.simulation in ["mdb-nsim", "nsim", "renode", "qemu", "tsim", "armfvp", "xt-sim"] or \
filter == 'runnable')
if self.platform.simulation == "nsim":
if not find_executable("nsimdrv"):
target_ready = False
if self.platform.simulation == "mdb-nsim":
if not find_executable("mdb"):
target_ready = False
if self.platform.simulation == "renode":
if not find_executable("renode"):
target_ready = False
if self.platform.simulation == "tsim":
if not find_executable("tsim-leon3"):
target_ready = False
testsuite_runnable = self.testsuite_runnable(self.testsuite, fixtures)
return testsuite_runnable and target_ready
def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]):
        # Create this in a "twister/" subdirectory; otherwise the overlay
        # would be passed to kconfig.py *twice*, and kconfig.cmake would
        # silently give that second pass precedence over any
        # --extra-args=CONFIG_*
subdir = os.path.join(self.build_dir, "twister")
content = ""
if self.testsuite.extra_configs:
content = "\n".join(self.testsuite.extra_configs)
if enable_coverage:
if platform.name in coverage_platform:
content = content + "\nCONFIG_COVERAGE=y"
content = content + "\nCONFIG_COVERAGE_DUMP=y"
if enable_asan:
if platform.type == "native":
content = content + "\nCONFIG_ASAN=y"
if enable_ubsan:
if platform.type == "native":
content = content + "\nCONFIG_UBSAN=y"
if content:
os.makedirs(subdir, exist_ok=True)
file = os.path.join(subdir, "testsuite_extra.conf")
with open(file, "w") as f:
f.write(content)
return content
def calculate_sizes(self):
"""Get the RAM/ROM sizes of a test case.
This can only be run after the instance has been executed by
MakeGenerator, otherwise there won't be any binaries to measure.
@return A SizeCalculator object
"""
fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
fns = [x for x in fns if '_pre' not in x]
if len(fns) != 1:
raise BuildError("Missing/multiple output ELF binary")
return SizeCalculator(fns[0], self.testsuite.extra_sections)
def __repr__(self):
return "<TestSuite %s on %s>" % (self.testsuite.name, self.platform.name)
class CMake():
    config_re = re.compile(r'(CONFIG_[A-Za-z0-9_]+)[=]"?([^"]*)"?$')
    dt_re = re.compile(r'([A-Za-z0-9_]+)[=]"?([^"]*)"?$')
def __init__(self, testsuite, platform, source_dir, build_dir):
self.cwd = None
self.capture_output = True
self.defconfig = {}
self.cmake_cache = {}
self.instance = None
self.testsuite = testsuite
self.platform = platform
self.source_dir = source_dir
self.build_dir = build_dir
self.log = "build.log"
self.generator = None
self.generator_cmd = None
self.default_encoding = sys.getdefaultencoding()
def parse_generated(self):
self.defconfig = {}
return {}
def run_build(self, args=[]):
logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
cmake_args = []
cmake_args.extend(args)
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
results = {}
if p.returncode == 0:
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
self.instance.status = "passed"
if not self.instance.run:
self.instance.add_missing_case_status("skipped", "Test was built only")
results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
if out:
log_msg = out.decode(self.default_encoding)
with open(os.path.join(self.build_dir, self.log), "a", encoding=self.default_encoding) as log:
log.write(log_msg)
else:
return None
else:
# A real error occurred, raise an exception
log_msg = ""
if out:
log_msg = out.decode(self.default_encoding)
with open(os.path.join(self.build_dir, self.log), "a", encoding=self.default_encoding) as log:
log.write(log_msg)
if log_msg:
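                # A linker region overflow is reported as a skip rather than
                # an error unless overflow_as_errors was requested.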
overflow_found = re.findall("region `(FLASH|ROM|RAM|ICCM|DCCM|SRAM)' overflowed by", log_msg)
if overflow_found and not self.overflow_as_errors:
logger.debug("Test skipped due to {} Overflow".format(overflow_found[0]))
self.instance.status = "skipped"
self.instance.reason = "{} overflow".format(overflow_found[0])
else:
self.instance.status = "error"
self.instance.reason = "Build failure"
results = {
"returncode": p.returncode,
"instance": self.instance,
}
return results
def run_cmake(self, args=[]):
if self.warnings_as_errors:
ldflags = "-Wl,--fatal-warnings"
cflags = "-Werror"
aflags = "-Werror -Wa,--fatal-warnings"
gen_defines_args = "--edtlib-Werror"
else:
ldflags = cflags = aflags = ""
gen_defines_args = ""
logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
cmake_args = [
f'-B{self.build_dir}',
f'-S{self.source_dir}',
f'-DTC_RUNID={self.instance.run_id}',
f'-DEXTRA_CFLAGS={cflags}',
f'-DEXTRA_AFLAGS={aflags}',
f'-DEXTRA_LDFLAGS={ldflags}',
f'-DEXTRA_GEN_DEFINES_ARGS={gen_defines_args}',
f'-G{self.generator}'
]
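        # Strip any quoting from the extra arguments and pass each one as a
        # CMake cache definition (-D<name>=<value>).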
args = ["-D{}".format(a.replace('"', '')) for a in args]
cmake_args.extend(args)
cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
cmake_args.extend(cmake_opts)
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
filter_results = self.parse_generated()
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
logger.debug(msg)
results = {'msg': msg, 'filter': filter_results}
else:
self.instance.status = "error"
self.instance.reason = "Cmake build failure"
for tc in self.instance.testcases:
tc.status = self.instance.status
logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
results = {"returncode": p.returncode}
if out:
with open(os.path.join(self.build_dir, self.log), "a", encoding=self.default_encoding) as log:
log_msg = out.decode(self.default_encoding)
log.write(log_msg)
return results
@staticmethod
def run_cmake_script(args=[]):
logger.debug("Running cmake script %s" % (args[0]))
cmake_args = ["-D{}".format(a.replace('"', '')) for a in args[1:]]
cmake_args.extend(['-P', args[0]])
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
if not cmake:
msg = "Unable to find `cmake` in path"
logger.error(msg)
raise Exception(msg)
cmd = [cmake] + cmake_args
kwargs = dict()
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
# It might happen that the environment adds ANSI escape codes like \x1b[0m,
# for instance if twister is executed from inside a makefile. In such a
# scenario it is then necessary to remove them, as otherwise the JSON decoding
# will fail.
ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
out = ansi_escape.sub('', out.decode())
if p.returncode == 0:
msg = "Finished running %s" % (args[0])
logger.debug(msg)
results = {"returncode": p.returncode, "msg": msg, "stdout": out}
else:
logger.error("Cmake script failure: %s" % (args[0]))
results = {"returncode": p.returncode, "returnmsg": out}
return results
class FilterBuilder(CMake):
def __init__(self, testsuite, platform, source_dir, build_dir):
super().__init__(testsuite, platform, source_dir, build_dir)
self.log = "config-twister.log"
def parse_generated(self):
if self.platform.name == "unit_testing":
return {}
cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
with open(defconfig_path, "r") as fp:
defconfig = {}
for line in fp.readlines():
m = self.config_re.match(line)
if not m:
if line.strip() and not line.startswith("#"):
sys.stderr.write("Unrecognized line %s\n" % line)
continue
defconfig[m.group(1)] = m.group(2).strip()
self.defconfig = defconfig
cmake_conf = {}
try:
cache = CMakeCache.from_file(cmake_cache_path)
except FileNotFoundError:
cache = {}
for k in iter(cache):
cmake_conf[k.name] = k.value
self.cmake_cache = cmake_conf
filter_data = {
"ARCH": self.platform.arch,
"PLATFORM": self.platform.name
}
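        # Later updates take precedence: Kconfig values override the
        # environment, and CMake cache values override both.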
filter_data.update(os.environ)
filter_data.update(self.defconfig)
filter_data.update(self.cmake_cache)
edt_pickle = os.path.join(self.build_dir, "zephyr", "edt.pickle")
if self.testsuite and self.testsuite.ts_filter:
try:
if os.path.exists(edt_pickle):
with open(edt_pickle, 'rb') as f:
edt = pickle.load(f)
else:
edt = None
res = expr_parser.parse(self.testsuite.ts_filter, filter_data, edt)
except (ValueError, SyntaxError) as se:
sys.stderr.write(
"Failed processing %s\n" % self.testsuite.yamlfile)
raise se
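            # The returned map is keyed by "<platform>/<testsuite>"; a value
            # of True means the filter expression evaluated false and the
            # instance should be filtered out at runtime.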
if not res:
return {os.path.join(self.platform.name, self.testsuite.name): True}
else:
return {os.path.join(self.platform.name, self.testsuite.name): False}
else:
self.platform.filter_data = filter_data
return filter_data
class ProjectBuilder(FilterBuilder):
def __init__(self, tplan, instance, **kwargs):
super().__init__(instance.testsuite, instance.platform, instance.testsuite.source_dir, instance.build_dir)
self.log = "build.log"
self.instance = instance
self.testplan = tplan
self.filtered_tests = 0
self.lsan = kwargs.get('lsan', False)
self.asan = kwargs.get('asan', False)
self.ubsan = kwargs.get('ubsan', False)
self.valgrind = kwargs.get('valgrind', False)
self.extra_args = kwargs.get('extra_args', [])
self.device_testing = kwargs.get('device_testing', False)
self.cmake_only = kwargs.get('cmake_only', False)
self.cleanup = kwargs.get('cleanup', False)
self.coverage = kwargs.get('coverage', False)
self.inline_logs = kwargs.get('inline_logs', False)
self.generator = kwargs.get('generator', None)
self.generator_cmd = kwargs.get('generator_cmd', None)
self.verbose = kwargs.get('verbose', None)
self.warnings_as_errors = kwargs.get('warnings_as_errors', True)
self.overflow_as_errors = kwargs.get('overflow_as_errors', False)
self.suite_name_check = kwargs.get('suite_name_check', True)
self.seed = kwargs.get('seed', 0)
@staticmethod
def log_info(filename, inline_logs):
filename = os.path.abspath(os.path.realpath(filename))
if inline_logs:
logger.info("{:-^100}".format(filename))
try:
with open(filename) as fp:
data = fp.read()
except Exception as e:
data = "Unable to read log data (%s)\n" % (str(e))
logger.error(data)
logger.info("{:-^100}".format(filename))
else:
logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)
def log_info_file(self, inline_logs):
build_dir = self.instance.build_dir
h_log = "{}/handler.log".format(build_dir)
b_log = "{}/build.log".format(build_dir)
v_log = "{}/valgrind.log".format(build_dir)
d_log = "{}/device.log".format(build_dir)
if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
self.log_info("{}".format(v_log), inline_logs)
elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
self.log_info("{}".format(h_log), inline_logs)
elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
self.log_info("{}".format(d_log), inline_logs)
else:
self.log_info("{}".format(b_log), inline_logs)
def setup_handler(self):
instance = self.instance
args = []
# FIXME: Needs simplification
if instance.platform.simulation == "qemu":
instance.handler = QEMUHandler(instance, "qemu")
args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
instance.handler.call_make_run = True
elif instance.testsuite.type == "unit":
instance.handler = BinaryHandler(instance, "unit")
instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
if self.coverage:
args.append("COVERAGE=1")
elif instance.platform.type == "native":
handler = BinaryHandler(instance, "native")
handler.asan = self.asan
handler.valgrind = self.valgrind
handler.lsan = self.lsan
handler.ubsan = self.ubsan
handler.coverage = self.coverage
handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
instance.handler = handler
elif instance.platform.simulation == "renode":
if find_executable("renode"):
instance.handler = BinaryHandler(instance, "renode")
instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
instance.handler.call_make_run = True
elif instance.platform.simulation == "tsim":
instance.handler = BinaryHandler(instance, "tsim")
instance.handler.call_make_run = True
elif self.device_testing:
instance.handler = DeviceHandler(instance, "device")
instance.handler.coverage = self.coverage
elif instance.platform.simulation == "nsim":
if find_executable("nsimdrv"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "mdb-nsim":
if find_executable("mdb"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "armfvp":
instance.handler = BinaryHandler(instance, "armfvp")
instance.handler.call_make_run = True
elif instance.platform.simulation == "xt-sim":
instance.handler = BinaryHandler(instance, "xt-sim")
instance.handler.call_make_run = True
if instance.handler:
instance.handler.args = args
instance.handler.generator_cmd = self.generator_cmd
instance.handler.generator = self.generator
instance.handler.suite_name_check = self.suite_name_check
def process(self, pipeline, done, message, lock, results):
op = message.get('op')
if not self.instance.handler:
self.setup_handler()
# The build process, call cmake and build with configured generator
if op == "cmake":
res = self.cmake()
if self.instance.status in ["failed", "error"]:
pipeline.put({"op": "report", "test": self.instance})
elif self.cmake_only:
if self.instance.status is None:
self.instance.status = "passed"
pipeline.put({"op": "report", "test": self.instance})
else:
# Here we check the runtime filter results coming from running cmake
if self.instance.name in res['filter'] and res['filter'][self.instance.name]:
logger.debug("filtering %s" % self.instance.name)
self.instance.status = "filtered"
self.instance.reason = "runtime filter"
results.skipped_runtime += 1
self.instance.add_missing_case_status("skipped")
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "build", "test": self.instance})
elif op == "build":
logger.debug("build test: %s" % self.instance.name)
res = self.build()
if not res:
self.instance.status = "error"
self.instance.reason = "Build Failure"
pipeline.put({"op": "report", "test": self.instance})
else:
# Count skipped cases during build, for example
# due to ram/rom overflow.
if self.instance.status == "skipped":
results.skipped_runtime += 1
self.instance.add_missing_case_status("skipped", self.instance.reason)
if res.get('returncode', 1) > 0:
self.instance.add_missing_case_status("blocked", self.instance.reason)
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "gather_metrics", "test": self.instance})
elif op == "gather_metrics":
self.gather_metrics(self.instance)
if self.instance.run and self.instance.handler:
pipeline.put({"op": "run", "test": self.instance})
else:
pipeline.put({"op": "report", "test": self.instance})
# Run the generated binary using one of the supported handlers
elif op == "run":
logger.debug("run test: %s" % self.instance.name)
self.run()
logger.debug(f"run status: {self.instance.name} {self.instance.status}")
            # Drop thread/testplan references so the instance can be pickled
            # across the process boundary.
self.instance.handler.thread = None
self.instance.handler.testplan = None
pipeline.put({
"op": "report",
"test": self.instance,
"status": self.instance.status,
"reason": self.instance.reason
}
)
# Report results and output progress to screen
elif op == "report":
with lock:
done.put(self.instance)
self.report_out(results)
if self.cleanup and not self.coverage and self.instance.status == "passed":
pipeline.put({
"op": "cleanup",
"test": self.instance
})
elif op == "cleanup":
if self.device_testing:
self.cleanup_device_testing_artifacts()
else:
self.cleanup_artifacts()
def cleanup_artifacts(self, additional_keep=[]):
logger.debug("Cleaning up {}".format(self.instance.build_dir))
allow = [
'zephyr/.config',
'handler.log',
'build.log',
'device.log',
'recording.csv',
            # the files below are also needed to make --test-only work
'Makefile',
'CMakeCache.txt',
'build.ninja',
'CMakeFiles/rules.ninja'
]
allow += additional_keep
allow = [os.path.join(self.instance.build_dir, file) for file in allow]
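        # Walk bottom-up so files are removed before their now-empty parent
        # directories are considered for removal.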
for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
for name in filenames:
path = os.path.join(dirpath, name)
if path not in allow:
os.remove(path)
# Remove empty directories and symbolic links to directories
for dir in dirnames:
path = os.path.join(dirpath, dir)
if os.path.islink(path):
os.remove(path)
elif not os.listdir(path):
os.rmdir(path)
def cleanup_device_testing_artifacts(self):
logger.debug("Cleaning up for Device Testing {}".format(self.instance.build_dir))
sanitizelist = [
'CMakeCache.txt',
'zephyr/runners.yaml',
]
keep = [
'zephyr/zephyr.hex',
'zephyr/zephyr.bin',
'zephyr/zephyr.elf',
]
keep += sanitizelist
self.cleanup_artifacts(keep)
# sanitize paths so files are relocatable
for file in sanitizelist:
file = os.path.join(self.instance.build_dir, file)
with open(file, "rt") as fin:
data = fin.read()
data = data.replace(canonical_zephyr_base+"/", "")
with open(file, "wt") as fin:
fin.write(data)
def report_out(self, results):
total_to_do = results.total
total_tests_width = len(str(total_to_do))
results.done += 1
instance = self.instance
if instance.status in ["error", "failed"]:
if instance.status == "error":
results.error += 1
else:
results.failed += 1
if self.verbose:
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
else:
print("")
logger.error(
"{:<25} {:<50} {}FAILED{}: {}".format(
instance.platform.name,
instance.testsuite.name,
Fore.RED,
Fore.RESET,
instance.reason))
if not self.verbose:
self.log_info_file(self.inline_logs)
elif instance.status in ["skipped", "filtered"]:
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
results.skipped_configs += 1
results.skipped_cases += len(instance.testsuite.testcases)
elif instance.status == "passed":
status = Fore.GREEN + "PASSED" + Fore.RESET
results.passed += 1
for case in instance.testcases:
if case.status == 'skipped':
results.skipped_cases += 1
else:
logger.debug(f"Unknown status = {instance.status}")
status = Fore.YELLOW + "UNKNOWN" + Fore.RESET
if self.verbose:
if self.cmake_only:
more_info = "cmake"
elif instance.status in ["skipped", "filtered"]:
more_info = instance.reason
else:
if instance.handler and instance.run:
more_info = instance.handler.type_str
htime = instance.execution_time
if htime:
more_info += " {:.3f}s".format(htime)
else:
more_info = "build"
            if (instance.status in ["error", "failed", "timeout", "flash_error"]
                    and hasattr(self.instance.handler, 'seed')
                    and self.instance.handler.seed is not None):
more_info += "/seed: " + str(self.seed)
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
results.done + results.skipped_filter, total_tests_width, total_to_do , instance.platform.name,
instance.testsuite.name, status, more_info))
if instance.status in ["error", "failed", "timeout"]:
self.log_info_file(self.inline_logs)
else:
completed_perc = 0
if total_to_do > 0:
completed_perc = int((float(results.done + results.skipped_filter) / total_to_do) * 100)
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
Fore.GREEN,
results.done + results.skipped_filter,
total_to_do,
Fore.RESET,
completed_perc,
Fore.YELLOW if results.skipped_configs > 0 else Fore.RESET,
results.skipped_filter + results.skipped_runtime,
Fore.RESET,
Fore.RED if results.failed > 0 else Fore.RESET,
results.failed,
Fore.RESET
)
)
sys.stdout.flush()
def cmake(self):
instance = self.instance
args = self.testsuite.extra_args[:]
args += self.extra_args
if instance.handler:
args += instance.handler.args
# merge overlay files into one variable
def extract_overlays(args):
re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
other_args = []
overlays = []
for arg in args:
match = re_overlay.search(arg)
if match:
overlays.append(match.group(1).strip('\'"'))
else:
other_args.append(arg)
args[:] = other_args
return overlays
overlays = extract_overlays(args)
if os.path.exists(os.path.join(instance.build_dir,
"twister", "testsuite_extra.conf")):
overlays.append(os.path.join(instance.build_dir,
"twister", "testsuite_extra.conf"))
if overlays:
args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
res = self.run_cmake(args)
return res
def build(self):
res = self.run_build(['--build', self.build_dir])
return res
def run(self):
instance = self.instance
if instance.handler:
if instance.handler.type_str == "device":
instance.handler.testplan = self.testplan
            if self.seed is not None and instance.platform.name.startswith("native_posix"):
                self.parse_generated()
                if ('CONFIG_FAKE_ENTROPY_NATIVE_POSIX' in self.defconfig and
                        self.defconfig['CONFIG_FAKE_ENTROPY_NATIVE_POSIX'] == 'y'):
                    instance.handler.seed = self.seed
instance.handler.handle()
sys.stdout.flush()
def gather_metrics(self, instance):
if self.testplan.enable_size_report and not self.testplan.cmake_only:
self.calc_one_elf_size(instance)
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
@staticmethod
def calc_one_elf_size(instance):
if instance.status not in ["error", "failed", "skipped"]:
if instance.platform.type != "native":
size_calc = instance.calculate_sizes()
instance.metrics["ram_size"] = size_calc.get_ram_size()
instance.metrics["rom_size"] = size_calc.get_rom_size()
instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
instance.metrics["handler_time"] = instance.execution_time
class Filters:
# filters provided on command line by the user/tester
CMD_LINE = 'command line filter'
# filters in the testsuite yaml definition
TESTSUITE = 'testsuite filter'
    # filters related to platform definition
PLATFORM = 'Platform related filter'
# in case a testcase was quarantined.
QUARENTINE = 'Quarantine filter'
class TestPlan(DisablePyTestCollectionMixin):
    config_re = re.compile(r'(CONFIG_[A-Za-z0-9_]+)[=]"?([^"]*)"?$')
    dt_re = re.compile(r'([A-Za-z0-9_]+)[=]"?([^"]*)"?$')
ts_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "testsuite-schema.yaml"))
quarantine_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "quarantine-schema.yaml"))
testsuite_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"modules": {"type": "list", "default": []},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_allow": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"integration_platforms": {"type": "list", "default": []},
"testcases": {"type": "list", "default": []},
"platform_type": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_allow": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_allow": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}},
"seed": {"type": "int", "default": 0}
}
SAMPLE_FILENAME = 'sample.yaml'
TESTSUITE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testsuite_roots=[], outdir=None):
self.roots = testsuite_roots
if not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Test Plan Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_ubsan = False
self.enable_lsan = False
self.enable_asan = False
self.detailed_skipped_report = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
self.enable_sizes_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
self.warnings_as_errors = True
self.overflow_as_errors = False
self.quarantine_verify = False
self.retry_build_errors = False
self.suite_name_check = True
self.seed = 0
# Keep track of which test cases we've filtered out and why
self.testsuites = {}
self.quarantine = {}
self.platforms = []
self.platform_names = []
self.selected_platforms = []
self.filtered_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.load_errors = 0
self.instances = dict()
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
# hardcoded for now
self.duts = []
# run integration tests only
self.integration = False
        # used when creating shorter build paths
self.link_dir_counter = 0
self.pipeline = None
self.version = "NA"
self.modules = []
self.timestamp = datetime.now().isoformat()
def check_zephyr_version(self):
try:
subproc = subprocess.run(["git", "describe", "--abbrev=12", "--always"],
stdout=subprocess.PIPE,
universal_newlines=True,
cwd=ZEPHYR_BASE)
if subproc.returncode == 0:
self.version = subproc.stdout.strip()
logger.info(f"Zephyr version: {self.version}")
except OSError:
logger.info("Cannot read zephyr version.")
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + os.sep)}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update_counting(self, results=None):
for instance in self.instances.values():
results.cases += len(instance.testsuite.testcases)
if instance.status == 'filtered':
results.skipped_filter += 1
results.skipped_configs += 1
elif instance.status == 'passed':
results.passed += 1
results.done += 1
elif instance.status == 'error':
results.error += 1
results.done += 1
def compare_metrics(self, filename):
        # (metric name, datatype, lower-is-better)
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.error("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
jt = json.load(fp)
for ts in jt.get("testsuites", []):
d = {}
for m, _, _ in interesting_metrics:
d[m] = ts.get(m, 0)
ts_name = ts.get('name')
ts_platform = ts.get('platform')
saved_metrics[(ts_name, ts_platform)] = d
for instance in self.instances.values():
mkey = (instance.testsuite.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
def footprint_reports(self, report, show_footprint, all_deltas,
footprint_threshold, last_metrics):
if not report:
return
logger.debug("running footprint_reports")
deltas = self.compare_metrics(report)
warnings = 0
if deltas and show_footprint:
for i, metric, value, delta, lower_better in deltas:
if not all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
percentage = 0
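                # delta is (new - old), so (value - delta) is the old value;
                # express the change relative to that baseline.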
if value > delta:
percentage = (float(delta) / float(value - delta))
if not all_deltas and (percentage < (footprint_threshold / 100.0)):
continue
logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.testsuite.name, Fore.YELLOW,
"INFO" if all_deltas else "WARNING", Fore.RESET,
metric, delta, value, percentage))
warnings += 1
if warnings:
logger.warning("Deltas based on metrics from last %s" %
("release" if not last_metrics else "run"))
def summary(self, results, unrecognized_sections):
failed = 0
run = 0
for instance in self.instances.values():
if instance.status == "failed":
failed += 1
elif instance.metrics.get("unrecognized") and not unrecognized_sections:
logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
(Fore.RED, Fore.RESET, instance.name,
str(instance.metrics.get("unrecognized", []))))
failed += 1
# FIXME: need a better way to identify executed tests
handler_time = instance.metrics.get('handler_time', 0)
if float(handler_time) > 0:
run += 1
if results.total and results.total != results.skipped_configs:
pass_rate = (float(results.passed) / float(results.total - results.skipped_configs))
else:
pass_rate = 0
logger.info(
"{}{} of {}{} test configurations passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
results.passed,
results.total,
Fore.RESET,
pass_rate,
Fore.RED if results.failed else Fore.RESET,
results.failed + results.error,
Fore.RESET,
results.skipped_configs,
Fore.YELLOW if self.warnings else Fore.RESET,
self.warnings,
Fore.RESET,
self.duration))
self.total_platforms = len(self.platforms)
        # If we are only building, do not report on tests being executed.
if self.platforms and not self.build_only:
logger.info("In total {} test cases were executed, {} skipped on {} out of total {} platforms ({:02.2f}%)".format(
results.cases - results.skipped_cases,
results.skipped_cases,
len(self.filtered_platforms),
self.total_platforms,
(100 * len(self.filtered_platforms) / len(self.platforms))
))
built_only = results.total - run - results.skipped_configs
logger.info(f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms, \
{Fore.RED}{built_only}{Fore.RESET} test configurations were only built.")
def save_reports(self, name, suffix, report_dir, no_update, platform_reports):
if not self.instances:
return
logger.info("Saving reports...")
if name:
report_name = name
else:
report_name = "twister"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if suffix:
filename = "{}_{}".format(filename, suffix)
if not no_update:
json_file = filename + ".json"
self.json_report(json_file, version=self.version)
self.xunit_report(json_file, filename + ".xml", full_report=False)
self.xunit_report(json_file, filename + "_report.xml", full_report=True)
self.xunit_report_suites(json_file, filename + "_suite_report.xml")
if platform_reports:
self.target_report(json_file, outdir, suffix)
def target_report(self, json_file, outdir, suffix):
platforms = {inst.platform.name for _, inst in self.instances.items()}
for platform in platforms:
            if suffix:
                filename = os.path.join(outdir, "{}_{}.xml".format(platform, suffix))
            else:
                filename = os.path.join(outdir, "{}.xml".format(platform))
self.xunit_report(json_file, filename, platform, full_report=True)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
try:
platform = Platform()
platform.load(file)
if platform.name in [p.name for p in self.platforms]:
logger.error(f"Duplicate platform {platform.name} in {file}")
raise Exception(f"Duplicate platform identifier {platform.name} found")
if platform.twister:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
self.platform_names = [p.name for p in self.platforms]
def get_all_tests(self):
testcases = []
for _, ts in self.testsuites.items():
for case in ts.testcases:
testcases.append(case)
return testcases
@staticmethod
def get_toolchain():
toolchain_script = Path(ZEPHYR_BASE) / Path('cmake/modules/verify-toolchain.cmake')
result = CMake.run_cmake_script([toolchain_script, "FORMAT=json"])
try:
if result['returncode']:
raise TwisterRuntimeError(f"E: {result['returnmsg']}")
except Exception as e:
print(str(e))
sys.exit(2)
toolchain = json.loads(result['stdout'])['ZEPHYR_TOOLCHAIN_VARIANT']
logger.info(f"Using '{toolchain}' toolchain.")
return toolchain
def add_testsuites(self, testsuite_filter=[]):
for root in self.roots:
root = os.path.abspath(root)
logger.debug("Reading test case configuration files under %s..." % root)
for dirpath, _, filenames in os.walk(root, topdown=True):
if self.SAMPLE_FILENAME in filenames:
filename = self.SAMPLE_FILENAME
elif self.TESTSUITE_FILENAME in filenames:
filename = self.TESTSUITE_FILENAME
else:
continue
logger.debug("Found possible test case in " + dirpath)
ts_path = os.path.join(dirpath, filename)
try:
parsed_data = TwisterConfigParser(ts_path, self.ts_schema)
parsed_data.load()
ts_path = os.path.dirname(ts_path)
workdir = os.path.relpath(ts_path, root)
for name in parsed_data.tests.keys():
ts = TestSuite(root, workdir, name)
ts_dict = parsed_data.get_test(name, self.testsuite_valid_keys)
ts.source_dir = ts_path
ts.yamlfile = ts_path
ts.type = ts_dict["type"]
ts.tags = ts_dict["tags"]
ts.extra_args = ts_dict["extra_args"]
ts.extra_configs = ts_dict["extra_configs"]
ts.arch_allow = ts_dict["arch_allow"]
ts.arch_exclude = ts_dict["arch_exclude"]
ts.skip = ts_dict["skip"]
ts.platform_exclude = ts_dict["platform_exclude"]
ts.platform_allow = ts_dict["platform_allow"]
ts.platform_type = ts_dict["platform_type"]
ts.toolchain_exclude = ts_dict["toolchain_exclude"]
ts.toolchain_allow = ts_dict["toolchain_allow"]
ts.ts_filter = ts_dict["filter"]
ts.timeout = ts_dict["timeout"]
ts.harness = ts_dict["harness"]
ts.harness_config = ts_dict["harness_config"]
if ts.harness == 'console' and not ts.harness_config:
raise Exception('Harness config error: console harness defined without a configuration.')
ts.build_only = ts_dict["build_only"]
ts.build_on_all = ts_dict["build_on_all"]
ts.slow = ts_dict["slow"]
ts.min_ram = ts_dict["min_ram"]
ts.modules = ts_dict["modules"]
ts.depends_on = ts_dict["depends_on"]
ts.min_flash = ts_dict["min_flash"]
ts.extra_sections = ts_dict["extra_sections"]
ts.integration_platforms = ts_dict["integration_platforms"]
ts.seed = ts_dict["seed"]
testcases = ts_dict.get("testcases", [])
if testcases:
for tc in testcases:
ts.add_testcase(name=f"{name}.{tc}")
else:
ts.parse_subcases(ts_path)
if testsuite_filter:
if ts.name and ts.name in testsuite_filter:
self.testsuites[ts.name] = ts
else:
self.testsuites[ts.name] = ts
except Exception as e:
logger.error("%s: can't load (skipping): %s" % (ts_path, e))
self.load_errors += 1
return len(self.testsuites)
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
def load_quarantine(self, file):
"""
        Load the quarantine list from the given yaml file. Create a dictionary
        of all test configurations (platform + scenario: comment) that shall
        be skipped due to quarantine.
"""
# Load yaml into quarantine_yaml
quarantine_yaml = scl.yaml_load_verify(file, self.quarantine_schema)
# Create quarantine_list with a product of the listed
# platforms and scenarios for each entry in quarantine yaml
quarantine_list = []
for quar_dict in quarantine_yaml:
if quar_dict['platforms'][0] == "all":
plat = self.platform_names
else:
plat = quar_dict['platforms']
comment = quar_dict.get('comment', "NA")
quarantine_list.append([{".".join([p, s]): comment}
for p in plat for s in quar_dict['scenarios']])
# Flatten the quarantine_list
quarantine_list = [it for sublist in quarantine_list for it in sublist]
# Change quarantine_list into a dictionary
for d in quarantine_list:
self.quarantine.update(d)
def load_from_file(self, file, filter_platform=[]):
with open(file, "r") as json_test_plan:
jtp = json.load(json_test_plan)
instance_list = []
for ts in jtp.get("testsuites", []):
logger.debug(f"loading {ts['name']}...")
testsuite = ts["name"]
platform = self.get_platform(ts["platform"])
if filter_platform and platform.name not in filter_platform:
continue
instance = TestInstance(self.testsuites[testsuite], platform, self.outdir)
if ts.get("run_id"):
instance.run_id = ts.get("run_id")
if self.device_testing:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
instance.metrics['handler_time'] = ts.get('execution_time', 0)
instance.metrics['ram_size'] = ts.get("ram_size", 0)
            instance.metrics['rom_size'] = ts.get("rom_size", 0)
status = ts.get('status', None)
reason = ts.get("reason", "Unknown")
if status in ["error", "failed"]:
instance.status = None
instance.reason = None
# test marked as passed (built only) but can run when
# --test-only is used. Reset status to capture new results.
elif status == 'passed' and instance.run and self.test_only:
instance.status = None
instance.reason = None
else:
instance.status = status
instance.reason = reason
for tc in ts.get('testcases', []):
identifier = tc['identifier']
tc_status = tc.get('status', None)
tc_reason = None
                # We set the reason only if the status is valid; it might
                # have been reset above...
if instance.status:
tc_reason = tc.get('reason')
if tc_status:
case = instance.set_case_status_by_name(identifier, tc_status, tc_reason)
case.duration = tc.get('execution_time', 0)
if tc.get('log'):
case.output = tc.get('log')
instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
instance_list.append(instance)
self.add_instances(instance_list)
def apply_filters(self, **kwargs):
toolchain = self.get_toolchain()
platform_filter = kwargs.get('platform')
exclude_platform = kwargs.get('exclude_platform', [])
testsuite_filter = kwargs.get('run_individual_tests', [])
arch_filter = kwargs.get('arch')
tag_filter = kwargs.get('tag')
exclude_tag = kwargs.get('exclude_tag')
all_filter = kwargs.get('all')
runnable = kwargs.get('runnable')
force_toolchain = kwargs.get('force_toolchain')
force_platform = kwargs.get('force_platform')
emu_filter = kwargs.get('emulation_only')
logger.debug("platform filter: " + str(platform_filter))
logger.debug(" arch_filter: " + str(arch_filter))
logger.debug(" tag_filter: " + str(tag_filter))
logger.debug(" exclude_tag: " + str(exclude_tag))
default_platforms = False
emulation_platforms = False
if all_filter:
logger.info("Selecting all possible platforms per test case")
            # When --all is used, any --platform arguments are ignored
platform_filter = []
elif not platform_filter and not emu_filter:
logger.info("Selecting default platforms per test case")
default_platforms = True
elif emu_filter:
logger.info("Selecting emulation platforms per test case")
emulation_platforms = True
if platform_filter:
            self.verify_platforms_existence(platform_filter, "platform_filter")
platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
elif emu_filter:
platforms = list(filter(lambda p: p.simulation != 'na', self.platforms))
elif arch_filter:
platforms = list(filter(lambda p: p.arch in arch_filter, self.platforms))
elif default_platforms:
platforms = list(filter(lambda p: p.default, self.platforms))
else:
platforms = self.platforms
logger.info("Building initial testsuite list...")
for ts_name, ts in self.testsuites.items():
if ts.build_on_all and not platform_filter:
platform_scope = self.platforms
elif ts.integration_platforms and self.integration:
self.verify_platforms_existence(
ts.integration_platforms, f"{ts_name} - integration_platforms")
platform_scope = list(filter(lambda item: item.name in ts.integration_platforms, \
self.platforms))
else:
platform_scope = platforms
integration = self.integration and ts.integration_platforms
# If there isn't any overlap between the platform_allow list and the platform_scope
# we set the scope to the platform_allow list
if ts.platform_allow and not platform_filter and not integration:
self.verify_platforms_existence(
ts.platform_allow, f"{ts_name} - platform_allow")
a = set(platform_scope)
b = set(filter(lambda item: item.name in ts.platform_allow, self.platforms))
c = a.intersection(b)
if not c:
platform_scope = list(filter(lambda item: item.name in ts.platform_allow, \
self.platforms))
# list of instances per testsuite, aka configurations.
instance_list = []
for plat in platform_scope:
instance = TestInstance(ts, plat, self.outdir)
if runnable:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
if runnable and self.duts:
for h in self.duts:
if h.platform == plat.name:
if ts.harness_config.get('fixture') in h.fixtures:
instance.run = True
if not force_platform and plat.name in exclude_platform:
instance.add_filter("Platform is excluded on command line.", Filters.CMD_LINE)
if (plat.arch == "unit") != (ts.type == "unit"):
# Discard silently
continue
if ts.modules and self.modules:
if not set(ts.modules).issubset(set(self.modules)):
instance.add_filter(f"one or more required modules not available: {','.join(ts.modules)}", Filters.TESTSUITE)
if runnable and not instance.run:
instance.add_filter("Not runnable on device", Filters.PLATFORM)
if self.integration and ts.integration_platforms and plat.name not in ts.integration_platforms:
instance.add_filter("Not part of integration platforms", Filters.TESTSUITE)
if ts.skip:
instance.add_filter("Skip filter", Filters.TESTSUITE)
if tag_filter and not ts.tags.intersection(tag_filter):
instance.add_filter("Command line testsuite tag filter", Filters.CMD_LINE)
if exclude_tag and ts.tags.intersection(exclude_tag):
instance.add_filter("Command line testsuite exclude filter", Filters.CMD_LINE)
if testsuite_filter and ts_name not in testsuite_filter:
instance.add_filter("TestSuite name filter", Filters.CMD_LINE)
if arch_filter and plat.arch not in arch_filter:
instance.add_filter("Command line testsuite arch filter", Filters.CMD_LINE)
if not force_platform:
if ts.arch_allow and plat.arch not in ts.arch_allow:
instance.add_filter("Not in test case arch allow list", Filters.TESTSUITE)
if ts.arch_exclude and plat.arch in ts.arch_exclude:
instance.add_filter("In test case arch exclude", Filters.TESTSUITE)
if ts.platform_exclude and plat.name in ts.platform_exclude:
instance.add_filter("In test case platform exclude", Filters.TESTSUITE)
if ts.toolchain_exclude and toolchain in ts.toolchain_exclude:
instance.add_filter("In test case toolchain exclude", Filters.TESTSUITE)
if platform_filter and plat.name not in platform_filter:
instance.add_filter("Command line platform filter", Filters.CMD_LINE)
if ts.platform_allow and plat.name not in ts.platform_allow:
instance.add_filter("Not in testsuite platform allow list", Filters.TESTSUITE)
if ts.platform_type and plat.type not in ts.platform_type:
instance.add_filter("Not in testsuite platform type list", Filters.TESTSUITE)
if ts.toolchain_allow and toolchain not in ts.toolchain_allow:
instance.add_filter("Not in testsuite toolchain allow list", Filters.TESTSUITE)
if not plat.env_satisfied:
instance.add_filter("Environment ({}) not satisfied".format(", ".join(plat.env)), Filters.PLATFORM)
if not force_toolchain \
and toolchain and (toolchain not in plat.supported_toolchains) \
and "host" not in plat.supported_toolchains \
and ts.type != 'unit':
instance.add_filter("Not supported by the toolchain", Filters.PLATFORM)
if plat.ram < ts.min_ram:
instance.add_filter("Not enough RAM", Filters.PLATFORM)
if ts.depends_on:
dep_intersection = ts.depends_on.intersection(set(plat.supported))
if dep_intersection != set(ts.depends_on):
instance.add_filter("No hardware support", Filters.PLATFORM)
if plat.flash < ts.min_flash:
instance.add_filter("Not enough FLASH", Filters.PLATFORM)
if set(plat.ignore_tags) & ts.tags:
instance.add_filter("Excluded tags per platform (exclude_tags)", Filters.PLATFORM)
if plat.only_tags and not set(plat.only_tags) & ts.tags:
instance.add_filter("Excluded tags per platform (only_tags)", Filters.PLATFORM)
test_configuration = ".".join([instance.platform.name,
instance.testsuite.id])
# skip quarantined tests
if test_configuration in self.quarantine and not self.quarantine_verify:
instance.add_filter(f"Quarantine: {self.quarantine[test_configuration]}", Filters.QUARENTINE)
# run only quarantined test to verify their statuses (skip everything else)
if self.quarantine_verify and test_configuration not in self.quarantine:
instance.add_filter("Not under quarantine", Filters.QUARENTINE)
# if nothing stopped us until now, it means this configuration
# needs to be added.
instance_list.append(instance)
# no configurations, so jump to next testsuite
if not instance_list:
continue
# if twister was launched with no platform options at all, we
# take all default platforms
if default_platforms and not ts.build_on_all and not integration:
if ts.platform_allow:
a = set(self.default_platforms)
b = set(ts.platform_allow)
c = a.intersection(b)
if c:
aa = list(filter(lambda ts: ts.platform.name in c, instance_list))
self.add_instances(aa)
else:
self.add_instances(instance_list)
else:
instances = list(filter(lambda ts: ts.platform.default, instance_list))
self.add_instances(instances)
elif integration:
instances = list(filter(lambda item: item.platform.name in ts.integration_platforms, instance_list))
self.add_instances(instances)
elif emulation_platforms:
self.add_instances(instance_list)
                for instance in list(filter(lambda inst: inst.platform.simulation == 'na', instance_list)):
instance.add_filter("Not an emulated platform", Filters.PLATFORM)
else:
self.add_instances(instance_list)
for _, case in self.instances.items():
case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
self.selected_platforms = set(p.platform.name for p in self.instances.values())
filtered_instances = list(filter(lambda item: item.status == "filtered", self.instances.values()))
for filtered_instance in filtered_instances:
            # If integration mode is on, all skips on integration_platforms
            # are treated as errors.
if self.integration and filtered_instance.platform.name in filtered_instance.testsuite.integration_platforms \
and "Quarantine" not in filtered_instance.reason:
                # Do not treat this as an error if the filter type is command line
filters = {t['type'] for t in filtered_instance.filters}
if Filters.CMD_LINE in filters:
continue
filtered_instance.status = "error"
filtered_instance.reason += " but is one of the integration platforms"
self.instances[filtered_instance.name] = filtered_instance
filtered_instance.add_missing_case_status(filtered_instance.status)
        self.filtered_platforms = set(p.platform.name for p in self.instances.values()
                                      if p.status != "skipped")
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
def add_tasks_to_queue(self, pipeline, build_only=False, test_only=False, retry_build_errors=False):
for instance in self.instances.values():
if build_only:
instance.run = False
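            # Instances whose previous status is listed here are not requeued;
            # build errors are retried only when explicitly requested.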
no_retry_statuses = ['passed', 'skipped', 'filtered']
if not retry_build_errors:
no_retry_statuses.append("error")
if instance.status not in no_retry_statuses:
logger.debug(f"adding {instance.name}")
instance.status = None
if test_only and instance.run:
pipeline.put({"op": "run", "test": instance})
else:
pipeline.put({"op": "cmake", "test": instance})
def pipeline_mgr(self, pipeline, done_queue, lock, results):
while True:
try:
task = pipeline.get_nowait()
except queue.Empty:
break
else:
test = task['test']
pb = ProjectBuilder(self,
test,
lsan=self.enable_lsan,
asan=self.enable_asan,
ubsan=self.enable_ubsan,
coverage=self.enable_coverage,
extra_args=self.extra_args,
device_testing=self.device_testing,
cmake_only=self.cmake_only,
cleanup=self.cleanup,
valgrind=self.enable_valgrind,
inline_logs=self.inline_logs,
generator=self.generator,
generator_cmd=self.generator_cmd,
verbose=self.verbose,
warnings_as_errors=self.warnings_as_errors,
overflow_as_errors=self.overflow_as_errors,
suite_name_check=self.suite_name_check,
seed=self.seed
)
pb.process(pipeline, done_queue, task, lock, results)
return True
def execute(self, pipeline, done, results):
lock = Lock()
logger.info("Adding tasks to the queue...")
self.add_tasks_to_queue(pipeline, self.build_only, self.test_only,
retry_build_errors=self.retry_build_errors)
logger.info("Added initial list of jobs to queue")
processes = []
for job in range(self.jobs):
logger.debug(f"Launch process {job}")
p = Process(target=self.pipeline_mgr, args=(pipeline, done, lock, results, ))
processes.append(p)
p.start()
try:
for p in processes:
p.join()
except KeyboardInterrupt:
logger.info("Execution interrupted")
for p in processes:
p.terminate()
return results
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
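                # Keep only printable characters so the log embeds cleanly
                # into the XML/JSON reports.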
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
@staticmethod
def xunit_testcase(eleTestsuite, name, classname, status, ts_status, reason, duration, runnable, stats, log, build_only_as_skip):
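        # stats carries the running (fails, passes, errors, skips) counters
        # for the enclosing testsuite element; the updated tuple is returned.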
fails, passes, errors, skips = stats
if status in ['skipped', 'filtered']:
duration = 0
eleTestcase = ET.SubElement(
eleTestsuite, "testcase",
classname=classname,
name=f"{name}",
time=f"{duration}")
if status in ['skipped', 'filtered']:
skips += 1
# temporarily add build_only_as_skip to restore existing CI report behaviour
if ts_status == "passed" and not runnable:
tc_type = "build"
else:
tc_type = status
ET.SubElement(eleTestcase, 'skipped', type=f"{tc_type}", message=f"{reason}")
elif status in ["failed", "blocked"]:
fails += 1
el = ET.SubElement(eleTestcase, 'failure', type="failure", message=f"{reason}")
if log:
el.text = log
elif status == "error":
errors += 1
el = ET.SubElement(eleTestcase, 'error', type="failure", message=f"{reason}")
if log:
el.text = log
elif status == 'passed':
if not runnable and build_only_as_skip:
ET.SubElement(eleTestcase, 'skipped', type="build", message="built only")
skips += 1
else:
passes += 1
else:
if not status:
logger.debug(f"{name}: No status")
                ET.SubElement(eleTestcase, 'skipped', type="untested", message="No results captured, testsuite misconfiguration?")
else:
logger.error(f"{name}: Unknown status '{status}'")
return (fails, passes, errors, skips)
# Generate a report with all testsuites instead of doing this per platform
def xunit_report_suites(self, json_file, filename):
json_data = {}
with open(json_file, "r") as json_results:
json_data = json.load(json_results)
env = json_data.get('environment', {})
version = env.get('zephyr_version', None)
eleTestsuites = ET.Element('testsuites')
all_suites = json_data.get("testsuites", [])
suites_to_report = all_suites
# do not create entry if everything is filtered out
if not self.detailed_skipped_report:
suites_to_report = list(filter(lambda d: d.get('status') != "filtered", all_suites))
for suite in suites_to_report:
duration = 0
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=suite.get("name"), time="0",
                                         timestamp=self.timestamp,
tests="0",
failures="0",
errors="0", skipped="0")
            eleTSProperties = ET.SubElement(eleTestsuite, 'properties')
            # Multiple 'property' elements can be added to 'properties',
            # differing by name and value
            ET.SubElement(eleTSProperties, 'property', name="version", value=version)
            ET.SubElement(eleTSProperties, 'property', name="platform", value=suite.get("platform"))
            ET.SubElement(eleTSProperties, 'property', name="architecture", value=suite.get("arch"))
total = 0
fails = passes = errors = skips = 0
handler_time = suite.get('execution_time', 0)
runnable = suite.get('runnable', 0)
duration += float(handler_time)
ts_status = suite.get('status')
for tc in suite.get("testcases", []):
status = tc.get('status')
reason = tc.get('reason', suite.get('reason', 'Unknown'))
log = tc.get("log", suite.get("log"))
tc_duration = tc.get('execution_time', handler_time)
name = tc.get("identifier")
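                # Use the first two dotted components of the identifier
                # (e.g. "<suite>.<scenario>") as the JUnit classname.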
classname = ".".join(name.split(".")[:2])
fails, passes, errors, skips = self.xunit_testcase(eleTestsuite,
name, classname, status, ts_status, reason, tc_duration, runnable,
(fails, passes, errors, skips), log, True)
total = (errors + passes + fails + skips)
eleTestsuite.attrib['time'] = f"{duration}"
eleTestsuite.attrib['failures'] = f"{fails}"
eleTestsuite.attrib['errors'] = f"{errors}"
eleTestsuite.attrib['skipped'] = f"{skips}"
eleTestsuite.attrib['tests'] = f"{total}"
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
def xunit_report(self, json_file, filename, selected_platform=None, full_report=False):
if selected_platform:
selected = [selected_platform]
logger.info(f"Writing target report for {selected_platform}...")
else:
logger.info(f"Writing xunit report {filename}...")
selected = self.selected_platforms
json_data = {}
with open(json_file, "r") as json_results:
json_data = json.load(json_results)
env = json_data.get('environment', {})
version = env.get('zephyr_version', None)
eleTestsuites = ET.Element('testsuites')
all_suites = json_data.get("testsuites", [])
for platform in selected:
suites = list(filter(lambda d: d['platform'] == platform, all_suites))
# do not create entry if everything is filtered out
if not self.detailed_skipped_report:
non_filtered = list(filter(lambda d: d.get('status') != "filtered", suites))
if not non_filtered:
continue
duration = 0
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=platform,
            timestamp=self.timestamp,
time="0",
tests="0",
failures="0",
errors="0", skipped="0")
            eleTSProperties = ET.SubElement(eleTestsuite, 'properties')
            # Multiple 'property' can be added to 'properties'
            # differing by name and value
            ET.SubElement(eleTSProperties, 'property', name="version", value=version)
total = 0
fails = passes = errors = skips = 0
for ts in suites:
handler_time = ts.get('execution_time', 0)
runnable = ts.get('runnable', 0)
duration += float(handler_time)
ts_status = ts.get('status')
# Do not report filtered testcases
if ts_status == 'filtered' and not self.detailed_skipped_report:
continue
if full_report:
for tc in ts.get("testcases", []):
status = tc.get('status')
reason = tc.get('reason', ts.get('reason', 'Unknown'))
log = tc.get("log", ts.get("log"))
tc_duration = tc.get('execution_time', handler_time)
name = tc.get("identifier")
classname = ".".join(name.split(".")[:2])
fails, passes, errors, skips = self.xunit_testcase(eleTestsuite,
name, classname, status, ts_status, reason, tc_duration, runnable,
(fails, passes, errors, skips), log, True)
else:
reason = ts.get('reason', 'Unknown')
name = ts.get("name")
classname = f"{platform}:{name}"
log = ts.get("log")
fails, passes, errors, skips = self.xunit_testcase(eleTestsuite,
name, classname, ts_status, ts_status, reason, duration, runnable,
(fails, passes, errors, skips), log, False)
total = (errors + passes + fails + skips)
eleTestsuite.attrib['time'] = f"{duration}"
eleTestsuite.attrib['failures'] = f"{fails}"
eleTestsuite.attrib['errors'] = f"{errors}"
eleTestsuite.attrib['skipped'] = f"{skips}"
eleTestsuite.attrib['tests'] = f"{total}"
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
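    # Sketch of how this is typically driven (assumed caller code, shown for
    # illustration only):
    #   self.xunit_report("twister.json", "twister.xml", full_report=True)
    # yields one <testsuite> element per selected platform, with one <testcase>
    # per parsed sub-testcase when full_report is set.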
def json_report(self, filename, version="NA"):
logger.info(f"Writing JSON report {filename}")
report = {}
report["environment"] = {"os": os.name,
"zephyr_version": version,
"toolchain": self.get_toolchain()
}
suites = []
for instance in self.instances.values():
handler_log = os.path.join(instance.build_dir, "handler.log")
build_log = os.path.join(instance.build_dir, "build.log")
device_log = os.path.join(instance.build_dir, "device.log")
handler_time = instance.metrics.get('handler_time', 0)
            ram_size = instance.metrics.get("ram_size", 0)
            rom_size = instance.metrics.get("rom_size", 0)
suite = {
"name": instance.testsuite.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
}
if instance.run_id:
suite['run_id'] = instance.run_id
suite["runnable"] = False
if instance.status != 'filtered':
suite["runnable"] = instance.run
if ram_size:
suite["ram_size"] = ram_size
if rom_size:
suite["rom_size"] = rom_size
if instance.status in ["error", "failed"]:
suite['status'] = instance.status
suite["reason"] = instance.reason
# FIXME
if os.path.exists(handler_log):
suite["log"] = self.process_log(handler_log)
elif os.path.exists(device_log):
suite["log"] = self.process_log(device_log)
else:
suite["log"] = self.process_log(build_log)
elif instance.status == 'filtered':
suite["status"] = "filtered"
suite["reason"] = instance.reason
elif instance.status == 'passed':
suite["status"] = "passed"
elif instance.status == 'skipped':
suite["status"] = "skipped"
suite["reason"] = instance.reason
if instance.status is not None:
suite["execution_time"] = f"{float(handler_time):.2f}"
testcases = []
if len(instance.testcases) == 1:
single_case_duration = f"{float(handler_time):.2f}"
else:
single_case_duration = 0
for case in instance.testcases:
                # 'freeform' is set when no sub-testcases were parsed. However,
                # if sub-testcases are discovered at runtime, the fallback
                # testcase is no longer needed and can be dropped from the
                # output: it has no status and would otherwise be reported as
                # skipped.
if case.freeform and case.status is None and len(instance.testcases) > 1:
continue
testcase = {}
testcase['identifier'] = case.name
if instance.status:
if single_case_duration:
testcase['execution_time'] = single_case_duration
else:
testcase['execution_time'] = f"{float(case.duration):.2f}"
if case.output != "":
testcase['log'] = case.output
if case.status == "skipped":
if instance.status == "filtered":
testcase["status"] = "filtered"
else:
testcase["status"] = "skipped"
testcase["reason"] = case.reason or instance.reason
else:
testcase["status"] = case.status
if case.reason:
testcase["reason"] = case.reason
testcases.append(testcase)
suite['testcases'] = testcases
suites.append(suite)
report["testsuites"] = suites
with open(filename, "wt") as json_file:
json.dump(report, json_file, indent=4, separators=(',',':'))
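    # Shape of the generated JSON report (illustrative, values abbreviated):
    # {
    #     "environment": {"os": "...", "zephyr_version": "...", "toolchain": "..."},
    #     "testsuites": [
    #         {"name": "...", "arch": "...", "platform": "...",
    #          "status": "passed", "execution_time": "0.10",
    #          "testcases": [{"identifier": "...", "status": "passed"}]}
    #     ]
    # }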
def get_testsuite(self, identifier):
results = []
for _, ts in self.testsuites.items():
for case in ts.testcases:
if case == identifier:
results.append(ts)
return results
def verify_platforms_existence(self, platform_names_to_verify, log_info=""):
"""
        Verify that each platform name (passed via the --platform option, or in
        a yaml file via the platform_allow or integration_platforms options) is
        valid. If not, log an error and exit.
"""
for platform in platform_names_to_verify:
if platform in self.platform_names:
break
else:
logger.error(f"{log_info} - unrecognized platform - {platform}")
sys.exit(2)
def create_build_dir_links(self):
"""
        Iterate through all non-skipped instances in the suite and create a
        link for each instance's build directory. Those links are passed to the
        CMake command in the next steps.
"""
links_dir_name = "twister_links" # folder for all links
links_dir_path = os.path.join(self.outdir, links_dir_name)
if not os.path.exists(links_dir_path):
os.mkdir(links_dir_path)
for instance in self.instances.values():
if instance.status != "skipped":
self._create_build_dir_link(links_dir_path, instance)
def _create_build_dir_link(self, links_dir_path, instance):
"""
        Create the build directory under its original "long" path, then create
        a link with a shorter path pointing to it and replace build_dir with
        that link. The link is what gets passed to the CMake command. This
        helps limit path lengths, which can become significant when building
        with CMake on Windows.
"""
os.makedirs(instance.build_dir, exist_ok=True)
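        # Illustrative effect (hypothetical paths): a long directory such as
        # <outdir>/frdm_k64f/tests/kernel/sched/kernel.scheduler is replaced
        # by a short link like <outdir>/twister_links/test_0.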
link_name = f"test_{self.link_dir_counter}"
link_path = os.path.join(links_dir_path, link_name)
if os.name == "nt": # if OS is Windows
command = ["mklink", "/J", f"{link_path}", f"{instance.build_dir}"]
subprocess.call(command, shell=True)
else: # for Linux and MAC OS
os.symlink(instance.build_dir, link_path)
# Here original build directory is replaced with symbolic link. It will
# be passed to CMake command
instance.build_dir = link_path
self.link_dir_counter += 1
class CoverageTool:
""" Base class for every supported coverage tool
"""
def __init__(self):
self.gcov_tool = None
self.base_dir = None
@staticmethod
def factory(tool):
if tool == 'lcov':
t = Lcov()
elif tool == 'gcovr':
t = Gcovr()
else:
logger.error("Unsupported coverage tool specified: {}".format(tool))
return None
logger.debug(f"Select {tool} as the coverage tool...")
return t
@staticmethod
def retrieve_gcov_data(input_file):
logger.debug("Working on %s" % input_file)
extracted_coverage_info = {}
capture_data = False
capture_complete = False
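        # The log is expected to contain a dump block of the following shape
        # (illustrative): each data line starts with '*', then the gcda file
        # name, '<', and a hex dump of the file's contents.
        #   GCOV_COVERAGE_DUMP_START
        #   */path/to/foo.gcda<a1b2c3...
        #   GCOV_COVERAGE_DUMP_END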
with open(input_file, 'r') as fp:
for line in fp.readlines():
if re.search("GCOV_COVERAGE_DUMP_START", line):
capture_data = True
continue
if re.search("GCOV_COVERAGE_DUMP_END", line):
capture_complete = True
break
# Loop until the coverage data is found.
if not capture_data:
continue
if line.startswith("*"):
sp = line.split("<")
if len(sp) > 1:
# Remove the leading delimiter "*"
file_name = sp[0][1:]
# Remove the trailing new line char
hex_dump = sp[1][:-1]
else:
continue
else:
continue
extracted_coverage_info.update({file_name: hex_dump})
if not capture_data:
capture_complete = True
return {'complete': capture_complete, 'data': extracted_coverage_info}
@staticmethod
def create_gcda_files(extracted_coverage_info):
logger.debug("Generating gcda files")
for filename, hexdump_val in extracted_coverage_info.items():
            # gcovr fails if a gcda file for kobject_hash is present, so skip
            # it. This problem only occurs with gcovr v4.1.
if "kobject_hash" in filename:
filename = (filename[:-4]) + "gcno"
try:
os.remove(filename)
except Exception:
pass
continue
with open(filename, 'wb') as fp:
fp.write(bytes.fromhex(hexdump_val))
def generate(self, outdir):
for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
gcov_data = self.__class__.retrieve_gcov_data(filename)
capture_complete = gcov_data['complete']
extracted_coverage_info = gcov_data['data']
if capture_complete:
self.__class__.create_gcda_files(extracted_coverage_info)
logger.debug("Gcov data captured: {}".format(filename))
else:
logger.error("Gcov data capture incomplete: {}".format(filename))
with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
ret = self._generate(outdir, coveragelog)
if ret == 0:
logger.info("HTML report generated: {}".format(
os.path.join(outdir, "coverage", "index.html")))
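# Typical driving sequence (assumed caller code, shown for illustration only):
#   tool = CoverageTool.factory("lcov")
#   tool.gcov_tool = "gcov"
#   tool.base_dir = "<zephyr base>"
#   tool.add_ignore_file("generated")
#   tool.generate(outdir)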
class Lcov(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('*' + pattern + '*')
def add_ignore_directory(self, pattern):
self.ignores.append('*/' + pattern + '/*')
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.info")
ztestfile = os.path.join(outdir, "ztest.info")
cmd = ["lcov", "--gcov-tool", self.gcov_tool,
"--capture", "--directory", outdir,
"--rc", "lcov_branch_coverage=1",
"--output-file", coveragefile]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
coveragefile,
os.path.join(self.base_dir, "tests", "ztest", "*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
ztestfile,
os.path.join(self.base_dir, "tests/ztest/test/*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
for i in self.ignores:
subprocess.call(
["lcov", "--gcov-tool", self.gcov_tool, "--remove",
coveragefile, i, "--output-file",
coveragefile, "--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
# The --ignore-errors source option is added to avoid it exiting due to
# samples/application_development/external_lib/
return subprocess.call(["genhtml", "--legend", "--branch-coverage",
"--ignore-errors", "source",
"-output-directory",
os.path.join(outdir, "coverage")] + files,
stdout=coveragelog)
class Gcovr(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('.*' + pattern + '.*')
def add_ignore_directory(self, pattern):
self.ignores.append(".*/" + pattern + '/.*')
    @staticmethod
    def _interleave_list(prefix, items):
        # e.g. _interleave_list("-e", ["foo", "bar"]) -> ["-e", "foo", "-e", "bar"]
        tuple_list = [(prefix, item) for item in items]
        return [item for sublist in tuple_list for item in sublist]
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.json")
ztestfile = os.path.join(outdir, "ztest.json")
excludes = Gcovr._interleave_list("-e", self.ignores)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
cmd = ["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-e", "tests/*"] + excludes + ["--json", "-o",
coveragefile, outdir]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-f", "tests/ztest", "-e",
"tests/ztest/test/*", "--json", "-o", ztestfile,
outdir], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
subdir = os.path.join(outdir, "coverage")
os.makedirs(subdir, exist_ok=True)
tracefiles = self._interleave_list("--add-tracefile", files)
return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
"--html-details"] + tracefiles +
["-o", os.path.join(subdir, "index.html")],
stdout=coveragelog)
class DUT(object):
def __init__(self,
id=None,
serial=None,
serial_baud=None,
platform=None,
product=None,
serial_pty=None,
connected=False,
runner_params=None,
pre_script=None,
post_script=None,
post_flash_script=None,
runner=None):
self.serial = serial
self.baud = serial_baud or 115200
self.platform = platform
self.serial_pty = serial_pty
self._counter = Value("i", 0)
self._available = Value("i", 1)
self.connected = connected
self.pre_script = pre_script
self.id = id
self.product = product
self.runner = runner
self.runner_params = runner_params
self.fixtures = []
self.post_flash_script = post_flash_script
self.post_script = post_script
self.probe_id = None
self.notes = None
self.lock = Lock()
self.match = False
@property
def available(self):
with self._available.get_lock():
return self._available.value
@available.setter
def available(self, value):
with self._available.get_lock():
self._available.value = value
@property
def counter(self):
with self._counter.get_lock():
return self._counter.value
@counter.setter
def counter(self, value):
with self._counter.get_lock():
self._counter.value = value
def to_dict(self):
d = {}
exclude = ['_available', '_counter', 'match']
v = vars(self)
for k in v.keys():
if k not in exclude and v[k]:
d[k] = v[k]
return d
def __repr__(self):
return f"<{self.platform} ({self.product}) on {self.serial}>"
class HardwareMap:
schema_path = os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "hwmap-schema.yaml")
manufacturer = [
'ARM',
'SEGGER',
'MBED',
'STMicroelectronics',
'Atmel Corp.',
'Texas Instruments',
'Silicon Labs',
'NXP Semiconductors',
'Microchip Technology Inc.',
'FTDI',
'Digilent'
]
runner_mapping = {
'pyocd': [
'DAPLink CMSIS-DAP',
'MBED CMSIS-DAP'
],
'jlink': [
'J-Link',
'J-Link OB'
],
'openocd': [
'STM32 STLink', '^XDS110.*', 'STLINK-V3'
],
'dediprog': [
'TTL232R-3V3',
'MCP2200 USB Serial Port Emulator'
]
}
def __init__(self):
self.detected = []
self.duts = []
def add_device(self, serial, platform, pre_script, is_pty, baud=None):
device = DUT(platform=platform, connected=True, pre_script=pre_script, serial_baud=baud)
if is_pty:
device.serial_pty = serial
else:
device.serial = serial
self.duts.append(device)
def load(self, map_file):
hwm_schema = scl.yaml_load(self.schema_path)
duts = scl.yaml_load_verify(map_file, hwm_schema)
for dut in duts:
pre_script = dut.get('pre_script')
post_script = dut.get('post_script')
post_flash_script = dut.get('post_flash_script')
platform = dut.get('platform')
id = dut.get('id')
runner = dut.get('runner')
runner_params = dut.get('runner_params')
serial_pty = dut.get('serial_pty')
serial = dut.get('serial')
baud = dut.get('baud', None)
product = dut.get('product')
fixtures = dut.get('fixtures', [])
            connected = dut.get('connected') and ((serial or serial_pty) is not None)
new_dut = DUT(platform=platform,
product=product,
runner=runner,
runner_params=runner_params,
id=id,
serial_pty=serial_pty,
serial=serial,
serial_baud=baud,
connected=connected,
pre_script=pre_script,
post_script=post_script,
post_flash_script=post_flash_script)
new_dut.fixtures = fixtures
new_dut.counter = 0
self.duts.append(new_dut)
def scan(self, persistent=False):
from serial.tools import list_ports
if persistent and platform.system() == 'Linux':
# On Linux, /dev/serial/by-id provides symlinks to
# '/dev/ttyACMx' nodes using names which are unique as
# long as manufacturers fill out USB metadata nicely.
#
# This creates a map from '/dev/ttyACMx' device nodes
# to '/dev/serial/by-id/usb-...' symlinks. The symlinks
# go into the hardware map because they stay the same
# even when the user unplugs / replugs the device.
#
# Some inexpensive USB/serial adapters don't result
# in unique names here, though, so use of this feature
# requires explicitly setting persistent=True.
by_id = Path('/dev/serial/by-id')
def readlink(link):
return str((by_id / link).resolve())
persistent_map = {readlink(link): str(link)
for link in by_id.iterdir()}
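            # Illustrative result for one device (hypothetical serial number):
            #   {'/dev/ttyACM0':
            #    '/dev/serial/by-id/usb-SEGGER_J-Link_000123456789-if00'}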
else:
persistent_map = {}
serial_devices = list_ports.comports()
logger.info("Scanning connected hardware...")
for d in serial_devices:
if d.manufacturer in self.manufacturer:
# TI XDS110 can have multiple serial devices for a single board
# assume endpoint 0 is the serial, skip all others
if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
continue
s_dev = DUT(platform="unknown",
id=d.serial_number,
serial=persistent_map.get(d.device, d.device),
product=d.product,
runner='unknown',
connected=True)
for runner, _ in self.runner_mapping.items():
products = self.runner_mapping.get(runner)
if d.product in products:
s_dev.runner = runner
continue
# Try regex matching
for p in products:
if re.match(p, d.product):
s_dev.runner = runner
s_dev.connected = True
s_dev.lock = None
self.detected.append(s_dev)
else:
logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))
def save(self, hwm_file):
# use existing map
self.detected.sort(key=lambda x: x.serial or '')
if os.path.exists(hwm_file):
with open(hwm_file, 'r') as yaml_file:
hwm = yaml.load(yaml_file, Loader=SafeLoader)
if hwm:
hwm.sort(key=lambda x: x.get('id', ''))
# disconnect everything
for h in hwm:
h['connected'] = False
h['serial'] = None
for _detected in self.detected:
for h in hwm:
if _detected.id == h['id'] and _detected.product == h['product'] and not _detected.match:
h['connected'] = True
h['serial'] = _detected.serial
_detected.match = True
new_duts = list(filter(lambda d: not d.match, self.detected))
new = []
for d in new_duts:
new.append(d.to_dict())
if hwm:
hwm = hwm + new
else:
hwm = new
with open(hwm_file, 'w') as yaml_file:
yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False)
self.load(hwm_file)
logger.info("Registered devices:")
self.dump()
else:
# create new file
dl = []
for _connected in self.detected:
platform = _connected.platform
id = _connected.id
runner = _connected.runner
serial = _connected.serial
product = _connected.product
d = {
'platform': platform,
'id': id,
'runner': runner,
'serial': serial,
'product': product,
'connected': _connected.connected
}
dl.append(d)
with open(hwm_file, 'w') as yaml_file:
yaml.dump(dl, yaml_file, Dumper=Dumper, default_flow_style=False)
logger.info("Detected devices:")
self.dump(detected=True)
def dump(self, filtered=[], header=[], connected_only=False, detected=False):
print("")
table = []
if detected:
to_show = self.detected
else:
to_show = self.duts
if not header:
header = ["Platform", "ID", "Serial device"]
for p in to_show:
platform = p.platform
connected = p.connected
if filtered and platform not in filtered:
continue
if not connected_only or connected:
table.append([platform, p.id, p.serial])
print(tabulate(table, headers=header, tablefmt="github"))
def init(colorama_strip):
colorama.init(strip=colorama_strip)
test_add_vectors.py
import time
import random
import pdb
import threading
import logging
from multiprocessing import Pool, Process
import pytest
from milvus import IndexType, MetricType
from utils import *
dim = 128
index_file_size = 10
table_id = "test_add"
ADD_TIMEOUT = 60
nprobe = 1
tag = "1970-01-01"
class TestAddBase:
"""
******************************************************************
The following cases are used to test `add_vectors / index / search / delete` mixed function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index_params()
)
def get_simple_index_params(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in cpu mode")
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("Skip PQ Temporary")
return request.param
def test_add_vector_create_table(self, connect, table):
'''
target: test add vector, then create table again
method: add vector and create table
expected: status not ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(table, vector)
param = {'table_name': table,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_table(param)
assert not status.OK()
def test_add_vector_has_table(self, connect, table):
'''
target: test add vector, then check table existence
method: add vector and call HasTable
expected: table exists, status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(table, vector)
assert assert_has_table(connect, table)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_delete_table_add_vector(self, connect, table):
'''
target: test add vector after table deleted
method: delete table and add vector
expected: status not ok
'''
status = connect.delete_table(table)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(table, vector)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_delete_table_add_vector_another(self, connect, table):
'''
target: test add vector to table_1 after table_2 deleted
method: delete table_2 and add vector to table_1
expected: status ok
'''
param = {'table_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_table(param)
status = connect.delete_table(table)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(param['table_name'], vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_delete_table(self, connect, table):
'''
target: test delete table after add vector
method: add vector and delete table
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(table, vector)
status = connect.delete_table(table)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_delete_another_table(self, connect, table):
'''
        target: test deleting table_1 after adding a vector to table_2
method: add vector and delete table
expected: status ok
'''
param = {'table_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_table(param)
vector = gen_single_vector(dim)
        status, ids = connect.add_vectors(table, vector)
        # Delete the other table, mirroring the TestAddIP variant of this test.
        status = connect.delete_table(param['table_name'])
        assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_delete_table(self, connect, table):
'''
target: test delete table after add vector for a while
method: add vector, sleep, and delete table
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(table, vector)
time.sleep(1)
status = connect.delete_table(table)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_delete_another_table(self, connect, table):
'''
        target: test deleting table_1 a while after adding a vector to table_2
        method: add vector, sleep, and delete table
expected: status ok
'''
param = {'table_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_table(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(table, vector)
time.sleep(1)
status = connect.delete_table(param['table_name'])
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_create_index_add_vector(self, connect, table, get_simple_index_params):
'''
target: test add vector after build index
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index_params
status = connect.create_index(table, index_param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(table, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_create_index_add_vector_another(self, connect, table, get_simple_index_params):
'''
target: test add vector to table_2 after build index for table_1
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index_params
param = {'table_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_table(param)
status = connect.create_index(table, index_param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(table, vector)
connect.delete_table(param['table_name'])
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_create_index(self, connect, table, get_simple_index_params):
'''
        target: test building an index after adding a vector
method: add vector and build index
expected: status ok
'''
index_param = get_simple_index_params
logging.getLogger().info(index_param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(table, vector)
status = connect.create_index(table, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_create_index_another(self, connect, table, get_simple_index_params):
'''
        target: test building an index for table_2 after adding a vector to table_1
        method: add vector and build index
expected: status ok
'''
index_param = get_simple_index_params
param = {'table_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_table(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(table, vector)
status = connect.create_index(param['table_name'], index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_create_index(self, connect, table, get_simple_index_params):
'''
        target: test building an index a while after adding a vector
method: add vector and build index
expected: status ok
'''
index_param = get_simple_index_params
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(table, vector)
time.sleep(1)
status = connect.create_index(table, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_create_index_another(self, connect, table, get_simple_index_params):
'''
        target: test building an index for table_2 a while after adding a vector to table_1
        method: add vector, sleep, and build index
expected: status ok
'''
index_param = get_simple_index_params
param = {'table_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_table(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(table, vector)
time.sleep(1)
status = connect.create_index(param['table_name'], index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_search_vector_add_vector(self, connect, table):
'''
target: test add vector after search table
method: search table and add vector
expected: status ok
'''
vector = gen_single_vector(dim)
status, result = connect.search_vectors(table, 1, nprobe, vector)
status, ids = connect.add_vectors(table, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_search_vector_add_vector_another(self, connect, table):
'''
target: test add vector to table_1 after search table_2
method: search table and add vector
expected: status ok
'''
param = {'table_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_table(param)
vector = gen_single_vector(dim)
status, result = connect.search_vectors(table, 1, nprobe, vector)
status, ids = connect.add_vectors(param['table_name'], vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_search_vector(self, connect, table):
'''
target: test search vector after add vector
method: add vector and search table
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(table, vector)
status, result = connect.search_vectors(table, 1, nprobe, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_search_vector_another(self, connect, table):
'''
target: test add vector to table_1 after search table_2
method: search table and add vector
expected: status ok
'''
param = {'table_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_table(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(table, vector)
status, result = connect.search_vectors(param['table_name'], 1, nprobe, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_search_vector(self, connect, table):
'''
target: test search vector after add vector after a while
method: add vector, sleep, and search table
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(table, vector)
time.sleep(1)
status, result = connect.search_vectors(table, 1, nprobe, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_search_vector_another(self, connect, table):
'''
        target: test searching table_2 a while after adding a vector to table_1
        method: add vector, sleep, and search the other table
expected: status ok
'''
param = {'table_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_table(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(table, vector)
time.sleep(1)
status, result = connect.search_vectors(param['table_name'], 1, nprobe, vector)
assert status.OK()
"""
******************************************************************
The following cases are used to test `add_vectors` function
******************************************************************
"""
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_ids(self, connect, table):
'''
        target: test add vectors in a table, using custom ids
        method: create table and add vectors in it, then check the ids returned and the table row count
        expected: the length of ids equals nq and search returns the custom ids
'''
nq = 5; top_k = 1; nprobe = 1
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.add_vectors(table, vectors, ids)
time.sleep(2)
assert status.OK()
assert len(ids) == nq
# check search result
status, result = connect.search_vectors(table, top_k, nprobe, vectors)
logging.getLogger().info(result)
assert len(result) == nq
for i in range(nq):
assert result[i][0].id == i
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_twice_ids_no_ids(self, connect, table):
'''
target: check the result of add_vectors, with params ids and no ids
        method: add vectors twice, with custom ids first and then without ids
expected: status not OK
'''
nq = 5; top_k = 1; nprobe = 1
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.add_vectors(table, vectors, ids)
assert status.OK()
status, ids = connect.add_vectors(table, vectors)
logging.getLogger().info(status)
logging.getLogger().info(ids)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_twice_not_ids_ids(self, connect, table):
'''
target: check the result of add_vectors, with params ids and no ids
        method: add vectors twice, without ids first and then with custom ids
expected: status not OK
'''
nq = 5; top_k = 1; nprobe = 1
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.add_vectors(table, vectors)
assert status.OK()
status, ids = connect.add_vectors(table, vectors, ids)
logging.getLogger().info(status)
logging.getLogger().info(ids)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_ids_length_not_match(self, connect, table):
'''
        target: test add vectors with custom ids where len(ids) != len(vectors)
method: create table and add vectors in it
expected: raise an exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
ids = [i for i in range(1, nq)]
with pytest.raises(Exception) as e:
status, ids = connect.add_vectors(table, vectors, ids)
@pytest.fixture(
scope="function",
params=gen_invalid_vector_ids()
)
def get_vector_id(self, request):
yield request.param
@pytest.mark.level(2)
def test_add_vectors_ids_invalid(self, connect, table, get_vector_id):
'''
        target: test add vectors with custom ids that are not int64
method: create table and add vectors in it
expected: raise an exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
vector_id = get_vector_id
ids = [vector_id for _ in range(nq)]
with pytest.raises(Exception):
connect.add_vectors(table, vectors, ids)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors(self, connect, table):
'''
        target: test add vectors in a table created before
        method: create table and add vectors in it, then check the ids returned and the table row count
        expected: the length of ids equals nq
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.add_vectors(table, vectors)
assert status.OK()
assert len(ids) == nq
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_tag(self, connect, table):
'''
target: test add vectors in table created before
method: create table and add vectors in it, with the partition_tag param
        expected: the table row count equals nq
'''
nq = 5
vectors = gen_vectors(nq, dim)
status = connect.create_partition(table, tag)
status, ids = connect.add_vectors(table, vectors, partition_tag=tag)
assert status.OK()
assert len(ids) == nq
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_tag_A(self, connect, table):
'''
target: test add vectors in table created before
method: create partition and add vectors in it
        expected: the table row count equals nq
'''
nq = 5
vectors = gen_vectors(nq, dim)
status = connect.create_partition(table, tag)
status, ids = connect.add_vectors(table, vectors, partition_tag=tag)
assert status.OK()
assert len(ids) == nq
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_tag_not_existed(self, connect, table):
'''
target: test add vectors in table created before
        method: create table and add vectors in it, with a non-existent partition_tag param
expected: status not ok
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.add_vectors(table, vectors, partition_tag=tag)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_tag_not_existed_A(self, connect, table):
'''
target: test add vectors in table created before
        method: create partition, then add vectors with a non-existent partition_tag param
expected: status not ok
'''
nq = 5
vectors = gen_vectors(nq, dim)
new_tag = "new_tag"
status = connect.create_partition(table, tag)
status, ids = connect.add_vectors(table, vectors, partition_tag=new_tag)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_tag_existed(self, connect, table):
'''
target: test add vectors in table created before
        method: create table and add vectors in it repeatedly, with the partition_tag param
        expected: the table row count equals nq
'''
nq = 5
vectors = gen_vectors(nq, dim)
status = connect.create_partition(table, tag)
status, ids = connect.add_vectors(table, vectors, partition_tag=tag)
for i in range(5):
status, ids = connect.add_vectors(table, vectors, partition_tag=tag)
assert status.OK()
assert len(ids) == nq
@pytest.mark.level(2)
def test_add_vectors_without_connect(self, dis_connect, table):
'''
target: test add vectors without connection
method: create table and add vectors in it, check if added successfully
expected: raise exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
with pytest.raises(Exception) as e:
status, ids = dis_connect.add_vectors(table, vectors)
def test_add_table_not_existed(self, connect):
'''
        target: test add vectors to a table that does not exist
        method: add vectors to a non-existent table and check the status
expected: status not ok
'''
nq = 5
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(gen_unique_str("not_exist_table"), vector)
assert not status.OK()
assert not ids
def test_add_vector_dim_not_matched(self, connect, table):
'''
target: test add vector, the vector dimension is not equal to the table dimension
method: the vector dimension is half of the table dimension, check the status
expected: status not ok
'''
vector = gen_single_vector(int(dim)//2)
status, ids = connect.add_vectors(table, vector)
assert not status.OK()
def test_add_vectors_dim_not_matched(self, connect, table):
'''
target: test add vectors, the vector dimension is not equal to the table dimension
method: the vectors dimension is half of the table dimension, check the status
expected: status not ok
'''
nq = 5
vectors = gen_vectors(nq, int(dim)//2)
status, ids = connect.add_vectors(table, vectors)
assert not status.OK()
def test_add_vector_query_after_sleep(self, connect, table):
'''
target: test add vectors, and search it after sleep
        method: add vectors, sleep, then use vectors[0] as the query vector
expected: status ok and result length is 1
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.add_vectors(table, vectors)
time.sleep(3)
status, result = connect.search_vectors(table, 1, nprobe, [vectors[0]])
assert status.OK()
assert len(result) == 1
# TODO: enable
# @pytest.mark.repeat(10)
@pytest.mark.timeout(ADD_TIMEOUT)
def _test_add_vector_with_multiprocessing(self, args):
'''
        target: test add vectors with multiple processes
        method: several processes add vectors concurrently
        expected: status ok and the table row count equals the number of added vectors
'''
table = gen_unique_str()
uri = "tcp://%s:%s" % (args["ip"], args["port"])
param = {'table_name': table,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
milvus = get_milvus(args["handler"])
milvus.connect(uri=uri)
milvus.create_table(param)
vector = gen_single_vector(dim)
process_num = 4
loop_num = 5
processes = []
def add():
milvus = get_milvus(args["handler"])
milvus.connect(uri=uri)
i = 0
while i < loop_num:
status, ids = milvus.add_vectors(table, vector)
i = i + 1
milvus.disconnect()
for i in range(process_num):
p = Process(target=add, args=())
processes.append(p)
p.start()
time.sleep(0.2)
for p in processes:
p.join()
time.sleep(2)
status, count = milvus.get_table_row_count(table)
assert count == process_num * loop_num
def test_add_vector_multi_tables(self, connect):
'''
target: test add vectors is correct or not with multiple tables of L2
        method: create 20 tables and add vectors into them in turn
expected: status ok
'''
nq = 100
vectors = gen_vectors(nq, dim)
table_list = []
for i in range(20):
table_name = gen_unique_str('test_add_vector_multi_tables')
table_list.append(table_name)
param = {'table_name': table_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_table(param)
time.sleep(5)
for j in range(5):
for i in range(20):
status, ids = connect.add_vectors(table_name=table_list[i], records=vectors)
assert status.OK()
class TestAddIP:
"""
******************************************************************
The following cases are used to test `add_vectors / index / search / delete` mixed function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index_params()
)
def get_simple_index_params(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in cpu mode")
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("Skip PQ Temporary")
return request.param
def test_add_vector_create_table(self, connect, ip_table):
'''
target: test add vector, then create table again
method: add vector and create table
expected: status not ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_table, vector)
param = {'table_name': ip_table,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_table(param)
assert not status.OK()
def test_add_vector_has_table(self, connect, ip_table):
'''
target: test add vector, then check table existence
method: add vector and call HasTable
expected: table exists, status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_table, vector)
assert assert_has_table(connect, ip_table)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_delete_table_add_vector(self, connect, ip_table):
'''
target: test add vector after table deleted
method: delete table and add vector
expected: status not ok
'''
status = connect.delete_table(ip_table)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_table, vector)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_delete_table_add_vector_another(self, connect, ip_table):
'''
target: test add vector to table_1 after table_2 deleted
method: delete table_2 and add vector to table_1
expected: status ok
'''
param = {'table_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_table(param)
status = connect.delete_table(ip_table)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(param['table_name'], vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_delete_table(self, connect, ip_table):
'''
target: test delete table after add vector
method: add vector and delete table
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_table, vector)
status = connect.delete_table(ip_table)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_delete_another_table(self, connect, ip_table):
'''
        target: test deleting table_1 after adding a vector to table_2
method: add vector and delete table
expected: status ok
'''
param = {'table_name': 'test_add_vector_delete_another_table',
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_table(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_table, vector)
status = connect.delete_table(param['table_name'])
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_delete_table(self, connect, ip_table):
'''
target: test delete table after add vector for a while
method: add vector, sleep, and delete table
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_table, vector)
time.sleep(1)
status = connect.delete_table(ip_table)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_delete_another_table(self, connect, ip_table):
'''
        target: test deleting table_1 a while after adding a vector to table_2
        method: add vector, sleep, and delete table
expected: status ok
'''
param = {'table_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_table(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_table, vector)
time.sleep(1)
status = connect.delete_table(param['table_name'])
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_create_index_add_vector(self, connect, ip_table, get_simple_index_params):
'''
target: test add vector after build index
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index_params
status = connect.create_index(ip_table, index_param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_table, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_create_index_add_vector_another(self, connect, ip_table, get_simple_index_params):
'''
target: test add vector to table_2 after build index for table_1
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index_params
param = {'table_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_table(param)
status = connect.create_index(ip_table, index_param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_table, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_create_index(self, connect, ip_table, get_simple_index_params):
'''
        target: test building an index after adding a vector
method: add vector and build index
expected: status ok
'''
index_param = get_simple_index_params
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_table, vector)
status, mode = connect._cmd("mode")
assert status.OK()
status = connect.create_index(ip_table, index_param)
if str(mode) == "GPU" and (index_param["index_type"] == IndexType.IVF_PQ):
assert not status.OK()
else:
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_create_index_another(self, connect, ip_table, get_simple_index_params):
'''
        target: test building an index for table_2 after adding a vector to table_1
        method: add vector and build index
expected: status ok
'''
index_param = get_simple_index_params
param = {'table_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_table(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_table, vector)
status = connect.create_index(param['table_name'], index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_create_index(self, connect, ip_table, get_simple_index_params):
'''
        target: test building an index a while after adding a vector
method: add vector and build index
expected: status ok
'''
index_param = get_simple_index_params
if index_param["index_type"] == IndexType.IVF_PQ:
pytest.skip("Skip some PQ cases")
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_table, vector)
time.sleep(1)
status = connect.create_index(ip_table, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_create_index_another(self, connect, ip_table, get_simple_index_params):
'''
        target: test building an index for table_2 a while after adding a vector to table_1
        method: add vector, sleep, and build index
expected: status ok
'''
index_param = get_simple_index_params
param = {'table_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_table(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_table, vector)
time.sleep(1)
status = connect.create_index(param['table_name'], index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_search_vector_add_vector(self, connect, ip_table):
'''
target: test add vector after search table
method: search table and add vector
expected: status ok
'''
vector = gen_single_vector(dim)
status, result = connect.search_vectors(ip_table, 1, nprobe, vector)
status, ids = connect.add_vectors(ip_table, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_search_vector_add_vector_another(self, connect, ip_table):
'''
target: test add vector to table_1 after search table_2
method: search table and add vector
expected: status ok
'''
param = {'table_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_table(param)
vector = gen_single_vector(dim)
status, result = connect.search_vectors(ip_table, 1, nprobe, vector)
status, ids = connect.add_vectors(param['table_name'], vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_search_vector(self, connect, ip_table):
'''
target: test search vector after add vector
method: add vector and search table
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_table, vector)
status, result = connect.search_vectors(ip_table, 1, nprobe, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_search_vector_another(self, connect, ip_table):
'''
target: test add vector to table_1 after search table_2
method: search table and add vector
expected: status ok
'''
param = {'table_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_table(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_table, vector)
status, result = connect.search_vectors(param['table_name'], 1, nprobe, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_search_vector(self, connect, ip_table):
'''
target: test search vector after add vector after a while
method: add vector, sleep, and search table
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_table, vector)
time.sleep(1)
status, result = connect.search_vectors(ip_table, 1, nprobe, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_search_vector_another(self, connect, ip_table):
'''
        target: test searching table_2 a while after adding a vector to table_1
        method: add vector, sleep, and search the other table
expected: status ok
'''
param = {'table_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_table(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_table, vector)
time.sleep(1)
status, result = connect.search_vectors(param['table_name'], 1, nprobe, vector)
assert status.OK()
"""
******************************************************************
The following cases are used to test `add_vectors` function
******************************************************************
"""
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_ids(self, connect, ip_table):
'''
        target: test add vectors in a table, using custom ids
        method: create table and add vectors in it, then check the ids returned and the table row count
        expected: the length of ids equals nq and search returns the custom ids
'''
nq = 5; top_k = 1; nprobe = 1
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.add_vectors(ip_table, vectors, ids)
time.sleep(2)
assert status.OK()
assert len(ids) == nq
# check search result
status, result = connect.search_vectors(ip_table, top_k, nprobe, vectors)
logging.getLogger().info(result)
assert len(result) == nq
for i in range(nq):
assert result[i][0].id == i
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_twice_ids_no_ids(self, connect, ip_table):
'''
target: check the result of add_vectors, with params ids and no ids
        method: add vectors twice, with custom ids first and then without ids
expected: status not OK
'''
nq = 5; top_k = 1; nprobe = 1
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.add_vectors(ip_table, vectors, ids)
assert status.OK()
status, ids = connect.add_vectors(ip_table, vectors)
logging.getLogger().info(status)
logging.getLogger().info(ids)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_twice_not_ids_ids(self, connect, ip_table):
'''
target: check the result of add_vectors, with params ids and no ids
        method: add vectors twice, without ids first and then with custom ids
expected: status not OK
'''
nq = 5; top_k = 1; nprobe = 1
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.add_vectors(ip_table, vectors)
assert status.OK()
status, ids = connect.add_vectors(ip_table, vectors, ids)
logging.getLogger().info(status)
logging.getLogger().info(ids)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_ids_length_not_match(self, connect, ip_table):
'''
        target: test add vectors with custom ids where len(ids) != len(vectors)
method: create table and add vectors in it
expected: raise an exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
ids = [i for i in range(1, nq)]
with pytest.raises(Exception) as e:
status, ids = connect.add_vectors(ip_table, vectors, ids)
@pytest.fixture(
scope="function",
params=gen_invalid_vector_ids()
)
def get_vector_id(self, request):
yield request.param
@pytest.mark.level(2)
def test_add_vectors_ids_invalid(self, connect, ip_table, get_vector_id):
'''
        target: test add vectors with custom ids that are not int64
method: create table and add vectors in it
expected: raise an exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
vector_id = get_vector_id
ids = [vector_id for i in range(nq)]
with pytest.raises(Exception) as e:
status, ids = connect.add_vectors(ip_table, vectors, ids)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors(self, connect, ip_table):
'''
        target: test add vectors in a table created before
        method: create table and add vectors in it, then check the ids returned and the table row count
        expected: the length of ids equals nq
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.add_vectors(ip_table, vectors)
assert status.OK()
assert len(ids) == nq
@pytest.mark.level(2)
def test_add_vectors_without_connect(self, dis_connect, ip_table):
'''
target: test add vectors without connection
method: create table and add vectors in it, check if added successfully
expected: raise exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
with pytest.raises(Exception) as e:
status, ids = dis_connect.add_vectors(ip_table, vectors)
def test_add_vector_dim_not_matched(self, connect, ip_table):
'''
target: test add vector, the vector dimension is not equal to the table dimension
method: the vector dimension is half of the table dimension, check the status
expected: status not ok
'''
vector = gen_single_vector(int(dim)//2)
status, ids = connect.add_vectors(ip_table, vector)
assert not status.OK()
def test_add_vectors_dim_not_matched(self, connect, ip_table):
'''
target: test add vectors, the vector dimension is not equal to the table dimension
method: the vectors dimension is half of the table dimension, check the status
expected: status not ok
'''
nq = 5
vectors = gen_vectors(nq, int(dim)//2)
status, ids = connect.add_vectors(ip_table, vectors)
assert not status.OK()
def test_add_vector_query_after_sleep(self, connect, ip_table):
'''
target: test add vectors, and search it after sleep
        method: add vectors, sleep, then use vectors[0] as the query vector
expected: status ok and result length is 1
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.add_vectors(ip_table, vectors)
time.sleep(3)
status, result = connect.search_vectors(ip_table, 1, nprobe, [vectors[0]])
assert status.OK()
assert len(result) == 1
def test_add_vector_multi_tables(self, connect):
'''
target: test add vectors is correct or not with multiple tables of IP
        method: create 20 tables and add vectors into them in turn
expected: status ok
'''
nq = 100
vectors = gen_vectors(nq, dim)
table_list = []
for i in range(20):
table_name = gen_unique_str('test_add_vector_multi_tables')
table_list.append(table_name)
param = {'table_name': table_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.IP}
connect.create_table(param)
time.sleep(2)
for j in range(10):
for i in range(20):
status, ids = connect.add_vectors(table_name=table_list[i], records=vectors)
assert status.OK()
class TestAddAdvance:
@pytest.fixture(
scope="function",
params=[
1,
10,
100,
1000,
pytest.param(5000 - 1, marks=pytest.mark.xfail),
pytest.param(5000, marks=pytest.mark.xfail),
pytest.param(5000 + 1, marks=pytest.mark.xfail),
],
)
def insert_count(self, request):
yield request.param
def test_insert_much(self, connect, table, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
insert_vec_list = gen_vectors(nb, dim)
status, ids = connect.add_vectors(table, insert_vec_list)
assert len(ids) == nb
assert status.OK()
def test_insert_much_ip(self, connect, ip_table, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
insert_vec_list = gen_vectors(nb, dim)
status, ids = connect.add_vectors(ip_table, insert_vec_list)
assert len(ids) == nb
assert status.OK()
def test_insert_much_jaccard(self, connect, jac_table, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
tmp, insert_vec_list = gen_binary_vectors(nb, dim)
status, ids = connect.add_vectors(jac_table, insert_vec_list)
assert len(ids) == nb
assert status.OK()
def test_insert_much_hamming(self, connect, ham_table, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
tmp, insert_vec_list = gen_binary_vectors(nb, dim)
status, ids = connect.add_vectors(ham_table, insert_vec_list)
assert len(ids) == nb
assert status.OK()
def test_insert_much_tanimoto(self, connect, tanimoto_table, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
tmp, insert_vec_list = gen_binary_vectors(nb, dim)
status, ids = connect.add_vectors(tanimoto_table, insert_vec_list)
assert len(ids) == nb
assert status.OK()
class TestNameInvalid(object):
"""
Test adding vectors with invalid table names
"""
@pytest.fixture(
scope="function",
params=gen_invalid_table_names()
)
def get_table_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_table_names()
)
def get_tag_name(self, request):
yield request.param
@pytest.mark.level(2)
def test_add_vectors_with_invalid_table_name(self, connect, get_table_name):
table_name = get_table_name
vectors = gen_vectors(1, dim)
status, result = connect.add_vectors(table_name, vectors)
assert not status.OK()
@pytest.mark.level(2)
def test_add_vectors_with_invalid_tag_name(self, connect, get_table_name, get_tag_name):
table_name = get_table_name
tag_name = get_tag_name
vectors = gen_vectors(1, dim)
status, result = connect.add_vectors(table_name, vectors, partition_tag=tag_name)
assert not status.OK()
class TestAddTableVectorsInvalid(object):
    """
    Test adding vectors with invalid vectors
    """
    single_vector = gen_single_vector(dim)
    vectors = gen_vectors(2, dim)
@pytest.fixture(
scope="function",
params=gen_invalid_vectors()
)
def gen_vector(self, request):
yield request.param
@pytest.mark.level(2)
def test_add_vector_with_invalid_vectors(self, connect, table, gen_vector):
tmp_single_vector = copy.deepcopy(self.single_vector)
tmp_single_vector[0][1] = gen_vector
with pytest.raises(Exception) as e:
status, result = connect.add_vectors(table, tmp_single_vector)
@pytest.mark.level(2)
def test_add_vectors_with_invalid_vectors(self, connect, table, gen_vector):
tmp_vectors = copy.deepcopy(self.vectors)
tmp_vectors[1][1] = gen_vector
with pytest.raises(Exception) as e:
status, result = connect.add_vectors(table, tmp_vectors)
@pytest.mark.level(2)
def test_add_vectors_with_invalid_vectors_jaccard(self, connect, jac_table, gen_vector):
tmp_vectors = copy.deepcopy(self.vectors)
tmp_vectors[1][1] = gen_vector
with pytest.raises(Exception) as e:
status, result = connect.add_vectors(jac_table, tmp_vectors)
@pytest.mark.level(2)
def test_add_vectors_with_invalid_vectors_hamming(self, connect, ham_table, gen_vector):
tmp_vectors = copy.deepcopy(self.vectors)
tmp_vectors[1][1] = gen_vector
with pytest.raises(Exception) as e:
status, result = connect.add_vectors(ham_table, tmp_vectors)
|
semantic.py
|
"""
Uses semantic to obtain an AST for a given code snippet in a language supported by semantic.
This code is based on a semantic version that still supports the `--json-graph` option! Newer versions of semantic
have dropped that command line option and it is unclear if it will return! To ensure this code will work with semantic,
build a semantic from a revision before Mar 27, 2020! E.g.,
https://github.com/github/semantic/tree/34ea0d1dd6ac1a142e2215f097f17abeed66de34
"""
import glob
import json
import os
import shutil
import subprocess
import threading
from collections import OrderedDict
from code_transformer.env import SEMANTIC_EXECUTABLE
from code_transformer.utils.log import get_logger
logger = get_logger(__file__)
TEMP_PIPE = "/tmp/semantic-temp-pipe"
SEMANTIC_CMD = [SEMANTIC_EXECUTABLE]
if shutil.which(" ".join(SEMANTIC_CMD)) is None:
    assert shutil.which("semantic") is not None, \
        f"Could not locate semantic executable in {SEMANTIC_CMD} or on PATH! Is the path correct?"
    logger.warning(f"Could not locate semantic executable in {SEMANTIC_CMD}! Falling back to the semantic "
                   f"executable found on PATH")
    SEMANTIC_CMD = ["semantic"]
language_file_extensions = {
"python": "py",
"javascript": "js",
"ruby": "rb",
"typescript": "ts",
"go": "go",
"json": "json",
"jsx": "jsx",
"php": "php"
}
def run_semantic(command, arg, output_type, *files, quiet=True):
assert shutil.which(" ".join(SEMANTIC_CMD)) is not None, f"Could not locate semantic executable in {SEMANTIC_CMD}! Is the path correct?"
call = []
call.extend(SEMANTIC_CMD)
call.append(arg)
call.extend([command, output_type])
call.extend(files)
cabal_call = subprocess.Popen(" ".join(call), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
text=True, shell=True)
output, errors = cabal_call.communicate()
cabal_call.wait()
    # If the command produced JSON output, parse it right away
    if output_type == '--json' or output_type == '--json-graph' or output_type == '--symbols':
        output = json.loads(output, object_pairs_hook=OrderedDict)
    if errors != "":
# We can filter the erroneous files
successful_parses = []
successful_runs = []
if output_type == '--json-graph':
for i, file in enumerate(output['files']):
if 'errors' in file:
if not quiet:
for error in file['errors']:
logger.error(f"{file['path']}: {error['error']}")
elif 'Error' not in [vertex['term'] for vertex in file['vertices']]:
successful_parses.append(file)
successful_runs.append(i)
# we need to return the indices for successful parses for the caller
return {'files': successful_parses}, successful_runs
else:
raise Exception(errors)
# Returning None as successful_runs means that all runs were successful
return output, None
def run_semantic_parse(arg, output_type, *files, quiet=True):
return run_semantic("parse", arg, output_type, *files, quiet=quiet)
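# Illustrative usage sketch (not part of the original module). `arg` is whatever
# extra top-level CLI flag should be passed through to semantic (use "" for
# none); the file name below is a placeholder:
#
#   graph, successful = run_semantic_parse("", "--json-graph", "example.py")
#   # `graph` is a dict of the form {'files': [...]}; `successful` lists the
#   # indices of the files that parsed cleanly (None means every run succeeded).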
def semantic_parse(language, arg, output_type, process_identifier, *code_snippets, quiet=True):
"""
Semantic only accepts files as input. To avoid unnecessary disk I/O, we create a named pipe (TEMP_PIPE) that is used for writing.
Semantic will then receive the named pipe and read from it as it was a file. Due to the nature of pipes where a write hangs until someone reads from the pipe,
we need an async write.
"""
def pipe_writer_worker(code, pipe_name):
with open(pipe_name, 'w') as temp_pipe:
temp_pipe.write(code)
if language not in language_file_extensions:
raise Exception(f"language `{language}` not supported by semantic")
    if not isinstance(code_snippets, (list, tuple, set)):
        code_snippets = [code_snippets]
file_extension = language_file_extensions[language]
# Create temporary pipes
pipes_wildcard = f"{TEMP_PIPE}-{process_identifier}-*.{file_extension}"
cleanup_temp_pipes(process_identifier, file_extension)
for i, code in enumerate(code_snippets):
pipe_name = f"{TEMP_PIPE}-{process_identifier}-{i:05d}.{file_extension}"
if not os.path.exists(pipe_name):
os.mkfifo(pipe_name)
        # Write to the pipe asynchronously
threading.Thread(target=pipe_writer_worker, args=(code, pipe_name)).start()
result = run_semantic_parse(arg, output_type, pipes_wildcard, quiet=quiet)
cleanup_temp_pipes(process_identifier, file_extension)
return result
def cleanup_temp_pipes(process_identifier, file_extension):
pipes_wildcard = f"{TEMP_PIPE}-{process_identifier}-*.{file_extension}"
if glob.glob(pipes_wildcard):
subprocess.Popen(f"rm {pipes_wildcard}", shell=True).communicate()
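# The named-pipe trick above can be demonstrated in isolation. A minimal,
# self-contained sketch (not used by this module; the pipe path and payload are
# arbitrary examples): a background thread writes into a FIFO while the main
# thread reads it back as if it were a regular file.
def _demo_named_pipe(pipe_name="/tmp/semantic-demo-pipe.py", payload="print('hello')\n"):
    if not os.path.exists(pipe_name):
        os.mkfifo(pipe_name)

    def writer():
        # open() for writing blocks until a reader opens the FIFO, hence the thread
        with open(pipe_name, 'w') as fh:
            fh.write(payload)

    thread = threading.Thread(target=writer)
    thread.start()
    with open(pipe_name) as fh:  # returns once the writer closes its end
        content = fh.read()
    thread.join()
    os.remove(pipe_name)
    return content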
|
test_gc.py
|
import unittest
from test.test_support import verbose, run_unittest, start_threads
import sys
import time
import gc
import weakref
try:
import threading
except ImportError:
threading = None
### Support code
###############################################################################
# Bug 1055820 has several tests of longstanding bugs involving weakrefs and
# cyclic gc.
# An instance of C1055820 has a self-loop, so becomes cyclic trash when
# unreachable.
class C1055820(object):
def __init__(self, i):
self.i = i
self.loop = self
class GC_Detector(object):
# Create an instance I. Then gc hasn't happened again so long as
# I.gc_happened is false.
def __init__(self):
self.gc_happened = False
def it_happened(ignored):
self.gc_happened = True
# Create a piece of cyclic trash that triggers it_happened when
# gc collects it.
self.wr = weakref.ref(C1055820(666), it_happened)
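# A minimal, self-contained sketch (not part of the original test suite) of why
# GC_Detector works: its C1055820 instance is cyclic trash from the start, so
# any collection reclaims it and the weakref callback flips the flag.
def _demo_gc_detector():
    detector = GC_Detector()
    gc.collect()  # reclaims the self-looping C1055820, firing the callback
    return detector.gc_happened  # True once a collection has happened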
### Tests
###############################################################################
class GCTests(unittest.TestCase):
def test_list(self):
l = []
l.append(l)
gc.collect()
del l
self.assertEqual(gc.collect(), 1)
def test_dict(self):
d = {}
d[1] = d
gc.collect()
del d
self.assertEqual(gc.collect(), 1)
def test_tuple(self):
# since tuples are immutable we close the loop with a list
l = []
t = (l,)
l.append(t)
gc.collect()
del t
del l
self.assertEqual(gc.collect(), 2)
def test_class(self):
class A:
pass
A.a = A
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_newstyleclass(self):
class A(object):
pass
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_instance(self):
class A:
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
def test_newinstance(self):
class A(object):
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
class B(list):
pass
class C(B, A):
pass
a = C()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
del B, C
self.assertNotEqual(gc.collect(), 0)
A.a = A()
del A
self.assertNotEqual(gc.collect(), 0)
self.assertEqual(gc.collect(), 0)
def test_method(self):
        # Tricky: self.__init__ is a bound method; it references the instance.
class A:
def __init__(self):
self.init = self.__init__
a = A()
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
def test_finalizer(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
class A:
def __del__(self): pass
class B:
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
def test_finalizer_newclass(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
class A(object):
def __del__(self): pass
class B(object):
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
def test_function(self):
# Tricky: f -> d -> f, code should call d.clear() after the exec to
# break the cycle.
d = {}
exec("def f(): pass\n") in d
gc.collect()
del d
self.assertEqual(gc.collect(), 2)
def test_frame(self):
def f():
frame = sys._getframe()
gc.collect()
f()
self.assertEqual(gc.collect(), 1)
def test_saveall(self):
# Verify that cyclic garbage like lists show up in gc.garbage if the
# SAVEALL option is enabled.
# First make sure we don't save away other stuff that just happens to
# be waiting for collection.
gc.collect()
# if this fails, someone else created immortal trash
self.assertEqual(gc.garbage, [])
L = []
L.append(L)
id_L = id(L)
debug = gc.get_debug()
gc.set_debug(debug | gc.DEBUG_SAVEALL)
del L
gc.collect()
gc.set_debug(debug)
self.assertEqual(len(gc.garbage), 1)
obj = gc.garbage.pop()
self.assertEqual(id(obj), id_L)
def test_del(self):
        # __del__ methods can trigger collection; make that happen
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A:
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
def test_del_newclass(self):
        # __del__ methods can trigger collection; make that happen
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A(object):
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
# The following two tests are fragile:
# They precisely count the number of allocations,
# which is highly implementation-dependent.
# For example:
# - disposed tuples are not freed, but reused
# - the call to assertEqual somehow avoids building its args tuple
def test_get_count(self):
# Avoid future allocation of method object
assertEqual = self._baseAssertEqual
gc.collect()
assertEqual(gc.get_count(), (0, 0, 0))
a = dict()
# since gc.collect(), we created two objects:
# the dict, and the tuple returned by get_count()
assertEqual(gc.get_count(), (2, 0, 0))
def test_collect_generations(self):
# Avoid future allocation of method object
assertEqual = self.assertEqual
gc.collect()
a = dict()
gc.collect(0)
assertEqual(gc.get_count(), (0, 1, 0))
gc.collect(1)
assertEqual(gc.get_count(), (0, 0, 1))
gc.collect(2)
assertEqual(gc.get_count(), (0, 0, 0))
def test_trashcan(self):
class Ouch:
n = 0
def __del__(self):
Ouch.n = Ouch.n + 1
if Ouch.n % 17 == 0:
gc.collect()
# "trashcan" is a hack to prevent stack overflow when deallocating
# very deeply nested tuples etc. It works in part by abusing the
# type pointer and refcount fields, and that can yield horrible
# problems when gc tries to traverse the structures.
# If this test fails (as it does in 2.0, 2.1 and 2.2), it will
# most likely die via segfault.
# Note: In 2.3 the possibility for compiling without cyclic gc was
# removed, and that in turn allows the trashcan mechanism to work
# via much simpler means (e.g., it never abuses the type pointer or
# refcount fields anymore). Since it's much less likely to cause a
# problem now, the various constants in this expensive (we force a lot
# of full collections) test are cut back from the 2.2 version.
gc.enable()
N = 150
for count in range(2):
t = []
for i in range(N):
t = [t, Ouch()]
u = []
for i in range(N):
u = [u, Ouch()]
v = {}
for i in range(N):
v = {1: v, 2: Ouch()}
gc.disable()
@unittest.skipUnless(threading, "test meaningless on builds without threads")
def test_trashcan_threads(self):
# Issue #13992: trashcan mechanism should be thread-safe
NESTING = 60
N_THREADS = 2
def sleeper_gen():
"""A generator that releases the GIL when closed or dealloc'ed."""
try:
yield
finally:
time.sleep(0.000001)
class C(list):
# Appending to a list is atomic, which avoids the use of a lock.
inits = []
dels = []
def __init__(self, alist):
self[:] = alist
C.inits.append(None)
def __del__(self):
# This __del__ is called by subtype_dealloc().
C.dels.append(None)
# `g` will release the GIL when garbage-collected. This
# helps assert subtype_dealloc's behaviour when threads
# switch in the middle of it.
g = sleeper_gen()
next(g)
# Now that __del__ is finished, subtype_dealloc will proceed
# to call list_dealloc, which also uses the trashcan mechanism.
def make_nested():
"""Create a sufficiently nested container object so that the
trashcan mechanism is invoked when deallocating it."""
x = C([])
for i in range(NESTING):
x = [C([x])]
del x
def run_thread():
"""Exercise make_nested() in a loop."""
while not exit:
make_nested()
old_checkinterval = sys.getcheckinterval()
sys.setcheckinterval(3)
try:
exit = []
threads = []
for i in range(N_THREADS):
t = threading.Thread(target=run_thread)
threads.append(t)
with start_threads(threads, lambda: exit.append(1)):
time.sleep(1.0)
finally:
sys.setcheckinterval(old_checkinterval)
gc.collect()
self.assertEqual(len(C.inits), len(C.dels))
def test_boom(self):
class Boom:
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom()
b = Boom()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# a<->b are in a trash cycle now. Collection will invoke
# Boom.__getattr__ (to see whether a and b have __del__ methods), and
# __getattr__ deletes the internal "attr" attributes as a side effect.
# That causes the trash cycle to get reclaimed via refcounts falling to
# 0, thus mutating the trash graph as a side effect of merely asking
# whether __del__ exists. This used to (before 2.3b1) crash Python.
# Now __getattr__ isn't called.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2(self):
class Boom2:
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2()
b = Boom2()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# Much like test_boom(), except that __getattr__ doesn't break the
# cycle until the second time gc checks for __del__. As of 2.3b1,
# there isn't a second time, so this simply cleans up the trash cycle.
# We expect a, b, a.__dict__ and b.__dict__ (4 objects) to get
# reclaimed this way.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom_new(self):
# boom__new and boom2_new are exactly like boom and boom2, except use
# new-style classes.
class Boom_New(object):
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom_New()
b = Boom_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2_new(self):
class Boom2_New(object):
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2_New()
b = Boom2_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_get_referents(self):
alist = [1, 3, 5]
got = gc.get_referents(alist)
got.sort()
self.assertEqual(got, alist)
atuple = tuple(alist)
got = gc.get_referents(atuple)
got.sort()
self.assertEqual(got, alist)
adict = {1: 3, 5: 7}
expected = [1, 3, 5, 7]
got = gc.get_referents(adict)
got.sort()
self.assertEqual(got, expected)
got = gc.get_referents([1, 2], {3: 4}, (0, 0, 0))
got.sort()
self.assertEqual(got, [0, 0] + range(5))
self.assertEqual(gc.get_referents(1, 'a', 4j), [])
def test_is_tracked(self):
# Atomic built-in types are not tracked, user-defined objects and
# mutable containers are.
# NOTE: types with special optimizations (e.g. tuple) have tests
# in their own test files instead.
self.assertFalse(gc.is_tracked(None))
self.assertFalse(gc.is_tracked(1))
self.assertFalse(gc.is_tracked(1.0))
self.assertFalse(gc.is_tracked(1.0 + 5.0j))
self.assertFalse(gc.is_tracked(True))
self.assertFalse(gc.is_tracked(False))
self.assertFalse(gc.is_tracked("a"))
self.assertFalse(gc.is_tracked(u"a"))
self.assertFalse(gc.is_tracked(bytearray("a")))
self.assertFalse(gc.is_tracked(type))
self.assertFalse(gc.is_tracked(int))
self.assertFalse(gc.is_tracked(object))
self.assertFalse(gc.is_tracked(object()))
class OldStyle:
pass
class NewStyle(object):
pass
self.assertTrue(gc.is_tracked(gc))
self.assertTrue(gc.is_tracked(OldStyle))
self.assertTrue(gc.is_tracked(OldStyle()))
self.assertTrue(gc.is_tracked(NewStyle))
self.assertTrue(gc.is_tracked(NewStyle()))
self.assertTrue(gc.is_tracked([]))
self.assertTrue(gc.is_tracked(set()))
def test_bug1055820b(self):
# Corresponds to temp2b.py in the bug report.
ouch = []
def callback(ignored):
ouch[:] = [wr() for wr in WRs]
Cs = [C1055820(i) for i in range(2)]
WRs = [weakref.ref(c, callback) for c in Cs]
c = None
gc.collect()
self.assertEqual(len(ouch), 0)
# Make the two instances trash, and collect again. The bug was that
# the callback materialized a strong reference to an instance, but gc
# cleared the instance's dict anyway.
Cs = None
gc.collect()
self.assertEqual(len(ouch), 2) # else the callbacks didn't run
for x in ouch:
# If the callback resurrected one of these guys, the instance
# would be damaged, with an empty __dict__.
self.assertEqual(x, None)
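# For reference, a condensed, standalone sketch of the DEBUG_SAVEALL pattern
# exercised by test_saveall above (assumes nothing else is awaiting collection):
def _demo_saveall():
    gc.collect()
    debug = gc.get_debug()
    gc.set_debug(debug | gc.DEBUG_SAVEALL)  # keep collected objects instead of freeing them
    L = []
    L.append(L)  # a self-referencing list becomes cyclic trash once the name is dropped
    del L
    gc.collect()
    gc.set_debug(debug)
    saved = gc.garbage[:]  # the reclaimed cycle is preserved here for inspection
    del gc.garbage[:]
    return saved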
class GCTogglingTests(unittest.TestCase):
def setUp(self):
gc.enable()
def tearDown(self):
gc.disable()
def test_bug1055820c(self):
# Corresponds to temp2c.py in the bug report. This is pretty
# elaborate.
c0 = C1055820(0)
# Move c0 into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_c0_alive = c0
del c0.loop # now only c1 keeps c0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
ouch = []
def callback(ignored):
ouch[:] = [c2wr()]
# The callback gets associated with a wr on an object in generation 2.
c0wr = weakref.ref(c0, callback)
c0 = c1 = c2 = None
# What we've set up: c0, c1, and c2 are all trash now. c0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's a
# global weakref to c2 (c2wr), but that weakref has no callback.
# There's also a global weakref to c0 (c0wr), and that does have a
# callback, and that callback references c2 via c2wr().
#
# c0 has a wr with callback, which references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see c0 at all, and c0 is
# the only object that has a weakref with a callback. gc clears c1
# and c2. Clearing c1 has the side effect of dropping the refcount on
# c0 to 0, so c0 goes away (despite that it's in an older generation)
# and c0's wr callback triggers. That in turn materializes a reference
# to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
junk = []
i = 0
detector = GC_Detector()
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else the callback wasn't invoked
for x in ouch:
# If the callback resurrected c2, the instance would be damaged,
# with an empty __dict__.
self.assertEqual(x, None)
def test_bug1055820d(self):
# Corresponds to temp2d.py in the bug report. This is very much like
# test_bug1055820c, but uses a __del__ method instead of a weakref
# callback to sneak in a resurrection of cyclic trash.
ouch = []
class D(C1055820):
def __del__(self):
ouch[:] = [c2wr()]
d0 = D(0)
# Move all the above into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_d0_alive = d0
del d0.loop # now only c1 keeps d0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
d0 = c1 = c2 = None
# What we've set up: d0, c1, and c2 are all trash now. d0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's
# a global weakref to c2 (c2wr), but that weakref has no callback.
# There are no other weakrefs.
#
# d0 has a __del__ method that references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see d0 at all. gc clears
# c1 and c2. Clearing c1 has the side effect of dropping the refcount
# on d0 to 0, so d0 goes away (despite that it's in an older
# generation) and d0's __del__ triggers. That in turn materializes
# a reference to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
detector = GC_Detector()
junk = []
i = 0
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else __del__ wasn't invoked
for x in ouch:
# If __del__ resurrected c2, the instance would be damaged, with an
# empty __dict__.
self.assertEqual(x, None)
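# A condensed sketch of what the trashcan mechanism protects against (see
# test_trashcan above): without it, deallocating a deeply nested container
# would recurse once per nesting level and overflow the C stack. The depth
# below is an arbitrary illustration.
def _demo_deep_dealloc(depth=100000):
    x = []
    for i in range(depth):
        x = [x]  # each level wraps the previous one
    del x  # all levels are torn down here; the trashcan keeps this iterative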
def test_main():
enabled = gc.isenabled()
gc.disable()
assert not gc.isenabled()
debug = gc.get_debug()
gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak
try:
gc.collect() # Delete 2nd generation garbage
run_unittest(GCTests, GCTogglingTests)
finally:
gc.set_debug(debug)
# test gc.enable() even if GC is disabled by default
if verbose:
print "restoring automatic collection"
# make sure to always test gc.enable()
gc.enable()
assert gc.isenabled()
if not enabled:
gc.disable()
if __name__ == "__main__":
test_main()
|
jobs.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import json
import logging
import multiprocessing
import os
import shutil
import six
import threading
import time
import unittest
from tempfile import mkdtemp
import sqlalchemy
from airflow import AirflowException, settings, models
from airflow.bin import cli
from airflow.executors import BaseExecutor, SequentialExecutor
from airflow.jobs import BaseJob, BackfillJob, SchedulerJob, LocalTaskJob
from airflow.models import DAG, DagModel, DagBag, DagRun, Pool, TaskInstance as TI
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.bash_operator import BashOperator
from airflow.task.task_runner.base_task_runner import BaseTaskRunner
from airflow.utils import timezone
from airflow.utils.dates import days_ago
from airflow.utils.db import provide_session
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from airflow.utils.dag_processing import SimpleDag, SimpleDagBag, list_py_file_paths
from airflow.utils.net import get_hostname
from mock import Mock, patch, MagicMock, PropertyMock
from tests.executors.test_executor import TestExecutor
from tests.core import TEST_DAG_FOLDER
from airflow import configuration
configuration.load_test_config()
logger = logging.getLogger(__name__)
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
DEV_NULL = '/dev/null'
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
# Include the words "airflow" and "dag" in the file contents,
# tricking airflow into thinking these
# files contain a DAG (otherwise Airflow will skip them)
PARSEABLE_DAG_FILE_CONTENTS = '"airflow DAG"'
UNPARSEABLE_DAG_FILE_CONTENTS = 'airflow DAG'
# Filename to be used for dags that are created in an ad-hoc manner and can be
# removed/created at runtime
TEMP_DAG_FILENAME = "temp_dag.py"
TEST_DAGS_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
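# A small helper sketch (not used by the tests below) showing how the contents
# above are meant to be used: drop a file containing the magic words into a
# fresh dags folder so the DagBag loader will pick it up. Everything here is an
# illustrative assumption built from stdlib pieces already imported above.
def _write_temp_dag_file(contents=PARSEABLE_DAG_FILE_CONTENTS):
    dags_folder = mkdtemp()
    path = os.path.join(dags_folder, TEMP_DAG_FILENAME)
    with open(path, 'w') as fh:
        fh.write(contents)  # must mention both "airflow" and "DAG"
    return dags_folder, path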
class BaseJobTest(unittest.TestCase):
class TestJob(BaseJob):
__mapper_args__ = {
'polymorphic_identity': 'TestJob'
}
def __init__(self, cb):
self.cb = cb
super(BaseJobTest.TestJob, self).__init__()
def _execute(self):
return self.cb()
def test_state_success(self):
job = self.TestJob(lambda: True)
job.run()
self.assertEquals(job.state, State.SUCCESS)
self.assertIsNotNone(job.end_date)
def test_state_sysexit(self):
import sys
job = self.TestJob(lambda: sys.exit(0))
job.run()
self.assertEquals(job.state, State.SUCCESS)
self.assertIsNotNone(job.end_date)
def test_state_failed(self):
def abort():
raise RuntimeError("fail")
job = self.TestJob(abort)
with self.assertRaises(RuntimeError):
job.run()
self.assertEquals(job.state, State.FAILED)
self.assertIsNotNone(job.end_date)
class BackfillJobTest(unittest.TestCase):
def setUp(self):
self.parser = cli.CLIFactory.get_parser()
self.dagbag = DagBag(include_examples=True)
@unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_trigger_controller_dag(self):
dag = self.dagbag.get_dag('example_trigger_controller_dag')
target_dag = self.dagbag.get_dag('example_trigger_target_dag')
dag.clear()
target_dag.clear()
scheduler = SchedulerJob()
queue = Mock()
scheduler._process_task_instances(target_dag, queue=queue)
self.assertFalse(queue.append.called)
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_first_depends_on_past=True
)
job.run()
scheduler = SchedulerJob()
queue = Mock()
scheduler._process_task_instances(target_dag, queue=queue)
self.assertTrue(queue.append.called)
target_dag.clear()
dag.clear()
@unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_multi_dates(self):
dag = self.dagbag.get_dag('example_bash_operator')
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
ignore_first_depends_on_past=True
)
job.run()
session = settings.Session()
drs = session.query(DagRun).filter(
DagRun.dag_id == 'example_bash_operator'
).order_by(DagRun.execution_date).all()
self.assertTrue(drs[0].execution_date == DEFAULT_DATE)
self.assertTrue(drs[0].state == State.SUCCESS)
self.assertTrue(drs[1].execution_date ==
DEFAULT_DATE + datetime.timedelta(days=1))
self.assertTrue(drs[1].state == State.SUCCESS)
dag.clear()
session.close()
@unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_examples(self):
"""
Test backfilling example dags
        Try to backfill some of the example dags. Be careful: not all dags are
        suitable for this. For example, a dag that sleeps forever or has no
        schedule cannot be backfilled.
"""
include_dags = {
'example_branch_operator',
'example_bash_operator',
'example_skip_dag',
'latest_only'
}
dags = [
dag for dag in self.dagbag.dags.values()
if 'example_dags' in dag.full_filepath and dag.dag_id in include_dags
]
for dag in dags:
dag.clear(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
# Make sure that we have the dags that we want to test available
# in the example_dags folder, if this assertion fails, one of the
# dags in the include_dags array isn't available anymore
self.assertEqual(len(include_dags), len(dags))
for i, dag in enumerate(sorted(dags, key=lambda d: d.dag_id)):
logger.info('*** Running example DAG #{}: {}'.format(i, dag.dag_id))
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_first_depends_on_past=True)
job.run()
def test_backfill_conf(self):
dag = DAG(
dag_id='test_backfill_conf',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='op',
dag=dag)
dag.clear()
executor = TestExecutor(do_update=True)
conf = json.loads("""{"key": "value"}""")
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
conf=conf)
job.run()
dr = DagRun.find(dag_id='test_backfill_conf')
self.assertEqual(conf, dr[0].conf)
def test_backfill_rerun_failed_tasks(self):
dag = DAG(
dag_id='test_backfill_rerun_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='test_backfill_rerun_failed_task-1',
dag=dag)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEquals(ti.state, State.SUCCESS)
def test_backfill_rerun_upstream_failed_tasks(self):
dag = DAG(
dag_id='test_backfill_rerun_upstream_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
t1 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-1',
dag=dag)
t2 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-2',
dag=dag)
t1.set_upstream(t2)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.UPSTREAM_FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEquals(ti.state, State.SUCCESS)
def test_backfill_rerun_failed_tasks_without_flag(self):
dag = DAG(
dag_id='test_backfill_rerun_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='test_backfill_rerun_failed_task-1',
dag=dag)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=False
)
with self.assertRaises(AirflowException):
job.run()
def test_backfill_ordered_concurrent_execute(self):
dag = DAG(
dag_id='test_backfill_ordered_concurrent_execute',
start_date=DEFAULT_DATE,
schedule_interval="@daily")
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
# test executor history keeps a list
history = executor.history
        # check the execution order. Every loop has a 'pause' (zero queued
        # tasks) to change state from RUNNING to SUCCESS.
        # 6,0,3,0,3,0,3,0 = 8 loops
self.assertEqual(8, len(history))
loop_count = 0
while len(history) > 0:
queued_tasks = history.pop(0)
if loop_count == 0:
# first loop should contain 6 tasks (3 days x 2 tasks)
self.assertEqual(6, len(queued_tasks))
if loop_count == 2 or loop_count == 4 or loop_count == 6:
# 3 days x 1 task
self.assertEqual(3, len(queued_tasks))
loop_count += 1
def test_backfill_pooled_tasks(self):
"""
Test that queued tasks are executed by BackfillJob
"""
session = settings.Session()
pool = Pool(pool='test_backfill_pooled_task_pool', slots=1)
session.add(pool)
session.commit()
dag = self.dagbag.get_dag('test_backfill_pooled_task_dag')
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
# run with timeout because this creates an infinite loop if not
# caught
with timeout(seconds=30):
job.run()
ti = TI(
task=dag.get_task('test_backfill_pooled_task'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_depends_on_past(self):
"""
Test that backfill respects ignore_depends_on_past
"""
dag = self.dagbag.get_dag('test_depends_on_past')
dag.clear()
run_date = DEFAULT_DATE + datetime.timedelta(days=5)
# backfill should deadlock
self.assertRaisesRegexp(
AirflowException,
'BackfillJob is deadlocked',
BackfillJob(dag=dag, start_date=run_date, end_date=run_date).run)
BackfillJob(
dag=dag,
start_date=run_date,
end_date=run_date,
ignore_first_depends_on_past=True).run()
# ti should have succeeded
ti = TI(dag.tasks[0], run_date)
ti.refresh_from_db()
self.assertEquals(ti.state, State.SUCCESS)
def test_run_ignores_all_dependencies(self):
"""
Test that run respects ignore_all_dependencies
"""
dag_id = 'test_run_ignores_all_dependencies'
dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
dag.clear()
task0_id = 'test_run_dependent_task'
args0 = ['run',
'-A',
dag_id,
task0_id,
DEFAULT_DATE.isoformat()]
cli.run(self.parser.parse_args(args0))
ti_dependent0 = TI(
task=dag.get_task(task0_id),
execution_date=DEFAULT_DATE)
ti_dependent0.refresh_from_db()
self.assertEquals(ti_dependent0.state, State.FAILED)
task1_id = 'test_run_dependency_task'
args1 = ['run',
'-A',
dag_id,
task1_id,
(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
cli.run(self.parser.parse_args(args1))
ti_dependency = TI(
task=dag.get_task(task1_id),
execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti_dependency.refresh_from_db()
self.assertEquals(ti_dependency.state, State.FAILED)
task2_id = 'test_run_dependent_task'
args2 = ['run',
'-A',
dag_id,
task2_id,
(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
cli.run(self.parser.parse_args(args2))
ti_dependent = TI(
task=dag.get_task(task2_id),
execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti_dependent.refresh_from_db()
self.assertEquals(ti_dependent.state, State.SUCCESS)
def test_run_naive_taskinstance(self):
"""
Test that we can run naive (non-localized) task instances
"""
NAIVE_DATE = datetime.datetime(2016, 1, 1)
dag_id = 'test_run_ignores_all_dependencies'
dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
dag.clear()
task0_id = 'test_run_dependent_task'
args0 = ['run',
'-A',
dag_id,
task0_id,
NAIVE_DATE.isoformat()]
cli.run(self.parser.parse_args(args0))
ti_dependent0 = TI(
task=dag.get_task(task0_id),
execution_date=NAIVE_DATE)
ti_dependent0.refresh_from_db()
self.assertEquals(ti_dependent0.state, State.FAILED)
def test_cli_backfill_depends_on_past(self):
"""
Test that CLI respects -I argument
"""
dag_id = 'test_dagrun_states_deadlock'
run_date = DEFAULT_DATE + datetime.timedelta(days=1)
args = [
'backfill',
dag_id,
'-l',
'-s',
run_date.isoformat(),
]
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertRaisesRegexp(
AirflowException,
'BackfillJob is deadlocked',
cli.backfill,
self.parser.parse_args(args))
cli.backfill(self.parser.parse_args(args + ['-I']))
ti = TI(dag.get_task('test_depends_on_past'), run_date)
ti.refresh_from_db()
# task ran
self.assertEqual(ti.state, State.SUCCESS)
dag.clear()
def test_cli_receives_delay_arg(self):
"""
Tests that the --delay argument is passed correctly to the BackfillJob
"""
dag_id = 'example_bash_operator'
run_date = DEFAULT_DATE
args = [
'backfill',
dag_id,
'-s',
run_date.isoformat(),
'--delay_on_limit',
'0.5',
]
parsed_args = self.parser.parse_args(args)
self.assertEqual(0.5, parsed_args.delay_on_limit)
def _get_dag_test_max_active_limits(self, dag_id, max_active_runs=1):
dag = DAG(
dag_id=dag_id,
start_date=DEFAULT_DATE,
schedule_interval="@hourly",
max_active_runs=max_active_runs
)
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op1 >> op2 >> op3
op4 >> op3
dag.clear()
return dag
def test_backfill_max_limit_check_within_limit(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_within_limit',
max_active_runs=16)
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
dagruns = DagRun.find(dag_id=dag.dag_id)
self.assertEqual(2, len(dagruns))
self.assertTrue(all([run.state == State.SUCCESS for run in dagruns]))
def test_backfill_max_limit_check(self):
dag_id = 'test_backfill_max_limit_check'
run_id = 'test_dagrun'
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
dag_run_created_cond = threading.Condition()
def run_backfill(cond):
cond.acquire()
try:
dag = self._get_dag_test_max_active_limits(dag_id)
# this session object is different than the one in the main thread
thread_session = settings.Session()
# Existing dagrun that is not within the backfill range
dag.create_dagrun(
run_id=run_id,
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(hours=1),
start_date=DEFAULT_DATE,
)
thread_session.commit()
cond.notify()
finally:
cond.release()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
thread_session.close()
backfill_job_thread = threading.Thread(target=run_backfill,
name="run_backfill",
args=(dag_run_created_cond,))
dag_run_created_cond.acquire()
session = settings.Session()
backfill_job_thread.start()
try:
# at this point backfill can't run since the max_active_runs has been
# reached, so it is waiting
dag_run_created_cond.wait(timeout=1.5)
dagruns = DagRun.find(dag_id=dag_id)
dr = dagruns[0]
self.assertEqual(1, len(dagruns))
self.assertEqual(dr.run_id, run_id)
# allow the backfill to execute by setting the existing dag run to SUCCESS,
# backfill will execute dag runs 1 by 1
dr.set_state(State.SUCCESS)
session.merge(dr)
session.commit()
session.close()
backfill_job_thread.join()
dagruns = DagRun.find(dag_id=dag_id)
self.assertEqual(3, len(dagruns)) # 2 from backfill + 1 existing
self.assertEqual(dagruns[-1].run_id, dr.run_id)
finally:
dag_run_created_cond.release()
def test_backfill_max_limit_check_no_count_existing(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_no_count_existing')
start_date = DEFAULT_DATE
end_date = DEFAULT_DATE
# Existing dagrun that is within the backfill range
dag.create_dagrun(run_id="test_existing_backfill",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
        # BackfillJob will run since the existing DagRun falls within the backfill
        # date range and therefore does not count against the max active limit.
        dagruns = DagRun.find(dag_id=dag.dag_id)
        # only one DagRun exists: with a single backfill date, the job picks up
        # the existing in-range run instead of creating a new one
self.assertEqual(1, len(dagruns))
self.assertEqual(State.SUCCESS, dagruns[0].state)
def test_backfill_max_limit_check_complete_loop(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_complete_loop')
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
# Given the max limit to be 1 in active dag runs, we need to run the
# backfill job 3 times
success_expected = 2
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
success_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.SUCCESS))
running_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.RUNNING))
self.assertEqual(success_expected, success_dagruns)
self.assertEqual(0, running_dagruns) # no dag_runs in running state are left
def test_sub_set_subdag(self):
dag = DAG(
'test_sub_set_subdag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
sub_dag = dag.sub_dag(task_regex="leave*",
include_downstream=False,
include_upstream=False)
job = BackfillJob(dag=sub_dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor)
job.run()
self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
# the run_id should have changed, so a refresh won't work
drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
dr = drs[0]
self.assertEqual(BackfillJob.ID_FORMAT_PREFIX.format(DEFAULT_DATE.isoformat()),
dr.run_id)
for ti in dr.get_task_instances():
if ti.task_id == 'leave1' or ti.task_id == 'leave2':
self.assertEqual(State.SUCCESS, ti.state)
else:
self.assertEqual(State.NONE, ti.state)
def test_backfill_fill_blanks(self):
dag = DAG(
'test_backfill_fill_blanks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'},
)
with dag:
op1 = DummyOperator(task_id='op1')
op2 = DummyOperator(task_id='op2')
op3 = DummyOperator(task_id='op3')
op4 = DummyOperator(task_id='op4')
op5 = DummyOperator(task_id='op5')
op6 = DummyOperator(task_id='op6')
dag.clear()
dr = dag.create_dagrun(run_id='test',
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
session = settings.Session()
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id == op1.task_id:
ti.state = State.UP_FOR_RETRY
ti.end_date = DEFAULT_DATE
elif ti.task_id == op2.task_id:
ti.state = State.FAILED
elif ti.task_id == op3.task_id:
ti.state = State.SKIPPED
elif ti.task_id == op4.task_id:
ti.state = State.SCHEDULED
elif ti.task_id == op5.task_id:
ti.state = State.UPSTREAM_FAILED
# op6 = None
session.merge(ti)
session.commit()
session.close()
job = BackfillJob(dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor)
self.assertRaisesRegexp(
AirflowException,
'Some task instances failed',
job.run)
self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
# the run_id should have changed, so a refresh won't work
drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
dr = drs[0]
self.assertEqual(dr.state, State.FAILED)
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id in (op1.task_id, op4.task_id, op6.task_id):
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == op2.task_id:
self.assertEqual(ti.state, State.FAILED)
elif ti.task_id == op3.task_id:
self.assertEqual(ti.state, State.SKIPPED)
elif ti.task_id == op5.task_id:
self.assertEqual(ti.state, State.UPSTREAM_FAILED)
def test_backfill_execute_subdag(self):
dag = self.dagbag.get_dag('example_subdag_operator')
subdag_op_task = dag.get_task('section-1')
subdag = subdag_op_task.subdag
subdag.schedule_interval = '@daily'
start_date = timezone.utcnow()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=subdag,
start_date=start_date,
end_date=start_date,
executor=executor,
donot_pickle=True)
job.run()
history = executor.history
subdag_history = history[0]
# check that all 5 task instances of the subdag 'section-1' were executed
self.assertEqual(5, len(subdag_history))
for sdh in subdag_history:
ti = sdh[3]
self.assertIn('section-1-task-', ti.task_id)
subdag.clear()
dag.clear()
def test_subdag_clear_parentdag_downstream_clear(self):
dag = self.dagbag.get_dag('example_subdag_operator')
subdag_op_task = dag.get_task('section-1')
subdag = subdag_op_task.subdag
subdag.schedule_interval = '@daily'
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=subdag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor,
donot_pickle=True)
with timeout(seconds=30):
job.run()
ti0 = TI(
task=subdag.get_task('section-1-task-1'),
execution_date=DEFAULT_DATE)
ti0.refresh_from_db()
self.assertEqual(ti0.state, State.SUCCESS)
sdag = subdag.sub_dag(
task_regex='section-1-task-1',
include_downstream=True,
include_upstream=False)
sdag.clear(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
include_parentdag=True)
ti0.refresh_from_db()
self.assertEquals(State.NONE, ti0.state)
ti1 = TI(
task=dag.get_task('some-other-task'),
execution_date=DEFAULT_DATE)
self.assertEquals(State.NONE, ti1.state)
# Checks that all the Downstream tasks for Parent DAG
# have been cleared
for task in subdag_op_task.downstream_list:
ti = TI(
task=dag.get_task(task.task_id),
execution_date=DEFAULT_DATE
)
self.assertEquals(State.NONE, ti.state)
subdag.clear()
dag.clear()
def test_backfill_execute_subdag_with_removed_task(self):
"""
Ensure that subdag operators execute properly in the case where
an associated task of the subdag has been removed from the dag
definition, but has instances in the database from previous runs.
"""
dag = self.dagbag.get_dag('example_subdag_operator')
subdag = dag.get_task('section-1').subdag
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=subdag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor,
donot_pickle=True)
removed_task_ti = TI(
task=DummyOperator(task_id='removed_task'),
execution_date=DEFAULT_DATE,
state=State.REMOVED)
removed_task_ti.dag_id = subdag.dag_id
session = settings.Session()
session.merge(removed_task_ti)
with timeout(seconds=30):
job.run()
for task in subdag.tasks:
instance = session.query(TI).filter(
TI.dag_id == subdag.dag_id,
TI.task_id == task.task_id,
TI.execution_date == DEFAULT_DATE).first()
self.assertIsNotNone(instance)
self.assertEqual(instance.state, State.SUCCESS)
removed_task_ti.refresh_from_db()
self.assertEqual(removed_task_ti.state, State.REMOVED)
subdag.clear()
dag.clear()
def test_update_counters(self):
dag = DAG(
dag_id='test_manage_executor_state',
start_date=DEFAULT_DATE)
task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
job = BackfillJob(dag=dag)
session = settings.Session()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task1, dr.execution_date)
ti.refresh_from_db()
ti_status = BackfillJob._DagRunTaskStatus()
# test for success
ti.set_state(State.SUCCESS, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 1)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.succeeded.clear()
# test for skipped
ti.set_state(State.SKIPPED, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 1)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.skipped.clear()
# test for failed
ti.set_state(State.FAILED, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 1)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.failed.clear()
        # test for reschedule: a task reset to State.NONE goes back to to_run
ti.set_state(State.NONE, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 1)
session.close()
def test_dag_get_run_dates(self):
def get_test_dag_for_backfill(schedule_interval=None):
dag = DAG(
dag_id='test_get_dates',
start_date=DEFAULT_DATE,
schedule_interval=schedule_interval)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
return dag
test_dag = get_test_dag_for_backfill()
self.assertEqual([DEFAULT_DATE], test_dag.get_run_dates(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE))
test_dag = get_test_dag_for_backfill(schedule_interval="@hourly")
self.assertEqual([DEFAULT_DATE - datetime.timedelta(hours=3),
DEFAULT_DATE - datetime.timedelta(hours=2),
DEFAULT_DATE - datetime.timedelta(hours=1),
DEFAULT_DATE],
test_dag.get_run_dates(
start_date=DEFAULT_DATE - datetime.timedelta(hours=3),
end_date=DEFAULT_DATE,))
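# For reference: a stripped-down, standalone sketch of the threading.Condition
# handshake used by test_backfill_max_limit_check above. The main thread holds
# the condition, starts a worker, and waits until the worker reports that its
# setup is done. (Names and the timeout are illustrative placeholders.)
def _demo_condition_handshake():
    cond = threading.Condition()
    state = {'ready': False}

    def worker():
        with cond:
            state['ready'] = True  # e.g. the conflicting dag run has been created
            cond.notify()
        # the worker would go on to run its long job here

    with cond:
        threading.Thread(target=worker).start()
        cond.wait(timeout=1.5)  # releases the lock while waiting, letting the worker in
    return state['ready']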
class LocalTaskJobTest(unittest.TestCase):
def setUp(self):
pass
def test_localtaskjob_essential_attr(self):
"""
Check whether essential attributes
of LocalTaskJob can be assigned with
proper values without intervention
"""
dag = DAG(
'test_localtaskjob_essential_attr',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
ti = dr.get_task_instance(task_id=op1.task_id)
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
essential_attr = ["dag_id", "job_type", "start_date", "hostname"]
check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
self.assertTrue(all(check_result_1))
check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
self.assertTrue(all(check_result_2))
@patch('os.getpid')
def test_localtaskjob_heartbeat(self, mock_pid):
session = settings.Session()
dag = DAG(
'test_localtaskjob_heartbeat',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = "blablabla"
session.commit()
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
self.assertRaises(AirflowException, job1.heartbeat_callback)
mock_pid.return_value = 1
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
ret = job1.heartbeat_callback()
self.assertEqual(ret, None)
mock_pid.return_value = 2
self.assertRaises(AirflowException, job1.heartbeat_callback)
@unittest.skipIf('mysql' in configuration.conf.get('core', 'sql_alchemy_conn'),
"flaky when run on mysql")
@unittest.skipIf('postgresql' in configuration.conf.get('core', 'sql_alchemy_conn'),
'flaky when run on postgresql')
def test_mark_success_no_kill(self):
"""
Test that ensures that mark_success in the UI doesn't cause
the task to fail, and that the task exits
"""
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_mark_success')
task = dag.get_task('task1')
session = settings.Session()
dag.clear()
dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
process = multiprocessing.Process(target=job1.run)
process.start()
ti.refresh_from_db()
for i in range(0, 50):
if ti.state == State.RUNNING:
break
time.sleep(0.1)
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
ti.state = State.SUCCESS
session.merge(ti)
session.commit()
process.join(timeout=10)
self.assertFalse(process.is_alive())
ti.refresh_from_db()
self.assertEqual(State.SUCCESS, ti.state)
def test_localtaskjob_double_trigger(self):
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=task.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.commit()
ti_run = TI(task=task, execution_date=DEFAULT_DATE)
job1 = LocalTaskJob(task_instance=ti_run,
ignore_ti_state=True,
executor=SequentialExecutor())
with patch.object(BaseTaskRunner, 'start', return_value=None) as mock_method:
job1.run()
mock_method.assert_not_called()
ti = dr.get_task_instance(task_id=task.task_id, session=session)
self.assertEqual(ti.pid, 1)
self.assertEqual(ti.state, State.RUNNING)
session.close()
class SchedulerJobTest(unittest.TestCase):
def setUp(self):
self.dagbag = DagBag()
session = settings.Session()
session.query(models.DagRun).delete()
session.query(models.ImportError).delete()
session.commit()
@staticmethod
def run_single_scheduler_loop_with_no_dags(dags_folder):
"""
Utility function that runs a single scheduler loop without actually
changing/scheduling any dags. This is useful to simulate the other side effects of
running a scheduler loop, e.g. to see what parse errors there are in the
dags_folder.
:param dags_folder: the directory to traverse
        :type dags_folder: str
"""
scheduler = SchedulerJob(
dag_id='this_dag_doesnt_exist', # We don't want to actually run anything
num_runs=1,
subdir=os.path.join(dags_folder))
scheduler.heartrate = 0
scheduler.run()
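    # A minimal usage sketch for the helper above (hypothetical temp dir;
    # mirrors the import-error tests further down):
    #
    #   dags_folder = mkdtemp()
    #   with open(os.path.join(dags_folder, 'broken_dag.py'), 'w') as f:
    #       f.write(UNPARSEABLE_DAG_FILE_CONTENTS)
    #   SchedulerJobTest.run_single_scheduler_loop_with_no_dags(dags_folder)
    #   # any parse errors now show up as models.ImportError rows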
def _make_simple_dag_bag(self, dags):
return SimpleDagBag([SimpleDag(dag) for dag in dags])
def test_process_executor_events(self):
dag_id = "test_process_executor_events"
dag_id2 = "test_process_executor_events_2"
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
dag2 = DAG(dag_id=dag_id2, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag2, task_id=task_id_1)
dagbag1 = self._make_simple_dag_bag([dag])
dagbag2 = self._make_simple_dag_bag([dag2])
scheduler = SchedulerJob()
session = settings.Session()
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.QUEUED
session.merge(ti1)
session.commit()
executor = TestExecutor()
executor.event_buffer[ti1.key] = State.FAILED
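        # TestExecutor.event_buffer maps TI keys to terminal states;
        # _process_executor_events only applies an event when the TI's dag
        # is present in the SimpleDagBag it is handed.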
scheduler.executor = executor
# dag bag does not contain dag_id
scheduler._process_executor_events(simple_dag_bag=dagbag2)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.QUEUED)
# dag bag does contain dag_id
scheduler._process_executor_events(simple_dag_bag=dagbag1)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.FAILED)
ti1.state = State.SUCCESS
session.merge(ti1)
session.commit()
executor.event_buffer[ti1.key] = State.SUCCESS
scheduler._process_executor_events(simple_dag_bag=dagbag1)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.SUCCESS)
def test_execute_task_instances_is_paused_wont_execute(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_is_paused_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
dr1.state = State.RUNNING
dagmodel = models.DagModel()
dagmodel.dag_id = dag_id
dagmodel.is_paused = True
session.merge(ti1)
session.merge(dr1)
session.add(dagmodel)
session.commit()
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEquals(State.SCHEDULED, ti1.state)
def test_execute_task_instances_no_dagrun_task_will_execute(self):
"""
Tests that tasks without dagrun still get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_no_dagrun_task_will_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
scheduler.create_dag_run(dag)
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
ti1.execution_date = ti1.execution_date + datetime.timedelta(days=1)
session.merge(ti1)
session.commit()
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEquals(State.QUEUED, ti1.state)
def test_execute_task_instances_backfill_tasks_wont_execute(self):
"""
Tests that backfill tasks won't get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_backfill_tasks_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.run_id = BackfillJob.ID_PREFIX + '_blah'
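        # DagRun.is_backfill is derived from the run_id prefix, so renaming
        # the run_id is enough to make this dagrun look like a backfill.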
ti1 = TI(task1, dr1.execution_date)
ti1.refresh_from_db()
ti1.state = State.SCHEDULED
session.merge(ti1)
session.merge(dr1)
session.commit()
self.assertTrue(dr1.is_backfill)
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEquals(State.SCHEDULED, ti1.state)
def test_find_executable_task_instances_backfill_nodagrun(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_backfill_nodagrun'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr2.run_id = BackfillJob.ID_PREFIX + 'asdf'
ti_no_dagrun = TI(task1, DEFAULT_DATE - datetime.timedelta(days=1))
ti_backfill = TI(task1, dr2.execution_date)
ti_with_dagrun = TI(task1, dr1.execution_date)
ti_no_dagrun.state = State.SCHEDULED
ti_backfill.state = State.SCHEDULED
ti_with_dagrun.state = State.SCHEDULED
session.merge(dr2)
session.merge(ti_no_dagrun)
session.merge(ti_backfill)
session.merge(ti_with_dagrun)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
        res_keys = [ti.key for ti in res]
self.assertIn(ti_no_dagrun.key, res_keys)
self.assertIn(ti_with_dagrun.key, res_keys)
def test_find_executable_task_instances_pool(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_pool'
task_id_1 = 'dummy'
task_id_2 = 'dummydummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, pool='a')
task2 = DummyOperator(dag=dag, task_id=task_id_2, pool='b')
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
tis = ([
TI(task1, dr1.execution_date),
TI(task2, dr1.execution_date),
TI(task1, dr2.execution_date),
TI(task2, dr2.execution_date)
])
for ti in tis:
ti.state = State.SCHEDULED
session.merge(ti)
pool = models.Pool(pool='a', slots=1, description='haha')
pool2 = models.Pool(pool='b', slots=100, description='haha')
session.add(pool)
session.add(pool2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
session.commit()
self.assertEqual(3, len(res))
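        # pool 'a' has a single slot, so only one of the two task1 instances
        # is executable; both task2 instances fit into pool 'b' (100 slots).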
res_keys = []
for ti in res:
res_keys.append(ti.key)
self.assertIn(tis[0].key, res_keys)
self.assertIn(tis[1].key, res_keys)
self.assertIn(tis[3].key, res_keys)
def test_nonexistent_pool(self):
dag_id = 'SchedulerJobTest.test_nonexistent_pool'
task_id = 'dummy_wrong_pool'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task = DummyOperator(dag=dag, task_id=task_id, pool="this_pool_doesnt_exist")
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr = scheduler.create_dag_run(dag)
ti = TI(task, dr.execution_date)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
session.commit()
self.assertEqual(0, len(res))
def test_find_executable_task_instances_none(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_none'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
scheduler.create_dag_run(dag)
session.commit()
self.assertEqual(0, len(scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)))
def test_find_executable_task_instances_concurrency(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.RUNNING
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
        res_keys = [ti.key for ti in res]
self.assertIn(ti2.key, res_keys)
ti2.state = State.RUNNING
session.merge(ti2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(0, len(res))
def test_find_executable_task_instances_concurrency_queued(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency_queued'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id='dummy1')
task2 = DummyOperator(dag=dag, task_id='dummy2')
task3 = DummyOperator(dag=dag, task_id='dummy3')
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dag_run = scheduler.create_dag_run(dag)
ti1 = TI(task1, dag_run.execution_date)
ti2 = TI(task2, dag_run.execution_date)
ti3 = TI(task3, dag_run.execution_date)
ti1.state = State.RUNNING
ti2.state = State.QUEUED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
self.assertEqual(res[0].key, ti3.key)
def test_find_executable_task_instances_task_concurrency(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_task_concurrency'
task_id_1 = 'dummy'
task_id_2 = 'dummy2'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, task_concurrency=2)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1_1 = TI(task1, dr1.execution_date)
ti2 = TI(task2, dr1.execution_date)
ti1_1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
ti1_1.state = State.RUNNING
ti2.state = State.RUNNING
ti1_2 = TI(task1, dr2.execution_date)
ti1_2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.merge(ti1_2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
ti1_2.state = State.RUNNING
ti1_3 = TI(task1, dr3.execution_date)
ti1_3.state = State.SCHEDULED
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(0, len(res))
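        # Two RUNNING instances of task1 saturate its task_concurrency=2
        # limit, so ti1_3 is not executable even though it is SCHEDULED.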
ti1_1.state = State.SCHEDULED
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
ti1_1.state = State.RUNNING
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
def test_change_state_for_executable_task_instances_no_tis(self):
scheduler = SchedulerJob()
session = settings.Session()
res = scheduler._change_state_for_executable_task_instances(
[], [State.NONE], session)
self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_no_tis_with_state(self):
dag_id = 'SchedulerJobTest.test_change_state_for__no_tis_with_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._change_state_for_executable_task_instances(
[ti1, ti2, ti3],
[State.RUNNING],
session)
self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_none_state(self):
dag_id = 'SchedulerJobTest.test_change_state_for__none_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.SCHEDULED
ti2.state = State.QUEUED
ti3.state = State.NONE
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._change_state_for_executable_task_instances(
[ti1, ti2, ti3],
[State.NONE, State.SCHEDULED],
session)
self.assertEqual(2, len(res))
ti1.refresh_from_db()
ti3.refresh_from_db()
self.assertEqual(State.QUEUED, ti1.state)
self.assertEqual(State.QUEUED, ti3.state)
def test_enqueue_task_instances_with_queued_state(self):
dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
session.merge(ti1)
session.commit()
with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:
scheduler._enqueue_task_instances_with_queued_state(dagbag, [ti1])
mock_queue_command.assert_called()
def test_execute_task_instances_nothing(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_nothing'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = SimpleDagBag([])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti1.state = State.SCHEDULED
session.merge(ti1)
session.commit()
self.assertEqual(0, scheduler._execute_task_instances(dagbag, states=[State.SCHEDULED]))
def test_execute_task_instances(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_nonexistent_queue'
        # It is important that len(tasks) is less than concurrency: before
        # the fix, scheduler._execute_task_instances only checked the number
        # of running tasks once, which could let arbitrarily many tasks
        # execute in the second run.
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
# create first dag run with 1 running and 1 queued
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task2, dr1.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.RUNNING
ti2.state = State.RUNNING
session.merge(ti1)
session.merge(ti2)
session.commit()
self.assertEqual(State.RUNNING, dr1.state)
self.assertEqual(2, DAG.get_num_task_instances(dag_id, dag.task_ids,
states=[State.RUNNING], session=session))
# create second dag run
dr2 = scheduler.create_dag_run(dag)
ti3 = TI(task1, dr2.execution_date)
ti4 = TI(task2, dr2.execution_date)
ti3.refresh_from_db()
ti4.refresh_from_db()
# manually set to scheduled so we can pick them up
ti3.state = State.SCHEDULED
ti4.state = State.SCHEDULED
session.merge(ti3)
session.merge(ti4)
session.commit()
self.assertEqual(State.RUNNING, dr2.state)
res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
# check that concurrency is respected
ti1.refresh_from_db()
ti2.refresh_from_db()
ti3.refresh_from_db()
ti4.refresh_from_db()
self.assertEqual(3, DAG.get_num_task_instances(dag_id, dag.task_ids,
states=[State.RUNNING, State.QUEUED], session=session))
self.assertEqual(State.RUNNING, ti1.state)
self.assertEqual(State.RUNNING, ti2.state)
six.assertCountEqual(self, [State.QUEUED, State.SCHEDULED], [ti3.state, ti4.state])
self.assertEqual(1, res)
def test_execute_task_instances_limit(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_limit'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_2'
        # It is important that len(tasks) is less than concurrency: before
        # the fix, scheduler._execute_task_instances only checked the number
        # of running tasks once, which could let arbitrarily many tasks
        # execute in the second run.
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
scheduler.max_tis_per_query = 3
session = settings.Session()
tis = []
for i in range(0, 4):
dr = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr.execution_date)
ti2 = TI(task2, dr.execution_date)
tis.append(ti1)
tis.append(ti2)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.commit()
res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
self.assertEqual(8, res)
for ti in tis:
ti.refresh_from_db()
self.assertEqual(State.QUEUED, ti.state)
@unittest.skipUnless("INTEGRATION" in os.environ,
"The test is flaky with nondeterministic result")
def test_change_state_for_tis_without_dagrun(self):
dag1 = DAG(
dag_id='test_change_state_for_tis_without_dagrun',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag1,
owner='airflow')
DummyOperator(
task_id='dummy_b',
dag=dag1,
owner='airflow')
dag2 = DAG(
dag_id='test_change_state_for_tis_without_dagrun_dont_change',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag2,
owner='airflow')
dag3 = DAG(
dag_id='test_change_state_for_tis_without_dagrun_no_dagrun',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag3,
owner='airflow')
session = settings.Session()
dr1 = dag1.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag2.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti1a = dr1.get_task_instance(task_id='dummy', session=session)
ti1a.state = State.SCHEDULED
ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
ti1b.state = State.SUCCESS
session.commit()
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.state = State.SCHEDULED
session.commit()
ti3 = TI(dag3.get_task('dummy'), DEFAULT_DATE)
ti3.state = State.SCHEDULED
session.merge(ti3)
session.commit()
dagbag = self._make_simple_dag_bag([dag1, dag2, dag3])
scheduler = SchedulerJob(num_runs=0, run_duration=0)
scheduler._change_state_for_tis_without_dagrun(
simple_dag_bag=dagbag,
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti1a = dr1.get_task_instance(task_id='dummy', session=session)
ti1a.refresh_from_db(session=session)
self.assertEqual(ti1a.state, State.SCHEDULED)
ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
ti1b.refresh_from_db(session=session)
self.assertEqual(ti1b.state, State.SUCCESS)
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
ti3.refresh_from_db(session=session)
self.assertEquals(ti3.state, State.NONE)
dr1.refresh_from_db(session=session)
dr1.state = State.FAILED
# why o why
session.merge(dr1)
session.commit()
scheduler._change_state_for_tis_without_dagrun(
simple_dag_bag=dagbag,
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti1a.refresh_from_db(session=session)
self.assertEqual(ti1a.state, State.SCHEDULED)
# don't touch ti1b
ti1b.refresh_from_db(session=session)
self.assertEqual(ti1b.state, State.SUCCESS)
# don't touch ti2
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
def test_execute_helper_reset_orphaned_tasks(self):
session = settings.Session()
dag = DAG(
'test_execute_helper_reset_orphaned_tasks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag.create_dagrun(run_id=BackfillJob.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(1),
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.SCHEDULED
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
ti2.state = State.SCHEDULED
session.commit()
processor = mock.MagicMock()
processor.get_last_finish_time.return_value = None
scheduler = SchedulerJob(num_runs=0, run_duration=0)
executor = TestExecutor()
scheduler.executor = executor
scheduler._execute_helper(processor_manager=processor)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti.state, State.NONE)
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
@provide_session
def evaluate_dagrun(
self,
dag_id,
expected_task_states, # dict of task_id: state
dagrun_state,
run_kwargs=None,
advance_execution_date=False,
session=None):
"""
        Helper for testing DagRun states with simple two-task DAGs.
This is hackish: a dag run is created but its tasks are
run by a backfill.
"""
if run_kwargs is None:
run_kwargs = {}
scheduler = SchedulerJob()
dag = self.dagbag.get_dag(dag_id)
dag.clear()
dr = scheduler.create_dag_run(dag)
if advance_execution_date:
# run a second time to schedule a dagrun after the start_date
dr = scheduler.create_dag_run(dag)
ex_date = dr.execution_date
try:
dag.run(start_date=ex_date, end_date=ex_date, **run_kwargs)
except AirflowException:
pass
# test tasks
for task_id, expected_state in expected_task_states.items():
task = dag.get_task(task_id)
ti = TI(task, ex_date)
ti.refresh_from_db()
self.assertEqual(ti.state, expected_state)
# load dagrun
dr = DagRun.find(dag_id=dag_id, execution_date=ex_date)
dr = dr[0]
dr.dag = dag
self.assertEqual(dr.state, dagrun_state)
def test_dagrun_fail(self):
"""
DagRuns with one failed and one incomplete root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_fail',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.UPSTREAM_FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_success(self):
"""
DagRuns with one failed and one successful root task -> SUCCESS
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_success',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.SUCCESS,
},
dagrun_state=State.SUCCESS)
def test_dagrun_root_fail(self):
"""
DagRuns with one successful and one failed root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_root_fail',
expected_task_states={
'test_dagrun_succeed': State.SUCCESS,
'test_dagrun_fail': State.FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_root_fail_unfinished(self):
"""
DagRuns with one unfinished and one failed root task -> RUNNING
"""
# Run both the failed and successful tasks
scheduler = SchedulerJob()
dag_id = 'test_dagrun_states_root_fail_unfinished'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
dr = scheduler.create_dag_run(dag)
try:
dag.run(start_date=dr.execution_date, end_date=dr.execution_date)
except AirflowException: # Expect an exception since there is a failed task
pass
# Mark the successful task as never having run since we want to see if the
        # dagrun will be in a running state despite having an unfinished task.
session = settings.Session()
ti = dr.get_task_instance('test_dagrun_unfinished', session=session)
ti.state = State.NONE
session.commit()
dr_state = dr.update_state()
self.assertEqual(dr_state, State.RUNNING)
def test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date(self):
"""
        Test that an otherwise-deadlocked dagrun is marked as a success
        if ignore_first_depends_on_past=True and the dagrun execution_date
        is after the start_date.
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
advance_execution_date=True,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_dagrun_deadlock_ignore_depends_on_past(self):
"""
Test that ignore_first_depends_on_past doesn't affect results
(this is the same test as
test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date except
that start_date == execution_date so depends_on_past is irrelevant).
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_scheduler_start_date(self):
"""
        Test that the scheduler respects start_dates, even when DAGs have run
"""
dag_id = 'test_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertTrue(dag.start_date > DEFAULT_DATE)
scheduler = SchedulerJob(dag_id,
num_runs=2)
scheduler.run()
# zero tasks ran
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
        # Previously, running this backfill would kick off the scheduler,
        # because it would take the most recent run and start from there.
        # That behavior still exists, but now it only does so if the run is
        # after the start date.
backfill = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
backfill.run()
# one task ran
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
scheduler = SchedulerJob(dag_id,
num_runs=2)
scheduler.run()
# still one task
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_scheduler_multiprocessing(self):
"""
Test that the scheduler can successfully queue multiple dags in parallel
"""
dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
for dag_id in dag_ids:
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
num_runs=2)
scheduler.run()
# zero tasks ran
dag_id = 'test_start_date_scheduling'
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
def test_scheduler_dagrun_once(self):
"""
Test if the scheduler does not create multiple dagruns
if a dag is scheduled with @once and a start_date
"""
dag = DAG(
'test_scheduler_dagrun_once',
start_date=timezone.datetime(2015, 1, 1),
schedule_interval="@once")
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
def test_scheduler_process_task_instances(self):
"""
Test if _process_task_instances puts the right task instances into the
queue.
"""
dag = DAG(
dag_id='test_scheduler_process_execute_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.append.assert_called_with(
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE)
)
def test_scheduler_do_not_schedule_removed_task(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
        queue.append.assert_not_called()
def test_scheduler_do_not_schedule_too_early(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_too_early',
start_date=timezone.datetime(2200, 1, 1))
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
        queue.append.assert_not_called()
def test_scheduler_do_not_run_finished(self):
dag = DAG(
dag_id='test_scheduler_do_not_run_finished',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = State.SUCCESS
session.commit()
session.close()
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
        queue.append.assert_not_called()
def test_scheduler_add_new_task(self):
"""
Test if a task instance will be added if the dag is updated
"""
dag = DAG(
dag_id='test_scheduler_add_new_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances()
self.assertEquals(len(tis), 1)
dag_task2 = DummyOperator(
task_id='dummy2',
dag=dag,
owner='airflow')
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
tis = dr.get_task_instances()
self.assertEquals(len(tis), 2)
def test_scheduler_verify_max_active_runs(self):
"""
        Test that a dagrun will not be scheduled if max_active_runs has been reached
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
def test_scheduler_fail_dagrun_timeout(self):
"""
        Test that a dagrun will be set to failed if it times out
"""
dag = DAG(
dag_id='test_scheduler_fail_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.dagrun_timeout = datetime.timedelta(seconds=60)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
dr2 = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr2)
dr.refresh_from_db(session=session)
self.assertEquals(dr.state, State.FAILED)
def test_scheduler_verify_max_active_runs_and_dagrun_timeout(self):
"""
        Test that a dagrun will not be scheduled if max_active_runs has been reached and dagrun_timeout has not expired,
        but will be scheduled once dagrun_timeout has expired, even with max_active_runs reached
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs_and_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
dag.dagrun_timeout = datetime.timedelta(seconds=60)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
        # Should not be scheduled as the DagRun has not timed out and max_active_runs is reached
new_dr = scheduler.create_dag_run(dag)
self.assertIsNone(new_dr)
# Should be scheduled as dagrun_timeout has passed
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
new_dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(new_dr)
def test_scheduler_max_active_runs_respected_after_clear(self):
"""
Test if _process_task_instances only schedules ti's up to max_active_runs
(related to issue AIRFLOW-137)
"""
dag = DAG(
dag_id='test_scheduler_max_active_runs_respected_after_clear',
start_date=DEFAULT_DATE)
dag.max_active_runs = 3
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
# First create up to 3 dagruns in RUNNING state.
scheduler.create_dag_run(dag)
# Reduce max_active_runs to 1
dag.max_active_runs = 1
queue = Mock()
# and schedule them in, so we can check how many
# tasks are put on the queue (should be one, not 3)
scheduler._process_task_instances(dag, queue=queue)
queue.append.assert_called_with(
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE)
)
@patch.object(TI, 'pool_full')
def test_scheduler_verify_pool_full(self, mock_pool_full):
"""
        Test that task instances are not queued when the pool is full
        (TI.pool_full is mocked out, so the pool check inside
        _execute_task_instances is what limits queuing)
"""
mock_pool_full.return_value = False
dag = DAG(
dag_id='test_scheduler_verify_pool_full',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_pool_full')
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
session.add(pool)
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
# Create 2 dagruns, which will create 2 task instances.
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEquals(dr.execution_date, DEFAULT_DATE)
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
queue = []
scheduler._process_task_instances(dag, queue=queue)
self.assertEquals(len(queue), 2)
dagbag = self._make_simple_dag_bag([dag])
# Recreated part of the scheduler here, to kick off tasks -> executor
for ti_key in queue:
task = dag.get_task(ti_key[1])
ti = TI(task, ti_key[2])
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
session.merge(ti)
session.commit()
scheduler._execute_task_instances(dagbag,
(State.SCHEDULED,
State.UP_FOR_RETRY))
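        # The pool has a single slot, so out of the two SCHEDULED task
        # instances only one can be queued onto the executor.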
self.assertEquals(len(scheduler.executor.queued_tasks), 1)
def test_scheduler_auto_align(self):
"""
Test if the schedule_interval will be auto aligned with the start_date
such that if the start_date coincides with the schedule the first
execution_date will be start_date, otherwise it will be start_date +
interval.
"""
dag = DAG(
dag_id='test_scheduler_auto_align_1',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="4 5 * * *"
)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEquals(dr.execution_date, timezone.datetime(2016, 1, 2, 5, 4))
dag = DAG(
dag_id='test_scheduler_auto_align_2',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="10 10 * * *"
)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEquals(dr.execution_date, timezone.datetime(2016, 1, 1, 10, 10))
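    # A minimal sketch of the alignment rule verified above, using croniter
    # directly (croniter is an Airflow dependency; naive datetimes are used
    # here purely for brevity):
    #
    #   from croniter import croniter
    #   start = datetime.datetime(2016, 1, 1, 10, 10)
    #   # first execution_date == next cron fire time strictly after start_date
    #   croniter("4 5 * * *", start).get_next(datetime.datetime)
    #   # -> datetime.datetime(2016, 1, 2, 5, 4)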
def test_scheduler_reschedule(self):
"""
Checks if tasks that are not taken up by the executor
get rescheduled
"""
executor = TestExecutor()
dagbag = DagBag(executor=executor)
dagbag.dags.clear()
dagbag.executor = executor
dag = DAG(
dag_id='test_scheduler_reschedule',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
@mock.patch('airflow.models.DagBag', return_value=dagbag)
@mock.patch('airflow.models.DagBag.collect_dags')
def do_schedule(function, function2):
            # Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
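        # The stacked mock.patch decorators above are what feed the unused
        # 'function'/'function2' arguments: they make airflow.models.DagBag
        # return the bag prepared above and turn collect_dags into a no-op,
        # so the scheduler loop sees exactly one DAG.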
do_schedule()
self.assertEquals(1, len(executor.queued_tasks))
executor.queued_tasks.clear()
do_schedule()
self.assertEquals(2, len(executor.queued_tasks))
def test_scheduler_sla_miss_callback(self):
"""
Test that the scheduler does not call the sla_miss_callback when a notification has already been sent
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
sla_callback = MagicMock()
# Create dag with a start of 2 days ago, but an sla of 1 day ago so we'll already have an sla_miss on the books
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow')
# Create a TaskInstance for two days ago
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(models.SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date,
email_sent=False,
notification_sent=True))
# Now call manage_slas and see if the sla_miss callback gets called
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
scheduler.manage_slas(dag=dag, session=session)
sla_callback.assert_not_called()
def test_scheduler_sla_miss_callback_exception(self):
"""
Test that the scheduler gracefully logs an exception if there is a problem
calling the sla_miss_callback
"""
session = settings.Session()
sla_callback = MagicMock(side_effect=RuntimeError('Could not call function'))
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
sla=datetime.timedelta(hours=1))
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
                                          state='success'))
        # Create an SlaMiss record (no notification flags set)
session.merge(models.SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
# Now call manage_slas and see if the sla_miss callback gets called
scheduler = SchedulerJob(dag_id='test_sla_miss')
with mock.patch('airflow.jobs.SchedulerJob.log',
new_callable=PropertyMock) as mock_log:
scheduler.manage_slas(dag=dag, session=session)
sla_callback.assert_called()
mock_log().exception.assert_called_with(
'Could not call sla_miss_callback for DAG %s',
'test_sla_miss')
@mock.patch("airflow.utils.email.send_email")
def test_scheduler_sla_miss_email_exception(self, mock_send_email):
"""
Test that the scheduler gracefully logs an exception if there is a problem
sending an email
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
mock_send_email.side_effect = RuntimeError('Could not send an email')
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
email='test@test.com',
sla=datetime.timedelta(hours=1))
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
                                          state='success'))
        # Create an SlaMiss record (no notification flags set)
session.merge(models.SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
with mock.patch('airflow.jobs.SchedulerJob.log',
new_callable=PropertyMock) as mock_log:
scheduler.manage_slas(dag=dag, session=session)
mock_log().exception.assert_called_with(
'Could not send SLA Miss email notification for DAG %s',
'test_sla_miss')
def test_retry_still_in_executor(self):
"""
        Checks that the scheduler does not put a task in limbo when a task is retried
but is still present in the executor.
"""
executor = TestExecutor()
dagbag = DagBag(executor=executor)
dagbag.dags.clear()
dagbag.executor = executor
dag = DAG(
dag_id='test_retry_still_in_executor',
start_date=DEFAULT_DATE,
schedule_interval="@once")
dag_task1 = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
@mock.patch('airflow.models.DagBag', return_value=dagbag)
@mock.patch('airflow.models.DagBag.collect_dags')
def do_schedule(function, function2):
            # Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule()
self.assertEquals(1, len(executor.queued_tasks))
def run_with_error(task):
try:
task.run()
except AirflowException:
pass
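        # BaseExecutor.queued_tasks maps TI keys to
        # (command, priority, queue, task_instance) tuples; pull out the
        # single entry the scheduler queued above.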
ti_tuple = six.next(six.itervalues(executor.queued_tasks))
(command, priority, queue, ti) = ti_tuple
ti.task = dag_task1
self.assertEqual(ti.try_number, 1)
# fail execution
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti.try_number, 2)
ti.refresh_from_db(lock_for_update=True, session=session)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
# do not schedule
do_schedule()
self.assertTrue(executor.has_task(ti))
ti.refresh_from_db()
self.assertEqual(ti.state, State.SCHEDULED)
# now the executor has cleared and it should be allowed the re-queue
executor.queued_tasks.clear()
do_schedule()
ti.refresh_from_db()
self.assertEqual(ti.state, State.QUEUED)
@unittest.skipUnless("INTEGRATION" in os.environ, "Can only run end to end")
def test_retry_handling_job(self):
"""
Integration test of the scheduler not accidentally resetting
the try_numbers for a task
"""
dag = self.dagbag.get_dag('test_retry_handling_job')
dag_task1 = dag.get_task("test_retry_handling_op")
dag.clear()
scheduler = SchedulerJob(dag_id=dag.dag_id,
num_runs=1)
scheduler.heartrate = 0
scheduler.run()
session = settings.Session()
ti = session.query(TI).filter(TI.dag_id==dag.dag_id,
TI.task_id==dag_task1.task_id).first()
# make sure the counter has increased
self.assertEqual(ti.try_number, 2)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
def test_scheduler_run_duration(self):
"""
Verifies that the scheduler run duration limit is followed.
"""
dag_id = 'test_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertTrue(dag.start_date > DEFAULT_DATE)
expected_run_duration = 5
start_time = timezone.utcnow()
scheduler = SchedulerJob(dag_id,
run_duration=expected_run_duration)
scheduler.run()
end_time = timezone.utcnow()
run_duration = (end_time - start_time).total_seconds()
logging.info("Test ran in %.2fs, expected %.2fs",
run_duration,
expected_run_duration)
self.assertLess(run_duration - expected_run_duration, 5.0)
def test_dag_with_system_exit(self):
"""
        Test to check that a DAG with a sys.exit() call doesn't break the scheduler.
"""
dag_id = 'exit_test_dag'
dag_ids = [dag_id]
dag_directory = os.path.join(settings.DAGS_FOLDER,
"..",
"dags_with_system_exit")
dag_file = os.path.join(dag_directory,
'b_test_scheduler_dags.py')
dagbag = DagBag(dag_folder=dag_file)
for dag_id in dag_ids:
dag = dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
subdir=dag_directory,
num_runs=1)
scheduler.run()
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_dag_get_active_runs(self):
"""
Test to check that a DAG returns its active runs
"""
now = timezone.utcnow()
six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)
START_DATE = six_hours_ago_to_the_hour
DAG_NAME1 = 'get_active_runs_test'
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': START_DATE
}
dag1 = DAG(DAG_NAME1,
schedule_interval='* * * * *',
max_active_runs=1,
default_args=default_args
)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag1)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag1)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag1)
run_this_3.set_upstream(run_this_2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag1.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag1.clear()
dr = scheduler.create_dag_run(dag1)
# We had better get a dag run
self.assertIsNotNone(dr)
execution_date = dr.execution_date
running_dates = dag1.get_active_runs()
try:
running_date = running_dates[0]
        except IndexError:
running_date = 'Except'
self.assertEqual(execution_date, running_date, 'Running Date must match Execution Date')
def test_dag_catchup_option(self):
"""
Test to check that a DAG with catchup = False only schedules beginning now, not back to the start date
"""
def setup_dag(dag_id, schedule_interval, start_date, catchup):
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': start_date
}
dag = DAG(dag_id,
schedule_interval=schedule_interval,
max_active_runs=1,
catchup=catchup,
default_args=default_args)
t1 = DummyOperator(task_id='t1', dag=dag)
t2 = DummyOperator(task_id='t2', dag=dag)
t2.set_upstream(t1)
t3 = DummyOperator(task_id='t3', dag=dag)
t3.set_upstream(t2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
return dag
now = timezone.utcnow()
six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(
minute=0, second=0, microsecond=0)
half_an_hour_ago = now - datetime.timedelta(minutes=30)
two_hours_ago = now - datetime.timedelta(hours=2)
scheduler = SchedulerJob()
dag1 = setup_dag(dag_id='dag_with_catchup',
schedule_interval='* * * * *',
start_date=six_hours_ago_to_the_hour,
catchup=True)
default_catchup = configuration.conf.getboolean('scheduler', 'catchup_by_default')
self.assertEqual(default_catchup, True)
self.assertEqual(dag1.catchup, True)
dag2 = setup_dag(dag_id='dag_without_catchup_ten_minute',
schedule_interval='*/10 * * * *',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = scheduler.create_dag_run(dag2)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last half an hour, not 6 hours ago
self.assertGreater(dr.execution_date, half_an_hour_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, timezone.utcnow())
dag3 = setup_dag(dag_id='dag_without_catchup_hourly',
schedule_interval='@hourly',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = scheduler.create_dag_run(dag3)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last 2 hours, not 6 hours ago
self.assertGreater(dr.execution_date, two_hours_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, timezone.utcnow())
dag4 = setup_dag(dag_id='dag_without_catchup_once',
schedule_interval='@once',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = scheduler.create_dag_run(dag4)
self.assertIsNotNone(dr)
def test_add_unparseable_file_before_sched_start_creates_import_error(self):
try:
dags_folder = mkdtemp()
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_add_unparseable_file_after_sched_start_creates_import_error(self):
try:
dags_folder = mkdtemp()
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_no_import_errors_with_parseable_dag(self):
try:
dags_folder = mkdtemp()
parseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(parseable_filename, 'w') as parseable_file:
parseable_file.writelines(PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_new_import_error_replaces_old(self):
try:
dags_folder = mkdtemp()
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Generate replacement import error (the error will be on the second line now)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(
PARSEABLE_DAG_FILE_CONTENTS +
os.linesep +
UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 2)".format(TEMP_DAG_FILENAME))
def test_remove_error_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Remove the import error from the file
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(
PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_remove_file_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
# Rerun the scheduler once the dag file has been removed
self.run_single_scheduler_loop_with_no_dags(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_list_py_file_paths(self):
"""
[JIRA-1357] Test the 'list_py_file_paths' function used by the
scheduler to list and load DAGs.
"""
detected_files = set()
expected_files = set()
# No_dags is empty, _invalid_ is ignored by .airflowignore
ignored_files = [
'no_dags.py',
'test_invalid_cron.py',
'test_zip_invalid_cron.zip',
]
for file_name in os.listdir(TEST_DAGS_FOLDER):
if file_name.endswith('.py') or file_name.endswith('.zip'):
if file_name not in ignored_files:
expected_files.add(
'{}/{}'.format(TEST_DAGS_FOLDER, file_name))
for file_path in list_py_file_paths(TEST_DAGS_FOLDER):
detected_files.add(file_path)
self.assertEqual(detected_files, expected_files)
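    # The .airflowignore mechanism exercised above treats every line of the
    # file as a regex matched against candidate file paths. A hypothetical
    # sketch of the filter (not the actual implementation):
    #
    #   import re
    #   with open('.airflowignore') as f:
    #       patterns = [re.compile(line.strip()) for line in f if line.strip()]
    #   ignored = any(p.search(file_path) for p in patterns)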
def test_reset_orphaned_tasks_nothing(self):
"""Try with nothing. """
scheduler = SchedulerJob()
session = settings.Session()
self.assertEqual(
0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_external_triggered_dag(self):
dag_id = 'test_reset_orphaned_tasks_external_triggered_dag'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag, session=session)
ti = dr1.get_task_instances(session=session)[0]
dr1.state = State.RUNNING
ti.state = State.SCHEDULED
dr1.external_trigger = True
session.merge(ti)
session.merge(dr1)
session.commit()
reset_tis = scheduler.reset_state_for_orphaned_tasks(session=session)
self.assertEquals(1, len(reset_tis))
def test_reset_orphaned_tasks_backfill_dag(self):
dag_id = 'test_reset_orphaned_tasks_backfill_dag'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag, session=session)
ti = dr1.get_task_instances(session=session)[0]
ti.state = State.SCHEDULED
dr1.state = State.RUNNING
dr1.run_id = BackfillJob.ID_PREFIX + '_sdfsfdfsd'
session.merge(ti)
session.merge(dr1)
session.commit()
self.assertTrue(dr1.is_backfill)
self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_specified_dagrun(self):
"""Try to reset when we specify a dagrun and ensure nothing else is."""
dag_id = 'test_reset_orphaned_tasks_specified_dagrun'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
# make two dagruns, only reset for one
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr1.state = State.SUCCESS
dr2.state = State.RUNNING
ti1 = dr1.get_task_instances(session=session)[0]
ti2 = dr2.get_task_instances(session=session)[0]
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(dr1)
session.merge(dr2)
session.commit()
reset_tis = scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr2, session=session)
self.assertEqual(1, len(reset_tis))
ti1.refresh_from_db(session=session)
ti2.refresh_from_db(session=session)
self.assertEqual(State.SCHEDULED, ti1.state)
self.assertEqual(State.NONE, ti2.state)
def test_reset_orphaned_tasks_nonexistent_dagrun(self):
"""Make sure a task in an orphaned state is not reset if it has no dagrun. """
dag_id = 'test_reset_orphaned_tasks_nonexistent_dagrun'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
task = DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
session.add(ti)
session.commit()
ti.refresh_from_db()
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_no_orphans(self):
dag_id = 'test_reset_orphaned_tasks_no_orphans'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.state = State.RUNNING
tis = dr1.get_task_instances(session=session)
tis[0].state = State.RUNNING
session.merge(dr1)
session.merge(tis[0])
session.commit()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
tis[0].refresh_from_db()
self.assertEqual(State.RUNNING, tis[0].state)
def test_reset_orphaned_tasks_non_running_dagruns(self):
"""Ensure orphaned tasks with non-running dagruns are not reset."""
dag_id = 'test_reset_orphaned_tasks_non_running_dagruns'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.state = State.SUCCESS
tis = dr1.get_task_instances(session=session)
self.assertEqual(1, len(tis))
tis[0].state = State.SCHEDULED
session.merge(dr1)
session.merge(tis[0])
session.commit()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_with_orphans(self):
"""Create dagruns and esnure only ones with correct states are reset."""
prefix = 'scheduler_job_test_test_reset_orphaned_tasks'
states = [State.QUEUED, State.SCHEDULED, State.NONE, State.RUNNING, State.SUCCESS]
states_to_reset = [State.QUEUED, State.SCHEDULED, State.NONE]
dag = DAG(dag_id=prefix,
start_date=DEFAULT_DATE,
schedule_interval="@daily")
tasks = []
for i in range(len(states)):
task_id = "{}_task_{}".format(prefix, i)
task = DummyOperator(task_id=task_id, dag=dag)
tasks.append(task)
scheduler = SchedulerJob()
session = settings.Session()
# create dagruns
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr1.state = State.RUNNING
dr2.state = State.SUCCESS
session.merge(dr1)
session.merge(dr2)
session.commit()
# create taskinstances and set states
dr1_tis = []
dr2_tis = []
for i, (task, state) in enumerate(zip(tasks, states)):
ti1 = TI(task, dr1.execution_date)
ti2 = TI(task, dr2.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = state
ti2.state = state
dr1_tis.append(ti1)
dr2_tis.append(ti2)
session.merge(ti1)
session.merge(ti2)
session.commit()
self.assertEqual(2, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
for ti in dr1_tis + dr2_tis:
ti.refresh_from_db()
# running dagrun should be reset
for state, ti in zip(states, dr1_tis):
if state in states_to_reset:
self.assertIsNone(ti.state)
else:
self.assertEqual(state, ti.state)
# otherwise not
for state, ti in zip(states, dr2_tis):
self.assertEqual(state, ti.state)
for state, ti in zip(states, dr1_tis):
ti.state = state
session.commit()
scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr1, session=session)
# check same for dag_run version
for state, ti in zip(states, dr2_tis):
self.assertEqual(state, ti.state)
session.close()
|
file_stream.py
|
import base64
import binascii
import collections
import logging
import threading
import requests
import time
import wandb
import itertools
from six.moves import queue
from wandb import util
from wandb import env
MAX_LINE_SIZE = 4*1024*1024 - 100*1024 # imposed by back end
logger = logging.getLogger(__name__)
Chunk = collections.namedtuple('Chunk', ('filename', 'data'))
class DefaultFilePolicy(object):
def __init__(self, start_chunk_id=0):
self._chunk_id = start_chunk_id
def process_chunks(self, chunks):
chunk_id = self._chunk_id
self._chunk_id += len(chunks)
return {
'offset': chunk_id,
'content': [c.data for c in chunks]
}
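# Illustrative example of the offset/content contract (hypothetical values):
#   policy = DefaultFilePolicy(start_chunk_id=5)
#   policy.process_chunks([Chunk('out.log', 'a\n'), Chunk('out.log', 'b\n')])
#   -> {'offset': 5, 'content': ['a\n', 'b\n']}   (the next batch starts at offset 7)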
class JsonlFilePolicy(object):
def __init__(self, start_chunk_id=0):
self._chunk_id = start_chunk_id
def process_chunks(self, chunks):
chunk_id = self._chunk_id
self._chunk_id += len(chunks)
chunk_data = []
for chunk in chunks:
if len(chunk.data) > MAX_LINE_SIZE:
msg = 'Metric data exceeds maximum size of {} bytes. Dropping it.'.format(MAX_LINE_SIZE)
wandb.termerror(msg, repeat=False)
util.sentry_message(msg)
else:
chunk_data.append(chunk.data)
return {
'offset': chunk_id,
'content': chunk_data,
}
class SummaryFilePolicy(object):
def process_chunks(self, chunks):
data = chunks[-1].data
if len(data) > MAX_LINE_SIZE:
msg = 'Summary data exceeds maximum size of {} bytes. Dropping it.'.format(MAX_LINE_SIZE)
wandb.termerror(msg, repeat=False)
util.sentry_message(msg)
return False
return {
'offset': 0, 'content': [data]
}
class CRDedupeFilePolicy(object):
"""File stream policy that removes characters that would be erased by
carriage returns.
This is what a terminal does. We use it for console output to reduce the
amount of data we need to send over the network (eg. for progress bars),
while preserving the output's appearance in the web app.
"""
def __init__(self, start_chunk_id=0):
self._chunk_id = start_chunk_id
def process_chunks(self, chunks):
content = []
for line in [c.data for c in chunks]:
if content and content[-1].endswith('\r'):
content[-1] = line
else:
content.append(line)
chunk_id = self._chunk_id
self._chunk_id += len(content)
if content and content[-1].endswith('\r'):
self._chunk_id -= 1
return {
'offset': chunk_id,
'content': content
}
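# Illustrative example: lines ending in '\r' are overwritten by the next chunk,
# just as a terminal would render a progress bar (hypothetical values):
#   policy = CRDedupeFilePolicy()
#   policy.process_chunks([Chunk('out', '10%\r'), Chunk('out', '50%\r'), Chunk('out', 'done\n')])
#   -> {'offset': 0, 'content': ['done\n']}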
class BinaryFilePolicy(object):
def __init__(self):
self._offset = 0
def process_chunks(self, chunks):
data = b''.join([c.data for c in chunks])
enc = base64.b64encode(data).decode('ascii')
offset = self._offset
self._offset += len(data)
return {
'offset': offset,  # start offset of this batch, captured before advancing
'content': enc,
'encoding': 'base64'
}
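# Illustrative example: 'offset' counts bytes in the decoded stream, while
# 'content' carries base64 text (hypothetical values):
#   policy = BinaryFilePolicy()
#   policy.process_chunks([Chunk('blob', b'\x00\x01')])
#   -> {'offset': 0, 'content': 'AAE=', 'encoding': 'base64'}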
class FileStreamApi(object):
"""Pushes chunks of files to our streaming endpoint.
This class is used as a singleton. It has a thread that serializes access to
the streaming endpoint and performs rate-limiting and batching.
TODO: Differentiate between binary/text encoding.
"""
Finish = collections.namedtuple('Finish', ('exitcode',))
HTTP_TIMEOUT = env.get_http_timeout(10)
MAX_ITEMS_PER_PUSH = 10000
def __init__(self, api, run_id):
self._api = api
self._run_id = run_id
self._client = requests.Session()
self._client.auth = ('api', api.api_key)
self._client.timeout = self.HTTP_TIMEOUT
self._client.headers.update({
'User-Agent': api.user_agent,
'X-WANDB-USERNAME': env.get_username(),
'X-WANDB-USER-EMAIL': env.get_user_email()
})
self._file_policies = {}
self._queue = queue.Queue()
self._thread = threading.Thread(target=self._thread_body)
# It seems we need to make this a daemon thread to get sync.py's atexit handler to run, which
# cleans this thread up.
self._thread.daemon = True
self._init_endpoint()
def _init_endpoint(self):
settings = self._api.settings()
self._endpoint = "{base}/files/{entity}/{project}/{run}/file_stream".format(
base=settings['base_url'],
entity=settings['entity'],
project=settings['project'],
run=self._run_id)
def start(self):
self._init_endpoint()
self._thread.start()
def set_default_file_policy(self, filename, file_policy):
"""Set an upload policy for a file unless one has already been set.
"""
if filename not in self._file_policies:
self._file_policies[filename] = file_policy
def set_file_policy(self, filename, file_policy):
self._file_policies[filename] = file_policy
@property
def heartbeat_seconds(self):
# Defaults to 30
return self._api.dynamic_settings["heartbeat_seconds"]
def rate_limit_seconds(self):
run_time = time.time() - wandb.START_TIME
if run_time < 60:
return max(1, self.heartbeat_seconds / 15)
elif run_time < 300:
return max(2.5, self.heartbeat_seconds / 3)
else:
return max(5, self.heartbeat_seconds)
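# Worked example with the default heartbeat_seconds of 30:
#   run_time < 60s  -> max(1, 30 / 15) = 2s between posts (fast early feedback)
#   run_time < 300s -> max(2.5, 30 / 3) = 10s
#   otherwise       -> max(5, 30) = 30s (settle into the heartbeat rate)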
def _read_queue(self):
# called from the push thread (_thread_body), this does an initial read
# that'll block for up to rate_limit_seconds. Then it tries to read
# as much out of the queue as it can. We do this because the http post
# to the server happens within _thread_body, and can take longer than
# our rate limit. So the next time we get a chance to read the queue we want
# to read everything that queued up since last time.
#
# If we have more than MAX_ITEMS_PER_PUSH in the queue then the push thread
# will get behind and data will buffer up in the queue.
return util.read_many_from_queue(
self._queue, self.MAX_ITEMS_PER_PUSH, self.rate_limit_seconds())
def _thread_body(self):
posted_data_time = time.time()
posted_anything_time = time.time()
ready_chunks = []
finished = None
while finished is None:
items = self._read_queue()
for item in items:
if isinstance(item, self.Finish):
finished = item
else:
# item is Chunk
ready_chunks.append(item)
cur_time = time.time()
if ready_chunks and (finished or cur_time - posted_data_time > self.rate_limit_seconds()):
posted_data_time = cur_time
posted_anything_time = cur_time
self._send(ready_chunks)
ready_chunks = []
if cur_time - posted_anything_time > self.heartbeat_seconds:
posted_anything_time = cur_time
self._handle_response(util.request_with_retry(self._client.post,
self._endpoint, json={'complete': False, 'failed': False}))
# post the final close message (finished is now a self.Finish instance)
util.request_with_retry(self._client.post,
self._endpoint, json={'complete': True, 'exitcode': int(finished.exitcode)})
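# Timeline sketch (illustrative): with a 2s rate limit and a 30s heartbeat,
# queued chunks are batched and POSTed at most every ~2s; if the queue goes
# quiet, an empty {'complete': False, 'failed': False} heartbeat is posted
# once ~30s pass without any other post.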
def _handle_response(self, response):
"""Logs dropped chunks and updates dynamic settings"""
if isinstance(response, Exception):
# Log the dropped chunk before raising; raising first left these lines unreachable.
wandb.termerror('Dropped streaming file chunk (see wandb/debug.log)')
logging.error("dropped chunk %s" % response)
raise response
elif response.json().get("limits"):
parsed = response.json()
self._api.dynamic_settings.update(parsed["limits"])
def _send(self, chunks):
# create files dict. dict of <filename: chunks> pairs where chunks is a list of
# [chunk_id, chunk_data] tuples (as lists since this will be json).
files = {}
# Groupby needs group keys to be consecutive, so sort first.
chunks.sort(key=lambda c: c.filename)
for filename, file_chunks in itertools.groupby(chunks, lambda c: c.filename):
file_chunks = list(file_chunks) # groupby returns iterator
self.set_default_file_policy(filename, DefaultFilePolicy())
files[filename] = self._file_policies[filename].process_chunks(
file_chunks)
if not files[filename]:
del files[filename]
self._handle_response(util.request_with_retry(
self._client.post, self._endpoint, json={'files': files}))
def stream_file(self, path):
name = path.split("/")[-1]
with open(path) as f:  # close the file instead of leaking the handle
self._send([Chunk(name, line) for line in f])
def push(self, filename, data):
"""Push a chunk of a file to the streaming endpoint.
Args:
filename: Name of the file this is a chunk of.
data: File data for this chunk.
"""
self._queue.put(Chunk(filename, data))
def finish(self, exitcode):
"""Cleans up.
Anything pushed after finish will be dropped.
Args:
exitcode: The exitcode of the watched process.
"""
self._queue.put(self.Finish(exitcode))
self._thread.join()
|
hadoop_transfile.py
|
# -*- coding: utf-8 -*-
from base.log import *
import os
import time
from multiprocessing import Process
import redis_pool
def gen_id():
r = redis_pool.get('dornaemon')
return r.incr('deploy_id')
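# Illustrative note: Redis INCR is atomic and returns 1, 2, 3, ... (starting at 1
# when the key is absent), so concurrent workers never receive the same deploy id.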
def do(src_path, dest_path, file_mode, deploy_id):
log_path = 'html/deploy_logs/%d.txt' % deploy_id
cmd = "python /home/op/wzs/python/transfile_hadoop.py %s %s %s > %s 2>&1" % (src_path, dest_path, file_mode, log_path)
logger().info('cmd:%s', cmd)
if 0 != os.system(cmd):
msg = '[Finish] Failed to deploy [%s]' % deploy_id
logger().error(msg)
else:
msg = '[Finish] Successfully deployed [%s]' % deploy_id
logger().info(msg)
with open(log_path, 'a') as log_file:
log_file.write(msg)
def start(src_path, dest_path, file_mode):
deploy_id = gen_id()
p = Process(target=do, args=(src_path, dest_path, file_mode, deploy_id))
p.start()
logger().info('start deploy [%s]', deploy_id)
return deploy_id
def status(deploy_id):
log_path = 'html/deploy_logs/%s.txt' % deploy_id
content = open(log_path).read()
return content
def main():
# NOTE: '644' is an illustrative file mode; start() requires all three arguments.
deploy_id = start('hadoop@hadoop-0015:/data/a.txt', '/mnt/data1/release', '644')
while True:
time.sleep(1)
#print status(deploy_id)
if __name__ == '__main__':
main()
|
demo.py
|
########### Python 3.6 #############
import http.client, urllib.request, urllib.parse, urllib.error, base64, requests, json
import numpy as np
import cv2
import os
import keyboard
from time import sleep
import time
import pygame
import sys
import random
import threading
from threading import Thread
# from multiprocessing import Process
###############################################
#### Update or verify the following values. ###
###############################################
#_songs = ['mu/romantic.mp3','mu/joy.mp3', 'mu/relax.mp3', 'mu/rock.mp3']
_songs = ['music/0.mp3','music/1.mp3','music/2.mp3','music/3.mp3','music/4.mp3','music/5.mp3','music/6.mp3','music/7.mp3','music/8.mp3','music/9.mp3', 'music/10.mp3', 'music/11.mp3', 'music/12.mp3' ,'music/13.mp3','music/14.mp3']
_currently_playing_song = None
_songsindex = 0
ans = {}
ans['answer'] = 'happy'
ans['flag'] = False
##############################################################################################
import ctypes  # included with the standard Python install
def Mbox(title, text, style):
return ctypes.windll.user32.MessageBoxW(0, text, title, style)
#Mbox('Analyzing your emotion', 'Do you like this song ?', 0)
def playsound(soundfile):
"""Play sound through default mixer channel in blocking manner.
This will load the whole sound into memory before playback
"""
pygame.init()
pygame.mixer.init()
sound = pygame.mixer.Sound(soundfile)
clock = pygame.time.Clock()
sound.play()
while pygame.mixer.get_busy():
print("Playing...")
clock.tick(10)  # throttle polling instead of busy-waiting at full CPU
def playmusic(soundfile):
"""Stream music with mixer.music module in blocking manner.
This will stream the sound from disk while playing.
"""
global ans
#: No need for global declaration to just read value
#pygame.init()
#pygame.mixer.init()
#clock = pygame.time.Clock()
pygame.mixer.music.load(soundfile)
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
# print(pygame.mixer.music.get_pos())
if pygame.mixer.music.get_pos() >= 18000:
#print(pygame.mixer.music.get_pos())
play_next_song()  # listener is happy: move on after 18 seconds of play
#print("You are loving this song, try this song now from the same singer !")
if ans['answer'] == 'sad' and ans['flag']:
pygame.mixer.music.stop()
#print("You don't seem to like this song, try song from a new singer !")
#time.sleep(2)
ans['flag'] = False
play_next_genre()
def stopmusic():
"""stop currently playing music"""
pygame.mixer.music.stop()
def getmixerargs():
pygame.mixer.init()
freq, size, chan = pygame.mixer.get_init()
return freq, size, chan
def initMixer():
BUFFER = 3072 # audio buffer size, number of samples since pygame 1.8.
FREQ, SIZE, CHAN = getmixerargs()
pygame.init()
pygame.mixer.init(FREQ, SIZE, CHAN, BUFFER)
def play_a_different_song():
global _currently_playing_song, _songs
next_song = random.choice(_songs)
print(next_song)
while next_song == _currently_playing_song:
print(_currently_playing_song)
next_song = random.choice(_songs)
print(next_song)
_currently_playing_song = next_song
pygame.mixer.music.load(next_song)
pygame.mixer.music.play()
def play_next_song():
global _songs
global _songsindex
if _songsindex >= len(_songs) - 1:  # wrap around at the end of the playlist
_songsindex = 0
else:
_songsindex = _songsindex + 1
pygame.mixer.music.load(_songs[_songsindex])
print("Now playing : {}".format(_songsindex))
pygame.mixer.music.play()
#print(pygame.mixer.music.get_pos())
def play_next_genre():
global _songs
global _songsindex
_songsindex += 3  # skip a few tracks ahead to land in a different "genre" block
if _songsindex >= len(_songs):
_songsindex = 0
pygame.mixer.music.load(_songs[_songsindex])
print("Now playing : {}".format(_songsindex))
pygame.mixer.music.play()
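# Illustrative example: with 15 tracks grouped three per "genre", jumping the
# index by 3 lands in the next block and wraps past the end of the playlist:
#   index 4  -> 7   (next genre)
#   index 13 -> 16 -> 0   (wraps to the start)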
###############################################
#### Update keys ###
###############################################################################################
# Replace the subscription_key string value with your valid subscription key.
subscription_key = 'a964c99ef5a944d99e2d50e1fea958d0'
# other keys for demo:
# Replace or verify the region.
#
# You must use the same region in your REST API call as you used to obtain your subscription keys.
# For example, if you obtained your subscription keys from the westus region, replace
# "westcentralus" in the URI below with "westus".
#
# NOTE: Free trial subscription keys are generated in the westcentralus region, so if you are using
# a free trial subscription key, you should not need to change this region.
uri_base = 'https://westcentralus.api.cognitive.microsoft.com'
# Request headers.
headers = {
'Content-Type': 'application/octet-stream',
'Ocp-Apim-Subscription-Key': subscription_key,
}
# Request parameters.
params = {
'returnFaceId': 'false',
'returnFaceLandmarks': 'false',
'returnFaceAttributes': 'smile,emotion',
}
def find_emotions(path):
# pathToFileInDisk = r'C:\Users\girls.jpg'
global ans
#global flag
#ans['answer'] = 'happy'
pathToFileInDisk = r'C:\Users\Karan Tyagi\Desktop\add to github\0 - hackbeanspot\code\{}'.format(path)
with open( pathToFileInDisk, 'rb' ) as f:
data = f.read()
try:
# Execute the REST API call and get the response.
response = requests.request('POST', uri_base + '/face/v1.0/detect', json=None, data=data, headers=headers, params=params)
# print ('Response:')
parsed = json.loads(response.text)
# print (json.dumps(parsed, sort_keys=True, indent=2))
frame_num = 1
sad_count = 0
happy_count = 0
for face in parsed:
#print(' Frame {} :\t'.format(frame_num),face['faceAttributes']['emotion'])
#print(' Smile {} :\t'.format(fno),face['faceAttributes']['smile'])
result = (face['faceAttributes']['emotion']['sadness'] + face['faceAttributes']['emotion']['neutral'] + face['faceAttributes']['emotion']['fear'] +
face['faceAttributes']['emotion']['disgust'] + face['faceAttributes']['emotion']['contempt'] +
face['faceAttributes']['emotion']['anger'])
# print("- - - - - - - - - - Analyzing your emotions - - - - - - - - - - -");
if result > face['faceAttributes']['emotion']['happiness']:
sad_count+=1
# Mbox('Analyzing your emotions', 'Don\'t be sad ? I am here to help. I \'ll change the song for you.', 0)
else:
happy_count+=1
# Mbox('Analyzing your emotions', 'I am glad you are happy . I think you like this artist.', 0)
# print(sum)
#print('\t{}'.format(face['faceAttributes']['emotion']['happiness']).ljust(18)+'{}'.format(face['faceAttributes']['emotion']['surprise']).ljust(18)+'{}'.format(face['faceAttributes']['emotion']['neutral']).ljust(18)+'{}'.format(face['faceAttributes']['emotion']['sadness']).ljust(18)+'{}'.format(face['faceAttributes']['emotion']['disgust']).ljust(18)+'{}'.format(face['faceAttributes']['emotion']['anger']).ljust(18)+'{}'.format(face['faceAttributes']['emotion']['contempt']).ljust(18)+'{}'.format(face['faceAttributes']['emotion']['fear']).ljust(18)+" "+ans['answer'])
if sad_count > happy_count:
ans['answer'] = 'sad'
ans['flag'] = True
print('\t >> Don\'t be sad! I\'m here to help; I\'ll change the song for you.')
# Mbox('Analyzing your emotions', 'Don\'t be sad ? I am here to help. I \'ll change the song for you.', 0)
else:
ans['answer'] ='happy'
print('\t >> I think you like this artist.')
# time.sleep(4) # happy song gets extended window of +4 sec, no processing till then
#frame_num+=1
except Exception as e:
print('Error:')
print(e)
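# Scoring sketch (illustrative, hypothetical scores): for each face the six
# "negative" emotion scores are summed and compared against happiness, e.g.
#   sadness 0.4 + neutral 0.3 + fear/disgust/contempt/anger 0.0 = 0.7 > happiness 0.2 -> sad
# The sad/happy majority across all detected faces then sets ans['answer'].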
def playvideo():
cv2.namedWindow("preview")
vc = cv2.VideoCapture(0)
if vc.isOpened(): # try to get the first frame
rval, frame = vc.read()
else:
rval = False
img_counter = 1
timer = 0
processed = 0
#print('\n\tHappiness'.ljust(18)+'Surprise'.ljust(18)+'Neutral'.ljust(18)+'Sadness'.ljust(18)+'Disgust'.ljust(18)+'Anger'.ljust(18)+'Contempt'.ljust(18)+'Fear'.ljust(18))
while rval:
cv2.imshow("preview", frame)
rval, frame = vc.read()
img_name = "img_frame{}.jpg".format(img_counter)
# print("Created img_frame{}.jpg".format(img_counter))
key = cv2.waitKey(25) # 1000/25 = 40 FPS
if timer % 40 == 0:
print('Tip : {}'.format(4-(timer / 40)), end='\r')
# start processing the image emotions once the timer reaches ~4 s (160 ticks)
if timer / 40 == 4:
cv2.imwrite(img_name, frame)
processed+=1
find_emotions(img_name)
key = cv2.waitKey(50) # milliseconds
timer=1
if os.path.isfile(img_name):
os.remove(img_name)
continue
# deleting the image after processing it
#print("Deleted img_frame{}.jpg".format(i))
else: ## Show an error ##
print("Error: %s file not found" % myfile)
continue
timer+=1
# take less frames
# end processing this frame
# deleting the image after processing it
# print("Deleted img_frame{}.jpg".format(img_counter))
# this can be put in a try catch box
## if file exists, delete it ##
img_counter+=1
if key == 27 or processed == 18:  # exit on ESC or after 18 processed frames
break
cv2.destroyWindow("preview")
print('API calls made or number frames processed for emotion detection : {}'.format(processed))
vc.release()
#print("\n\n\tAll images deleted. Memory freed. Enjoy Lappy. ;)")
def backgroundmusic():
initMixer()
global _songsindex
_songsindex = 0 # random number can also be used
filename = (_songs[_songsindex ])
#print(filename)
print("Now playing : {}".format(_songsindex))
playmusic(filename)
# playvideo()
# backgroundmusic()
def printit():
# periodic elapsed-time printer; expects a global 'start' timestamp to be set first
print('Time : ', end='\r')
threading.Timer(1.0, printit).start()
end = time.time()
elapsed = int(end - start)
print('Time : {}'.format(elapsed), end='\r')
if __name__ == '__main__':
#start = time.time()
#printit()
p1 = Thread(target=backgroundmusic)
p1.start()
p2 = Thread(target=playvideo)
p2.start()
p1.join()
p2.join()
####################################
|
main_bot.py
|
from concurrent import futures
import copy
import datetime
import threading
import time
import telebot
from telebot.types import LabeledPrice
import schedule
from configs import settings
from models.parsers import CurrencyExchanger
from models.user import User, Prediction, Session
from models import exceptions
from utils import (
get_proxy_list, prettify_float, get_json_config, substract_percent,
prettify_percent, prettify_utcoffset
)
from utils.translator import translate as _
from utils.telegram import kbs, inline_kbs
from utils.dt import (
convert_datetime, check_datetime_in_future, convert_from_country_format,
adapt_datetime, convert_to_country_format, get_now, get_country_dt_example,
adapt_check_times
)
telebot.apihelper.ENABLE_MIDDLEWARE = True
bot = telebot.TeleBot(settings.TOKEN, threaded=False)  # threaded=False works around a RecursionError
bot.full_bot_commands = {
'/start': 'запустить бота', # Start the bot
'/me': 'ваша информация', # Your info
'/today': 'котировки', # Quotes
'/change_checktime': 'сменить время оповещений', # Change check times
'/change_delta': 'сменить разницу в процентах, при которой оповещать',
# Change percent delta at which to notify
'/change_timezone': 'сменить ваш часовой пояс', # change your timezone
'/toggle_alarms': 'включить/выключить оповещения', # Toggle alarms
'/toggle_experts_predictions': 'включить/выключить прогнозы от экспертов',
# Toggle experts predictions
'/make_prediction': 'сделать прогноз', # Make a prediction
'/get_predictions': 'прогнозы', # Go to "Predictions" section
'/convert': 'конвертер валют', # Currency Converter
'/menu': 'главное меню', # Main menu
'/subscription': 'подписка', # Go to "Subscription" section
'/language': 'сменить язык', # Change language
'/techsupport': 'техподдержка', # Go to "Techsupport" section
'/help': 'помощь по командам', # Help with commands
}
bot.short_bot_commands = {
k: bot.full_bot_commands.get(k)
for k in ['/start', '/me', '/today', '/subscription', '/language', '/help']
}
bot.skip_pending = True
currency_parser = CurrencyExchanger(proxy_list=get_proxy_list())
USERS_SESSIONS = {}
###############################################################################
def get_or_create_session(chat_id):
global USERS_SESSIONS
try:
session = USERS_SESSIONS.get(chat_id)
if not session:
session = Session(chat_id)
settings.logger.debug(f"{session.user} logged in")
USERS_SESSIONS[chat_id] = session
except MemoryError:
for i in range(50):
USERS_SESSIONS.popitem()
return get_or_create_session(chat_id)
else:
return USERS_SESSIONS[chat_id]
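# Illustrative example: repeated lookups for the same chat reuse one Session
# object while it stays cached; on MemoryError, 50 arbitrary entries are
# evicted and the lookup retries:
#   s1 = get_or_create_session(42)
#   s2 = get_or_create_session(42)
#   assert s1 is s2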
# Middleware that avoids re-initializing the user on every message by reusing their session
@bot.middleware_handler(update_types=['message'])
def set_message_session(bot_instance, message):
bot_instance.session = get_or_create_session(message.chat.id)
# Same for callback queries: reuse the stored session instead of re-initializing the user
@bot.middleware_handler(update_types=['callback_query'])
def set_call_session(bot_instance, call):
bot_instance.session = get_or_create_session(call.message.chat.id)
@bot.middleware_handler(update_types=['message'])
def check_if_command(bot_instance, message):
# answer the command even if a `register_next_step_handler` is pending
if message.entities:
is_bot_command = (
message.entities[0].type == 'bot_command' and
message.text in bot_instance.full_bot_commands
)
if is_bot_command:
try:
bot_instance.clear_step_handler(message)
except RecursionError:
pass
###############################################################################
@settings.logger.catch_error
@bot.message_handler(commands=['start'])
def start_message(msg):
user = bot.session.user
tech_support_recognizer = settings.ACCESSIBLE_LINK.split('=')[1]
add_info = msg.text.split()[1:]
bot.send_message(
msg.chat.id,
_(
'Welcome, {}!',
user.language
).format(msg.from_user.first_name)
)
bot.send_message(
msg.chat.id,
_(
"I am <b>{}</b>, your personal shareholder bot, and I will keep"
" you updated on important trading events!",
user.language
).format(bot.get_me().first_name),
parse_mode='html'
)
if (add_info and (
tech_support_recognizer in add_info
)) or not list(User.get_staff_users()):
# if the user started the bot via the support link, or there are no staff users yet
user.init_staff()
bot.send_message(
msg.chat.id,
_(
'⚙ You have received a technical support status ⚙',
user.language
)
)
settings.logger.info(f"{user} recieved staff status")
return start_bot(msg)
@bot.message_handler(commands=['menu'])
def start_bot(msg, to_show_commands: bool = True):
user = bot.session.user
buttons = [
_('Quotes', user.language),
_('Notifications', user.language),
_('Subscription', user.language),
_('Language', user.language),
_('Technical support', user.language)
]
kb = kbs(buttons, one_time_keyboard=False)
if to_show_commands:
commands_str = '\n'.join(
'{} - %s' % v for k, v in bot.short_bot_commands.items()
)
bot.send_message(
msg.chat.id,
_(
commands_str,
user.language,
).format(*list(bot.short_bot_commands)),
reply_markup=kb
)
else:
bot.send_message(
msg.chat.id, _("Main menu", user.language), reply_markup=kb
)
bot.register_next_step_handler(msg, choose_option, buttons=buttons)
def choose_option(msg, buttons=None):
buttons = buttons or []
user = bot.session.user
if buttons[0] == msg.text:
# see exchange rates for today
return get_currency_rates_today(msg)
elif buttons[1] == msg.text:
# go to notifications section
buttons = {
_("Your info", user.language): see_user_info,
_(
'Change alarm time', user.language
): change_user_rate_check_times,
_(
'Change alarm percent', user.language
): change_user_rate_percent_delta,
_('Toggle alarms', user.language): toggle_user_alarms,
_(
"Toggle experts predictions", user.language
): toggle_user_experts_predictions,
_('Change time zone', user.language): change_user_timezone,
_('Main menu', user.language): start_bot
}
if user.is_pro:
buttons[_(
'⚜ Other currencies ⚜', user.language
)] = other_user_currencies_menu
kb = kbs(list(buttons), one_time_keyboard=False, row_width=2)
bot.send_message(
msg.chat.id,
_('Choose an option', user.language),
reply_markup=kb
)
return bot.register_next_step_handler(
msg, change_alarms, buttons
)
elif buttons[2] == msg.text:
return buy_subscription(msg)
elif buttons[-2] == msg.text:
# change system language
return change_language(msg)
elif buttons[-1] == msg.text:
return send_techsupport_message(msg)
else:
return bot.register_next_step_handler(msg, choose_option, buttons)
@bot.message_handler(commands=['today'])
def get_currency_rates_today(msg):
user = bot.session.user
buttons_dct = {
_('Make a prediction', user.language): make_user_currency_prediction,
_('View predictions', user.language): see_users_currency_predictions,
_('Convert', user.language): convert_currency,
_('Main menu', user.language): start_bot
}
def choose_option_inner(msg_inner):
if buttons_dct.get(msg_inner.text, None) is None:
bot.send_message(
msg_inner.chat.id,
_(
'❗ Choose only from the suggestions ❗',
user.language
)
)
bot.register_next_step_handler(msg_inner, choose_option_inner)
else:
return buttons_dct.get(msg_inner.text)(msg_inner)
bot.send_message(
msg.chat.id,
currency_parser.to_telegram_string(user.language),
parse_mode='Markdown',
reply_markup=kbs(list(buttons_dct))
)
bot.register_next_step_handler(msg, choose_option_inner)
@bot.message_handler(commands=['make_prediction'])
def make_user_currency_prediction(msg):
user: User = bot.session.user
date = None
iso_from = None
iso_to = None
value = None
def get_date(msg_inner):
nonlocal date
try:
up_to_date = convert_datetime(
convert_from_country_format(msg_inner.text, user.language),
user.timezone
)
assert check_datetime_in_future(up_to_date)
except ValueError:
bot.send_message(
msg_inner.chat.id,
_(
'❗ Please enter the date only in the specified format ❗',
user.language
)
)
bot.register_next_step_handler(msg_inner, get_date)
except AssertionError:
bot.send_message(
msg_inner.chat.id,
_('❗ You cannot enter a past date ❗', user.language)
)
bot.register_next_step_handler(msg_inner, get_date)
else:
date = up_to_date
bot.send_message(
msg_inner.chat.id,
_(
'Enter the ISO-codes of the forecast currency '
'`<ISO>-<ISO>`\nFor example, USD-RUB',
user.language
),
parse_mode='Markdown',
reply_markup=kbs(settings.ACCEPTABLE_CURRENCIES_CONVERTION)
)
bot.register_next_step_handler(msg_inner, get_iso)
def get_iso(msg_inner):
nonlocal iso_from, iso_to
msg_inner.text = settings.ACCEPTABLE_CURRENCIES_CONVERTION.get(
msg_inner.text, msg_inner.text
)
try:
iso_from, iso_to = [x.strip() for x in msg_inner.text.split('-')]
except ValueError:
bot.send_message(
msg_inner.chat.id,
_(
'❗ Enter currency iso codes only'
' in the specified format ❗',
user.language
)
)
else:
if currency_parser.check_rate_exists(iso_from, iso_to):
bot.send_message(
msg_inner.chat.id,
_(
"Enter the forecast result "
"(for example, 27.50, 22300)",
user.language
)
)
return bot.register_next_step_handler(msg_inner, get_value)
else:
bot.send_message(
msg_inner.chat.id,
_(
"❗ This currency does not exist or is not supported"
", please try another one ❗",
user.language
)
)
return bot.register_next_step_handler(msg_inner, get_iso)
def get_value(msg_inner):
nonlocal value
try:
value = float(msg_inner.text.replace(',', '.'))
except ValueError:
bot.send_message(
msg_inner.chat.id, _('❗ Enter only numbers ❗', user.language)
)
bot.register_next_step_handler(msg_inner, get_value)
else:
buttons = [_('Yes', user.language), _('No', user.language)]
bot.send_message(
msg_inner.chat.id,
_(
'Here is the forecast data:\nForecast period: {}'
'\nCurrency: {} - {}\nValue: {}\n.\nConfirm '
'forecast creation?',
user.language
).format(
convert_to_country_format(
adapt_datetime(date, user.timezone), user.language
),
iso_from,
iso_to,
prettify_float(value)
),
reply_markup=kbs(buttons)
)
bot.register_next_step_handler(
msg_inner, confirm_prediction, buttons
)
def resend_prediction_all_users(prediction):
for usr in User.get_all_users(if_all=False):
if usr.to_notify_by_experts:
if Session.db.fetch_count(usr.id) > 0:
bot.send_message(
usr.id,
_(
'*⚜ Experts prediction ⚜*\n*Currencies: {}-{}*\n'
'*Up to:* {}\n*Predicted value:* {}',
usr.language
).format(
prediction.iso_from, prediction.iso_to,
convert_to_country_format(
adapt_datetime(
prediction.up_to_date, usr.timezone
),
usr.language
),
prettify_float(prediction.value)
),
parse_mode='Markdown'
)
Session.db.decrease_count(usr.id)
else:
bot.send_message(
usr.id,
_(
"❗ Your limit on receiving predictions has"
" expired, contact our support team ❗",
usr.language
)
)
def confirm_prediction(msg_inner, buttons):
if msg_inner.text == buttons[0]:
user.create_prediction(
iso_from, iso_to, prettify_float(value), date
)
if user.is_staff:
threading.Thread(
target=resend_prediction_all_users,
args=(user.predictions[-1],), daemon=True
).start()
bot.send_message(
msg_inner.chat.id,
_('The forecast has been created!', user.language)
)
return start_bot(msg_inner)
elif msg_inner.text == buttons[1]:
bot.send_message(
msg_inner.chat.id, _('Forecast not created', user.language)
)
return start_bot(msg_inner)
else:
bot.send_message(
msg_inner.chat.id, _('Response not processed', user.language)
)
return start_bot(msg_inner)
bot.send_message(
msg.chat.id,
_('To exit anywhere, enter {}', user.language).format('/menu')
)
datetime_format = get_country_dt_example(user.language)
datetime_example = convert_to_country_format(
adapt_datetime(get_now(), user.timezone),
user.language
)
bot.send_message(
msg.chat.id,
_(
'Select the forecast validity period in the format `{}`\n'
'For example, {}',
user.language
).format(datetime_format, datetime_example),
parse_mode='Markdown'
)
bot.register_next_step_handler(msg, get_date)
@bot.message_handler(commands=['get_predictions'])
def see_users_currency_predictions(msg):
user = bot.session.user
def see_self_predictions(msg_inner):
preds = {
x.trepr(user): f'get_prediction_{x.id}'
for x in user.get_predictions()
}
kb_inline = inline_kbs(preds, row_width=1)
if len(preds) == 0:
bot.send_message(
msg_inner.chat.id,
_('You have no predictions so far, create one!', user.language)
)
else:
bot.send_message(
msg_inner.chat.id,
_('Here are your predictions', user.language),
reply_markup=kb_inline
)
return see_users_currency_predictions(msg_inner)
def see_other_users_predictions(msg_inner):
if user.is_pro:
experts_str = (
'⚜ Experts predictions ⚜ are:\n'
+
('\n\n'.join([
x.tstr(user)
for x in Prediction.get_experts_predictions()][:5]
) or ' none')
)
if experts_str.endswith('none'):
# if no predictions were concatenated to prefix
experts_str = experts_str.replace('\n', '')
bot.send_message(
msg_inner.chat.id,
_(experts_str, user.language),
)
liked_preds_str = (
'Most liked predictions are:\n'
+
('\n\n'.join([
x.tstr(user)
for x in Prediction.get_most_liked_predictions()][:5]
) or ' none')
)
if liked_preds_str.endswith('none'):
# if no predictions were concatenated to prefix
liked_preds_str = liked_preds_str.replace('\n', '')
bot.send_message(
msg_inner.chat.id,
_(
liked_preds_str,
user.language
),
)
return see_users_currency_predictions(msg_inner)
def liking_system(msg_inner):
try:
rand_pred = Prediction.get_random_prediction()
except exceptions.PredictionDoesNotExistError:
# if no predictions are there
bot.send_message(
msg_inner.chat.id,
_(
'There are no predictions to like yet,'
' you can create one!',
user.language
)
)
return start_bot(msg_inner)
else:
closest = rand_pred.get_closest_neighbours()
previous, nxt = closest['previous'], closest['next']
inline_buttons = {
'👍': f'like_prediction_{rand_pred.id}',
'👎': f'dislike_prediction_{rand_pred.id}'
}
if previous:
inline_buttons['<<'] = f'previous_prediction_to_{rand_pred.id}'
if nxt:
inline_buttons['>>'] = f'next_prediction_to_{rand_pred.id}'
inline_kb = inline_kbs(inline_buttons, row_width=2)
bot.send_message(
msg_inner.chat.id,
_(rand_pred.tstr(user), user.language),
reply_markup=inline_kb
)
return see_users_currency_predictions(msg_inner)
def choose_option_inner(msg_inner):
res_func = buttons.get(msg_inner.text, None)
if res_func is not None:
return res_func(msg_inner)
else:
bot.send_message(
msg_inner.chat.id,
_('❗ Choose only from the suggestions ❗', user.language),
reply_markup=kbs(list(buttons))
)
bot.register_next_step_handler(msg_inner, choose_option_inner)
buttons = {
_('My predictions', user.language): see_self_predictions,
_('Other predictions', user.language): see_other_users_predictions,
_('Participate in the assessment', user.language): liking_system,
_('Main menu', user.language): start_bot
}
bot.send_message(
msg.chat.id,
_('Choose from the following:', user.language),
reply_markup=kbs(list(buttons))
)
bot.register_next_step_handler(msg, choose_option_inner)
def get_prediction_inline_kb_for_liking(pred):
closest = pred.get_closest_neighbours()
previous, nxt = closest['previous'], closest['next']
inline_buttons = {
'👍': f'like_prediction_{pred.id}',
'👎': f'dislike_prediction_{pred.id}'
}
if previous:
inline_buttons['<<'] = f'previous_prediction_to_{pred.id}'
if nxt:
inline_buttons['>>'] = f'next_prediction_to_{pred.id}'
inline_kb = inline_kbs(inline_buttons, row_width=2)
return inline_kb
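# Callback-data scheme (illustrative, for prediction id 7): the keyboard carries
# 'like_prediction_7', 'dislike_prediction_7', 'previous_prediction_to_7' and
# 'next_prediction_to_7', which the handlers below parse back via call.data.split('_').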
@bot.callback_query_handler(
lambda call: (
'next_prediction_to_' in call.data or
'previous_prediction_to_' in call.data
)
)
def get_closest_prediction(call):
action, *data, pred_id = call.data.split('_')
start_pred = Prediction(int(pred_id))
following_pred = start_pred.get_closest_neighbours()[action]
user = bot.session.user
inline_kb = get_prediction_inline_kb_for_liking(following_pred)
bot.edit_message_text(
chat_id=call.message.chat.id,
message_id=call.message.message_id,
text=_(following_pred.tstr(user), user.language),
reply_markup=inline_kb
)
@bot.callback_query_handler(
lambda call: (
'like_prediction_' in call.data or 'dislike_prediction_' in call.data
)
)
def toggle_user_reaction(call):
action, *some_data, pred_id = call.data.split('_')
prediction = Prediction(int(pred_id))
user = bot.session.user
reaction = True if action == 'like' else False
prediction.toggle_like(call.message.chat.id, reaction)
bot.edit_message_text(
chat_id=call.message.chat.id,
message_id=call.message.message_id,
text=_(prediction.tstr(user), user.language),
reply_markup=get_prediction_inline_kb_for_liking(prediction)
)
bot.answer_callback_query(
callback_query_id=call.id,
show_alert=False,
text=_(f'You {action}d this prediction', user.language)
)
@bot.callback_query_handler(lambda call: 'get_prediction_' in call.data)
def get_prediction_details(call):
pred_id = int(call.data.split('_')[-1])
pred = Prediction(pred_id)
user = bot.session.user
bot.edit_message_text(
chat_id=call.message.chat.id,
message_id=call.message.message_id,
text=_(pred.tstr(user), user.language),
reply_markup=inline_kbs({
_('Delete', user.language): f'ask_delete_prediction_{pred_id}',
_('Back', user.language): f'get_user_predictions_{pred.user_id}'
}, row_width=1)
)
@bot.callback_query_handler(lambda call: 'ask_delete_prediction_' in call.data)
def ask_delete_prediction(call):
pred_id = int(call.data.split('_')[-1])
pred = Prediction(pred_id)
user = bot.session.user
if pred.is_actual:
bot.edit_message_text(
chat_id=call.message.chat.id,
message_id=call.message.message_id,
text=_(
"Are you sure you want to delete this prediction:\n{}?",
user.language
).format(pred.trepr(user)),
reply_markup=inline_kbs({
_('Yes', user.language): f'delete_prediction_{pred_id}',
_('No', user.language): f'get_user_predictions_{pred.user_id}'
})
)
else:
bot.edit_message_text(
chat_id=call.message.chat.id,
message_id=call.message.message_id,
text=_('You cannot delete a verified prediction!', user.language),
reply_markup=inline_kbs({
_(
'Back', user.language
): f'get_user_predictions_{pred.user_id}'
})
)
@bot.callback_query_handler(lambda call: 'delete_prediction_' in call.data)
def delete_prediction(call):
pred_id = int(call.data.split('_')[-1])
prediction = Prediction(pred_id)
user = bot.session.user
bot.delete_message(call.message.chat.id, call.message.message_id)
if prediction.is_actual:
prediction.delete()
answer_msg = _(
"Prediction ({}) was deleted",
user.language
).format(prediction.trepr(user))
else:
answer_msg = _(
'You cannot delete a verified prediction!', user.language
)
bot.answer_callback_query(
callback_query_id=call.id,
show_alert=False,
text=answer_msg
)
@bot.callback_query_handler(lambda call: 'get_user_predictions_' in call.data)
def get_user_predictions(call):
user = bot.session.user
kb_inline = inline_kbs({
x.trepr(user): f'get_prediction_{x.id}'
for x in user.get_predictions()
}, row_width=1)
return bot.edit_message_text(
chat_id=call.message.chat.id,
message_id=call.message.message_id,
text=_('Here are your predictions', user.language),
reply_markup=kb_inline
)
@bot.message_handler(commands=['convert'])
def convert_currency(msg):
user = bot.session.user
iso_from = None
iso_to = None
def get_isos(msg_inner):
nonlocal iso_from, iso_to
try:
iso_from, iso_to = [x.upper() for x in msg_inner.text.split('-')]
except ValueError:
bot.send_message(
msg_inner.chat.id,
_(
'❗ Enter currency iso codes'
' only in the specified format ❗',
user.language
)
)
return bot.register_next_step_handler(msg_inner, get_isos)
else:
return print_convertation(msg_inner)
def print_convertation(msg_inner):
nonlocal iso_from, iso_to
try:
rate = currency_parser.get_rate(iso_from, iso_to)
except Exception:
bot.send_message(
msg_inner.chat.id,
_(
"❗ The converter did not find such"
" currencies, please try again ❗",
user.language
)
)
return bot.register_next_step_handler(msg_inner, get_isos)
else:
markup = inline_kbs(
{
i: f"change_currency_converter_amount_to_{i}"
for i in settings.CURRENCY_RATES_CHANGE_AMOUNTS
}
)
bot.send_message(
msg_inner.chat.id,
_('Conversion by {}:\n{} {} - {} {}', user.language).format(
convert_to_country_format(
adapt_datetime(get_now(), user.timezone),
user.language
),
prettify_float(rate[iso_from]),
iso_from,
prettify_float(rate[iso_to]),
iso_to
),
reply_markup=markup
)
return start_bot(msg_inner)
bot.send_message(
msg.chat.id,
_(
'Enter the ISO-codes of currencies `<ISO>-<ISO>`\n'
'For example, USD-RUB',
user.language
),
parse_mode='Markdown'
)
bot.register_next_step_handler(msg, get_isos)
@bot.callback_query_handler(
lambda call: 'change_currency_converter_amount_to_' in call.data
)
def get_callback_for_change_currency_converter_amount(call):
user = bot.session.user
def change_currency_converter_amount(call_inner):
try:
if call_inner.message:
change_amount = call_inner.data.split('_')[-1]
change_amount = float(change_amount)
iso_from, iso_to = [
x.split()
for x in call_inner.message.text.split(':')[-1].split('-')
]
rate = float(iso_to[0].replace(',', '.')) / float(
iso_from[0].replace(',', '.')
)
new_amount = rate * change_amount
markup = inline_kbs(
{
i: f"change_currency_converter_amount_to_{i}"
for i in settings.CURRENCY_RATES_CHANGE_AMOUNTS
}
)
if change_amount == float(iso_from[0]):
# if we try to set the same text as before, an error occurs
return bot.answer_callback_query(
callback_query_id=call_inner.id,
show_alert=False,
text=_(
f"Amount is already {change_amount}",
user.language
)
)
else:
bot.edit_message_text(
chat_id=call_inner.message.chat.id,
message_id=call_inner.message.message_id,
text=_(
'Conversion by {}:\n{} {} - {} {}',
user.language
).format(
convert_to_country_format(
adapt_datetime(get_now(), user.timezone),
user.language
),
prettify_float(change_amount),
iso_from[1],
prettify_float(new_amount),
iso_to[1]
),
reply_markup=markup
)
bot.answer_callback_query(
callback_query_id=call_inner.id,
show_alert=False,
text=_(
"Amount on {}-{} changed to {}",
user.language
).format(iso_from[1], iso_to[1], change_amount)
)
except Exception as e:
print(repr(e))
def ask_sum(msg, call_inner, to_delete: list):
try:
value = float(msg.text.replace(',', '.'))
except ValueError:
warning_msg = bot.send_message(
msg.chat.id, _('❗ Enter only numbers ❗', user.language)
)
to_delete = list(to_delete) + [msg, warning_msg]
bot.register_next_step_handler(msg, ask_sum, call_inner, to_delete)
else:
call_inner.data = f"change_currency_converter_amount_to_{value}"
try:
# delete messages
for msg_ in to_delete:
bot.delete_message(msg_.chat.id, msg_.message_id)
bot.delete_message(msg.chat.id, msg.message_id)
except Exception as e:
# permission to delete messages was not received
print(repr(e))
return change_currency_converter_amount(call_inner)
def set_amount_to_1(call_inner):
call_inner.data = f"change_currency_converter_amount_to_{1}"
return change_currency_converter_amount(call_inner)
if call.message:
command = call.data.split('_')[-1]
if command == '...':
# bot.clear_step_handler(call.message)
msg_to_delete = bot.send_message(
call.message.chat.id,
_(
'Enter new amount',
user.language
)
)
return bot.register_next_step_handler(
call.message, ask_sum, call, [msg_to_delete]
)
elif command == 'Reset':
return set_amount_to_1(call)
def change_alarms(msg, buttons):
user = bot.session.user
func = buttons.get(msg.text, None)
if func is None:
bot.send_message(
msg.chat.id,
_(
"❗ I can't understand your request, please try again ❗",
user.language
),
reply_markup=kbs(list(buttons), row_width=2)
)
return bot.register_next_step_handler(
msg,
change_alarms,
buttons
)
else:
return func(msg)
@bot.message_handler(commands=['toggle_alarms'])
def toggle_user_alarms(msg):
user = bot.session.user
user.update(is_active=not user.is_active)
bot.send_message(
msg.chat.id,
_(
f"Notifications {'en' if user.is_active else 'dis'}abled",
user.language
)
)
return start_bot(msg)
@bot.message_handler(commands=['toggle_experts_predictions'])
def toggle_user_experts_predictions(msg):
user = bot.session.user
user.update(to_notify_by_experts=not user.to_notify_by_experts)
bot.send_message(
msg.chat.id,
_(
"Experts' predictions {}abled".format(
'en' if user.to_notify_by_experts else 'dis'
),
user.language
)
)
return start_bot(msg)
@bot.message_handler(commands=['me'])
def see_user_info(msg):
u = bot.session.user
is_subscribed = (
f'until {convert_to_country_format(u.is_pro, u.language)}'
if isinstance(u.is_pro, datetime.datetime) else
'yes' if u.is_pro is True else 'no'
)
info = (
f"User @{msg.from_user.username}\n" +
f"Telegram ID: {u.id}\n" +
f"Subscription: {is_subscribed}\n" +
f"Staff: {'yes' if u.is_staff else 'no'}\n" +
f"Time zone: {prettify_utcoffset(u.timezone)}\n" +
f"Notifications: {'enabled' if u.is_active else 'disabled'}\n" +
"Experts' predictions: {}\n".format(
'enabled' if u.to_notify_by_experts else 'disabled'
) +
User.prettify_rates(u.rates)
)
bot.send_message(msg.chat.id, _(info, u.language))
return start_bot(msg)
@settings.logger.catch_error
@bot.message_handler(commands=['change_delta'])
def change_user_rate_percent_delta(msg):
user = bot.session.user
currency = None
def inner1(msg_inner):
nonlocal currency
if msg_inner.text in user.rates:
currency = msg_inner.text
bot.send_message(
msg_inner.chat.id,
_(
"Your interest on {} - {}\nSelect the amount of interest",
user.language
).format(
currency,
prettify_percent(
user.rates.get(currency).get('percent_delta')
)
),
reply_markup=kbs(settings.PERCENTAGES)
)
bot.register_next_step_handler(msg_inner, inner2)
else:
bot.send_message(
msg_inner.chat.id,
'❗ Please enter only valid currencies ❗',
reply_markup=kbs(settings.CURRENCIES)
)
bot.register_next_step_handler(msg_inner, inner1)
def inner2(msg_inner):
nonlocal currency
try:
if 'inf' not in msg_inner.text:
delta = float(msg_inner.text) / 100
assert 0 < delta < 1
else:
raise ValueError
except ValueError:
bot.send_message(
msg_inner.chat.id,
_("❗ Enter only numbers ❗", user.language)
)
return bot.register_next_step_handler(msg_inner, inner2)
except AssertionError:
bot.send_message(
msg_inner.chat.id,
_("❗ Percent must be in range from 0 to 100 ❗", user.language)
)
return bot.register_next_step_handler(msg_inner, inner2)
user.update_rates(currency, percent_delta=delta)
bot.send_message(
msg_inner.chat.id,
_("Your percentage is now {}", user.language).format(
prettify_percent(delta)
)
)
return start_bot(msg_inner)
kb = kbs(list(user.rates))
bot.send_message(
msg.chat.id,
_("Выберите валюту изменения процентов", user.language),
reply_markup=kb
)
return bot.register_next_step_handler(msg, inner1)
@settings.logger.catch_error
@bot.message_handler(commands=['change_checktime'])
def change_user_rate_check_times(msg):
user = bot.session.user
available_times = copy.deepcopy(settings.CHECK_TIMES)
chosen_times = []
start = (
settings.UNSUBSCIRBED_USER_CHECK_TIMES
if not user.is_pro else
settings.SUBSCIRBED_USER_CHECK_TIMES
)
currency = None
def inner1(msg_inner):
nonlocal currency
if msg_inner.text in user.rates:
currency = msg_inner.text
if user.is_pro:
bot.send_message(
msg_inner.chat.id,
_(
"You subscribed ⚜ and you are presented"
" with all possible alert times!",
user.language
)
)
return start_bot(msg_inner)
else:
bot.send_message(
msg_inner.chat.id,
_(
'Your alert times for {} - {}',
user.language
).format(
currency,
','.join(
adapt_check_times(
user.rates.get(currency).get('check_times'),
user.timezone
)
)
)
)
bot.send_message(
msg_inner.chat.id,
_(
'Select {} time(s)',
user.language
).format(start),
reply_markup=kbs(
adapt_check_times(available_times, user.timezone)
)
)
bot.register_next_step_handler(msg_inner, inner2, start)
else:
bot.send_message(
msg_inner.chat.id,
_('❗ Please enter only valid currencies ❗', user.language),
reply_markup=kbs(
adapt_check_times(settings.CURRENCIES, user.timezone)
)
)
bot.register_next_step_handler(msg_inner, inner1)
def inner2(msg_inner, iteration_num):
nonlocal chosen_times, available_times
try:
if msg_inner.text in available_times:
time.strptime(msg_inner.text, '%H:%M')
iteration_num -= 1
available_times.remove(msg_inner.text)
chosen_times.append(msg_inner.text)
else:
raise ValueError
if iteration_num == 0:
chosen_times = sorted(
chosen_times,
key=lambda x: int(x.split(':')[0])
)
user.update_rates(currency, check_times=chosen_times)
bot.send_message(
msg_inner.chat.id,
_(
'Your alert times for {} - {}',
user.language
).format(
currency,
", ".join(chosen_times)
)
)
return start_bot(msg_inner)
except ValueError: # if time not in CHECK_TIMES or time is not valid
bot.send_message(
msg_inner.chat.id,
_(
"❗ Please enter only available dates ❗",
user.language
)
)
return bot.register_next_step_handler(
msg_inner, inner2, iteration_num
)
else:
bot.send_message(
msg_inner.chat.id,
_(
f"Enter more {iteration_num} time(s)",
user.language),
reply_markup=kbs(
adapt_check_times(available_times, user.timezone)
)
)
bot.register_next_step_handler(msg_inner, inner2, iteration_num)
kb = kbs(user.rates.keys())
bot.send_message(
msg.chat.id,
_("Select the currency of the alert time change", user.language),
reply_markup=kb
)
return bot.register_next_step_handler(msg, inner1)
@settings.logger.catch_error
@bot.message_handler(commands=['change_timezone'])
def change_user_timezone(msg):
user = bot.session.user
timezones = {
prettify_utcoffset(zone): zone
for zone in range(-11, 13)
}
def accept_input(msg_inner):
res_timezone = timezones.get(msg_inner.text, None)
if res_timezone is None:
bot.send_message(
msg_inner.chat.id,
_(
'❗ Please enter only suggested time zones ❗',
user.language,
),
reply_markup=kbs(list(timezones), row_width=2)
)
bot.register_next_step_handler(msg_inner, accept_input)
else:
user.update(timezone=res_timezone)
bot.send_message(
msg_inner.chat.id,
_(
'Now your time zone is {}',
user.language
).format(prettify_utcoffset(user.timezone))
)
return start_bot(msg_inner)
bot.send_message(
msg.chat.id,
_(
'Your current time zone is {}\nPlease select your time zone',
user.language
).format(prettify_utcoffset(user.timezone)),
reply_markup=kbs(list(timezones), row_width=2)
)
bot.register_next_step_handler(msg, accept_input)
def other_user_currencies_menu(msg):
user = bot.session.user
buttons = {
_("Add new currency", user.language): add_new_currency,
_("Delete currency", user.language): delete_user_currency,
_("Back", user.language): start_bot
}
def next_step(msg_inner):
option = buttons.get(msg_inner.text, None)
if option is None:
bot.send_message(
msg_inner.chat.id,
_('❗ Choose only from the suggestions ❗', user.language)
)
bot.register_next_step_handler(msg_inner, next_step)
else:
return option(msg_inner)
bot.send_message(
msg.chat.id,
_('Choose from the following:', user.language),
reply_markup=kbs(list(buttons), row_width=3)
)
bot.register_next_step_handler(msg, next_step)
@settings.logger.catch_error
def delete_user_currency(msg):
user = bot.session.user
curr = None
deletable_currencies = list(
set(user.rates).difference(set(settings.CURRENCIES))
)
answer_options = {
_("Yes", user.language): True,
_("No", user.language): False
}
def confirm_deletion(msg_inner):
option = answer_options.get(msg_inner.text, None)
if option is True:
user.delete_rate(curr)
bot.send_message(
msg_inner.chat.id,
_("Currency {} was deleted", user.language).format(curr)
)
elif option is False:
bot.send_message(
msg_inner.chat.id,
_("Currency {} wasn't deleted", user.language).format(curr)
)
elif option is None:
bot.send_message(
msg_inner.chat.id,
_(
"I don't understand your answer,"
" returning to the main menu...",
user.language
)
)
return start_bot(msg_inner)
def choose_currency_to_delete(msg_inner):
nonlocal curr
curr = msg_inner.text
if curr in deletable_currencies:
bot.send_message(
msg_inner.chat.id,
_(
"Are you sure you want to delete this currency: {}?",
user.language
).format(curr),
reply_markup=kbs(list(answer_options))
)
bot.register_next_step_handler(msg_inner, confirm_deletion)
else:
if curr == _("Back", user.language):
return start_bot(msg_inner)
elif curr in settings.CURRENCIES:
bot.send_message(
msg_inner.chat.id,
_("❗ You can't delete default currencies ❗", user.language)
)
else:
bot.send_message(
msg_inner.chat.id,
_("❗ This currency is not supported ❗", user.language)
)
bot.register_next_step_handler(
msg_inner, choose_currency_to_delete
)
if len(deletable_currencies) > 0:
bot.send_message(
msg.chat.id,
_("Choose currency to delete", user.language),
reply_markup=kbs(
deletable_currencies + [_("Back", user.language)],
one_time_keyboard=False
)
)
bot.register_next_step_handler(msg, choose_currency_to_delete)
else:
bot.send_message(
msg.chat.id,
_("You have no extra currencies to delete", user.language)
)
return start_bot(msg)
@settings.logger.catch_error
def add_new_currency(msg):
user = bot.session.user
def ask_new_iso(msg_inner):
iso = msg_inner.text
try:
rate = currency_parser.get_rate(iso, "USD").get("USD")
except ValueError:
bot.send_message(
msg_inner.chat.id,
_(
'❗ This currency does not exist or is not supported,'
' please try another one ❗',
user.language
)
)
bot.register_next_step_handler(msg_inner, ask_new_iso)
else:
if iso in user.rates:
bot.send_message(
msg_inner.chat.id,
_(
'❗ The currency is already on your currency list ❗',
user.language
)
)
return start_bot(msg_inner)
elif user.is_pro:
user.add_rate(
iso, value=rate, check_times=settings.CHECK_TIMES
)
bot.send_message(
msg_inner.chat.id,
_(
'New currency has been created successfully!\n'
'Now the rate is {} - {} USD',
user.language
).format(iso, rate)
)
return start_bot(msg_inner)
bot.send_message(
msg.chat.id,
_('Enter the ISO-code of the new currency', user.language),
reply_markup=kbs(['RUB', 'EUR', 'UAH', 'BYN'])
)
bot.register_next_step_handler(msg, ask_new_iso)
@settings.logger.catch_error
@bot.message_handler(commands=['subscription'])
def buy_subscription(msg):
user = bot.session.user
json_config = get_json_config()
prices_json_list = json_config.get('subscriptionPrices')
start_price = json_config.get('subscriptionStartPrice')
prices = [
[
LabeledPrice(
label=f"Cost of subscription for {p.get('period')} month" + (
's' if p.get('period') > 1 else ''
),
amount=int(prettify_float(start_price * p.get('period')) * 100)
)
] + ([
LabeledPrice(
label=f'Discount {p.get("discount")*100}%',
amount=-int(prettify_float(
start_price * p.get('period') * p.get('discount')
) * 100)
# * 100 because `amount` is interpreted in cents
)
] if p.get('discount') > 0 else [])
for p in prices_json_list
]
prices_easy = {
price.get('period'): price.get('discount')
for price in prices_json_list
}
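# Worked example (hypothetical config: start_price = 10 USD, period = 3, discount = 0.1):
#   base line:     LabeledPrice(amount=int(10 * 3 * 100))        ->  3000 cents ($30.00)
#   discount line: LabeledPrice(amount=-int(10 * 3 * 0.1 * 100)) ->  -300 cents (-$3.00)
# Telegram sums the labeled lines, so the invoice total comes to $27.00.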
def confirm_payment(msg_inner):
if msg_inner.text == _('Yes, I want to!', user.language):
prices_str = ''
for price in prices_json_list:
period = price.get('period')
word_ending = '' if period == 1 else 's'
total_sum = int(substract_percent(
period * start_price, price.get('discount')
))
prices_str += f'\n{period} month{word_ending} - {total_sum} $'
bot.send_message(
msg_inner.chat.id,
_(
'Great!\nChoose the Subscription duration (in months)\n'
f'{prices_str}',
user.language
),
reply_markup=kbs(list(prices_easy))
)
bot.register_next_step_handler(msg_inner, get_months_number)
elif msg_inner.text == _('No, thanks', user.language):
bot.send_message(
msg_inner.chat.id, _('Okay, we\'ll wait!', user.language)
)
return start_bot(msg_inner)
else:
bot.send_message(
msg_inner.chat.id,
_(
"I don't understand your answer, "
"returning to the main menu...",
user.language
)
)
return start_bot(msg_inner)
def get_months_number(msg_inner):
months = msg_inner.text
if not (months.isdigit() and int(months) in prices_easy):
bot.send_message(
msg_inner.chat.id,
_('❗ Please enter only suggested values ❗', user.language),
reply_markup=kbs(list(prices_easy))
)
bot.register_next_step_handler(msg_inner, get_months_number)
else:
price = [
(y, x)
for x, y in zip(list(prices_easy), prices)
if x == int(months)
][0]
bot.send_message(
msg_inner.chat.id,
_(
'❗ Pay as soon as you receive the invoice, '
'otherwise the payment may not be received ❗',
user.language
)
)
return command_pay(msg_inner, *price)
def command_pay(msg_inner, prices_inner, n_months: int = None):
bot.send_invoice(
msg_inner.chat.id,
title=_('Subscription', user.language),
description=_(
"You pay for a Subscription for {} month(s)",
user.language
).format(n_months),
provider_token=settings.PAYMENT_TOKEN,
currency='usd',
photo_url='https://i1.wp.com/bestservices.reviews/wp-content/'
'uploads/2019/09/Subscription-Billing.jpg?w=1200&ssl=1',
photo_height=300, # !=0/None or picture won't be shown
photo_width=600,
photo_size=512,
start_parameter='subscription-telegram-bot',
is_flexible=False, # True If you need to set up Shipping Fee
prices=prices_inner,
invoice_payload=f"{n_months}"
)
if not user.is_pro:
bot.send_message(
msg.chat.id,
_(
'When buying a Subscription, you get access to:\n'
'1. Unlimited number of alerts per day\n'
'2. Forecasts from experts\n'
'3. Adding your currencies to alerts\n'
'And more! \n\nBuy a Subscription '
'today, and you will not regret it',
user.language
),
reply_markup=kbs([
_('Yes, I want to!', user.language),
_('No, thanks', user.language)
])
)
bot.register_next_step_handler(msg, confirm_payment)
else:
bot.send_message(
msg.chat.id,
_('You have already subscribed!', user.language)
)
return start_bot(msg)
@bot.pre_checkout_query_handler(func=lambda query: True)
def checkout_handler(pre_checkout_query):
user = User(pre_checkout_query.from_user.id)
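# Telegram expects every pre-checkout query to be answered within 10 seconds,
# otherwise the payment is cancelled; ok=True approves the order, and
# error_message is only shown to the user when ok=False.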
bot.answer_pre_checkout_query(
pre_checkout_query.id,
ok=True,
error_message=_(
"Oops, some error occurred, please try again later",
user.language
)
)
@bot.message_handler(content_types=['successful_payment'])
def subscription_payment_success(msg):
user = bot.session.user
n_months = int(msg.successful_payment.invoice_payload)
datetime_expires = get_now() + datetime.timedelta(days=n_months*31)
user.init_premium(datetime_expires)
bot.send_message(
msg.chat.id,
_(
"You have activated the Subscription until {}\nHappy trades!",
user.language
).format(
convert_to_country_format(
adapt_datetime(datetime_expires, user.timezone),
user.language
)
)
)
settings.logger.info(
"{} paid for subscription until {}".format(
str(user), adapt_datetime(datetime_expires, 0)
)
)
return start_bot(msg)
@bot.message_handler(commands=['language'])
def change_language(msg):
user = bot.session.user
buttons = [_('Russian 🇷🇺', user.language), _('English 🇬🇧', user.language)]
def confirm_language(msg_inner):
if buttons[0] == msg_inner.text:
user.update(language='ru')
elif buttons[1] == msg_inner.text:
user.update(language='en')
else:
bot.send_message(
msg_inner.chat.id,
_(
"❗ Choose only from the suggested languages ❗",
user.language
),
reply_markup=kbs(buttons)
)
return bot.register_next_step_handler(
    msg_inner, confirm_language
)
bot.send_message(
msg_inner.chat.id,
_("Language changed successfully", user.language)
)
return start_bot(msg_inner)
bot.send_message(
msg.chat.id,
_(
'At the moment, the service has two languages: '
'Russian 🇷🇺 and English 🇬🇧',
user.language
),
reply_markup=kbs(buttons)
)
bot.register_next_step_handler(msg, confirm_language)
@bot.message_handler(commands=['techsupport'])
def send_techsupport_message(msg):
user = bot.session.user
if not user.is_staff:
bot.send_message(
msg.chat.id,
_(
'⚙ This is techsupport of @{} ⚙\n'
'Feel free to send us any feedbacks about this bot,'
' we are always grateful for your help!',
user.language
).format(bot.get_me().username),
reply_markup=inline_kbs(
{
_(
'Send message to Techsupport', user.language
): 'send_message_to_techsupport'
}
)
)
else:
bot.send_message(
msg.chat.id,
_('⚙ You are already a staff member ⚙', user.language)
)
return start_bot(msg)
@bot.callback_query_handler(
lambda call: call.data == 'send_message_to_techsupport'
)
def send_message_to_techsupport(call):
def send_message(msg):
answer_msg = ''
support_id = None
try:
for support_id in get_json_config().get('techsupportIds'):
bot.forward_message(
chat_id=support_id,
from_chat_id=msg.chat.id,
message_id=msg.message_id
)
except Exception:
answer_msg = _("Some error occurred", user.language)
print(f"ERROR: cannot send support message to {support_id}")
else:
answer_msg = _("Your message was received", user.language)
finally:
bot.send_message(msg.chat.id, answer_msg)
bot.clear_step_handler(msg)
return start_bot(msg)
if call.message:
user = bot.session.user
bot.edit_message_text(
chat_id=call.message.chat.id,
message_id=call.message.message_id,
text=call.message.text
) # make the button disappear
bot.send_message(
user.id,
_(
'Write a message to techsupport ({} - return to the menu)',
user.language
).format('/menu')
)
bot.register_next_step_handler(call.message, send_message)
@bot.message_handler(commands=['help'])
def send_bot_help(msg):
user = bot.session.user
help_message = "Bot's commands:\n" + '\n'.join([
'{} - %s' % v for k, v in bot.full_bot_commands.items()
])
bot.send_message(
msg.chat.id,
_(
help_message,
user.language
).replace('{ }', '{}').format(*list(bot.full_bot_commands.keys()))
)
return start_bot(msg, to_show_commands=False)
###############################################################################
@schedule.repeat(schedule.every(3).minutes)
def update_rates():
for parser in currency_parser.parsers.values():
if not parser.update_value(safe=True):
settings.logger.error(f"Rate {parser.iso}-USD can not be updated")
settings.logger.debug("Rates updated")
@schedule.repeat(schedule.every(10).minutes)
def update_proxies():
proxies = get_proxy_list()
for parser in currency_parser.parsers.values():
parser.proxy_list = proxies
settings.logger.debug(f"Proxies updated, length: {len(proxies)}")
@schedule.repeat(schedule.every(3).minutes)
@settings.logger.catch_error
def check_premium_ended():
def check_user_premium_ended(usr):
if not check_datetime_in_future(usr.is_pro):
bot.send_message(
usr.id,
_(
'Your premium has expired, but you can always refresh it!',
usr.language
)
)
usr.delete_premium()
settings.logger.info(f"{usr} lost premium")
with futures.ThreadPoolExecutor(max_workers=50) as executor:
for user in User.get_pro_users(only_temp=True):
executor.submit(check_user_premium_ended, user)
@schedule.repeat(schedule.every().minutes.at(':00'))
@settings.logger.catch_error
def verify_predictions():
for pred in Prediction.get_unverified_predictions():
user = User(pred.user_id)
try:
pred_res = currency_parser.get_rate(pred.iso_from, pred.iso_to)
except exceptions.ParsingError:
settings.logger.error(
f"Rate {pred.iso_from}-{pred.iso_to} is unreachable"
)
user.create_prediction(
pred.iso_from,
pred.iso_to,
pred.value,
pred.up_to_date + datetime.timedelta(0, 5*60) # 5 minutes
)
bot.send_message(
pred.user_id,
_(
"The rates are unreachable, "
"the prediction `{}` was scheduled for 5 minutes later",
user.language
).format(pred.trepr(user))
)
pred.delete(force=True)
else:
pred.update(real_value=pred_res.get(pred.iso_to))
diff = currency_parser.calculate_difference(
old=pred.value, new=pred.real_value
)
bot.send_message(
pred.user_id,
_(
'Results of `{}`:\n*Predicted value:* {}\n'
'*Real value:* {}\n*Percentage difference:* {}',
user.language
).format(
pred.trepr(user),
prettify_float(pred.value),
prettify_float(pred.real_value),
prettify_percent(
diff.get('percentage_difference'), to_sign=True
)
),
parse_mode='Markdown'
)
settings.logger.debug(f"{str(pred)} verified")
@schedule.repeat(schedule.every().minutes.at(':00'))
@settings.logger.catch_error
def start_alarms():
t = get_now().strftime('%H:%M')
with futures.ThreadPoolExecutor(max_workers=50) as executor:
for user in User.get_users_by_check_time(t):
executor.submit(send_alarm, user, t)
@settings.logger.catch_error
def send_alarm(user, t):
for k, v in user.get_currencies_by_check_time(t).items():
try:
rate = currency_parser.check_delta(
k, 'USD',
v.get('value'), v.get('percent_delta')
)
except exceptions.ParsingError:
settings.logger.error(f"Rate {k}-USD is unreachable")
bot.send_message(
user.id,
_(
"The rates are not available, "
"the notification can not be sent",
user.language
)
)
else:
if rate.get('new', None) is not None:
new, old = rate.get('new'), rate.get('old')
user.update_rates(k, value=new)
try:
bot.send_message(
user.id,
_(
'*Notification*\n*{}* = *{} USD*\n'
'The change: *{:+} ({})*\n'
'Previous: *{} = {} USD *',
user.language
).format(
k,
prettify_float(new),
prettify_float(rate.get('difference')),
prettify_percent(
rate.get('percentage_difference'),
to_sign=True
),
k,
prettify_float(old)
),
parse_mode='Markdown'
)
settings.logger.debug(
f"Sent '{k}-USD' alarm for {str(user)}"
)
except telebot.apihelper.ApiTelegramException:
# from traceback: "Bad Request: chat not found"
user.update(is_active=0)
settings.logger.warning(f"{str(user)} is not reachable")
# not to notify anymore, since chat is not reachable
def schedule_thread():
while True:
schedule.run_pending()
time.sleep(1)
def main():
import logging
telebot.logger.setLevel(logging.DEBUG)
settings.logger.set_level('debug')
settings.logger.info("Bot started")
threading.Thread(target=schedule_thread, daemon=True).start()
bot.polling()
settings.logger.info("Bot stopped")
###############################################################################
if __name__ == '__main__':
main()
|
simulator.py
|
import Tkinter as tk
import paho.mqtt.client as mqtt
from threading import Thread
import json
import time
broker = "localhost"
client = mqtt.Client(client_id="c1", clean_session=False)
client.connect(broker)
client.loop_start()
root = tk.Tk()
root.geometry('%dx%d+%d+%d'%(370,250,200,150))
root.title('Node-Simulator')
w = tk.Label(root,text="rt :",fg="black").grid(row=0)
w = tk.Label(root,text="bn :",fg="black").grid(row=1)
w = tk.Label(root,text="id :",fg="black").grid(row=2)
w = tk.Label(root,text="---Parameters---",fg="black").grid(row=3)
w = tk.Label(root,text="Parameter name :",fg="black").grid(row=4)
w = tk.Label(root,text="Parameter value :",fg="black").grid(row=5)
w = tk.Label(root,text="Parameter Unit :",fg="black").grid(row=6)
var = tk.StringVar(root)
var.set("choose resource type")
var2 = tk.StringVar(root)
var2.set("choose base name")
#w = tk.Label(root,text="value :",fg="black").grid(row=1)
#text1 = tk.Entry(root)
#text1.grid(row=1,column=1)
text2 = tk.Entry(root)
text2.grid(row=2,column=1)
text3 = tk.Entry(root)
text3.grid(row=4,column=1)
text4 = tk.Entry(root)
text4.grid(row=5,column=1)
text5 = tk.Entry(root)
text5.grid(row=6,column=1)
rt_list=["","oic.r.temperature","oic.r.humidity"]
option = tk.OptionMenu(root,var,*rt_list).grid(row=0,column=1)
bn_list=["","IIITb/IoTLab/test/temperature","IIITb/IoTLab/test/humidity"]
option2 = tk.OptionMenu(root,var2,*bn_list).grid(row=1,column=1)
def endclient():
    # Keep this helper thread alive without busy-spinning the CPU;
    # disconnect cleanly if interrupted.
    global client
    while 1:
        try:
            time.sleep(1)
        except KeyboardInterrupt:
            client.disconnect()
            exit()
def select():
global var,var2,text2,text3,text4,text5,client
payload={}
payload["rt"]=var.get()
payload["bn"]=var2.get()
payload["id"]=text2.get()
payload["e"]=[{"n":text3.get(),"v":text4.get(),"u":text5.get()}]
rc=client.publish(payload['bn'],json.dumps(payload),retain=True)
print ("rc=",rc)
#print var.get()
#print text1.get()
print (payload)
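# Illustrative message (hypothetical field values) as published to the broker;
# the topic comes from the "bn" (base name) field and the payload is a
# SenML-like JSON document:
#   topic:   "IIITb/IoTLab/test/temperature"
#   payload: {"rt": "oic.r.temperature", "bn": "IIITb/IoTLab/test/temperature",
#             "id": "node-1", "e": [{"n": "temp", "v": "25.4", "u": "Cel"}]}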
if __name__ == '__main__':
thread = Thread(target = endclient)
thread.setDaemon(True)
thread.start()
button = tk.Button(root,text="Create",command=lambda: select()).grid(row=7,column=2)
root.mainloop()
|
server.py
|
import sys
import os
sys.path.insert(1,os.path.dirname(os.getcwd()))
import threading
import sqlite3 as sql
import imqserver.db_utils as du
from imqserver.server_util import *
from imqserver.database import *
class Server(object):
def __init__(self,db_name):
self.db = DATABASE(DATABASE_NAME=db_name)
self.server = ServerUtils(self.db)
def run_server(self):
print("Server is listening!!!")
receive_thread=threading.Thread(target=self.server.receive,args=(self.db,))
receive_thread.start()
manage_thread = threading.Thread(target=self.server.ManageQueue)
manage_thread.start()
if __name__== "__main__":
new_server = Server("client_server.db")
new_server.run_server()
|
example_204_no_content.py
|
# -*- coding: utf8 -*-
import time
import threading
from icapserver import *
set_logger('debug')
class ExampleICAPHandler(BaseICAPRequestHandler):
def example_OPTIONS(self):
self.set_icap_response(200)
self.set_icap_header('Methods', 'RESPMOD, REQMOD')
self.set_icap_header('Service', 'ICAP Server' + ' ' + self._server_version)
self.set_icap_header('Options-TTL', '3600')
self.set_icap_header('Preview', '0')
self.send_headers(False)
def example_REQMOD(self):
self.no_adaptation_required()
def example_RESPMOD(self):
self.no_adaptation_required()
class ExampleICAPServer():
def __init__(self, addr='', port=13440):
self.addr = addr
self.port = port
def start(self):
self.server = ICAPServer((self.addr, self.port), ExampleICAPHandler)
self.thread = threading.Thread(target=self.server.serve_forever)
self.thread.start()
return True
def stop(self):
self.server.shutdown()
self.server.server_close()
self.thread.join(2)
return True
try:
server = ExampleICAPServer()
server.start()
print 'Use Control-C to exit'
while True:
time.sleep(1)
except KeyboardInterrupt:
server.stop()
print "Finished"
|
train_pg_f18.py
|
"""
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Michael Chang and Soroush Nasiriany
"""
import numpy as np
import tensorflow as tf
import gym
import logz
import os
import time
import inspect
from multiprocessing import Process
#============================================================================================#
# Utilities
#============================================================================================#
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):
"""
Builds a feedforward neural network
arguments:
input_placeholder: placeholder variable for the state (batch_size, input_size)
output_size: size of the output layer
scope: variable scope of the network
n_layers: number of hidden layers
size: dimension of the hidden layer
activation: activation of the hidden layers
output_activation: activation of the output layer
returns:
output placeholder of the network (the result of a forward pass)
Hint: use tf.layers.dense
"""
# YOUR CODE HERE
output_placeholder = None
with tf.variable_scope(scope):
hidden_layer = input_placeholder
for _ in range(n_layers):
hidden_layer = tf.layers.Dense(size, activation=activation)(hidden_layer)
output_placeholder = tf.layers.Dense(output_size, activation=output_activation)(hidden_layer)
return output_placeholder
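# Minimal usage sketch (names are illustrative, not defined at this point in
# the file): build the logits network of a discrete policy with 2 tanh hidden
# layers of 64 units each.
#   ob_ph = tf.placeholder(tf.float32, shape=[None, ob_dim])
#   logits = build_mlp(ob_ph, output_size=ac_dim, scope="policy",
#                      n_layers=2, size=64)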
def pathlength(path):
return len(path["reward"])
def setup_logger(logdir, locals_):
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_PG)[0]
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
#============================================================================================#
# Policy Gradient
#============================================================================================#
class Agent(object):
def __init__(self, computation_graph_args, sample_trajectory_args, estimate_return_args):
super(Agent, self).__init__()
self.ob_dim = computation_graph_args['ob_dim']
self.ac_dim = computation_graph_args['ac_dim']
self.discrete = computation_graph_args['discrete']
self.size = computation_graph_args['size']
self.n_layers = computation_graph_args['n_layers']
self.learning_rate = computation_graph_args['learning_rate']
self.animate = sample_trajectory_args['animate']
self.max_path_length = sample_trajectory_args['max_path_length']
self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
self.gamma = estimate_return_args['gamma']
self.reward_to_go = estimate_return_args['reward_to_go']
self.nn_baseline = estimate_return_args['nn_baseline']
self.normalize_advantages = estimate_return_args['normalize_advantages']
def init_tf_sess(self):
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
self.sess = tf.Session(config=tf_config)
self.sess.__enter__() # equivalent to `with self.sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def define_placeholders(self):
"""
Placeholders for batch observations / actions / advantages in policy gradient
loss function.
See Agent.build_computation_graph for notation
returns:
sy_ob_no: placeholder for observations
sy_ac_na: placeholder for actions
sy_adv_n: placeholder for advantages
"""
sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name="ob", dtype=tf.float32)
if self.discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32)
sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
return sy_ob_no, sy_ac_na, sy_adv_n
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def policy_forward_pass(self, sy_ob_no):
""" Constructs the symbolic operation for the policy network outputs,
which are the parameters of the policy distribution p(a|s)
arguments:
sy_ob_no: (batch_size, self.ob_dim)
returns:
the parameters of the policy.
if discrete, the parameters are the logits of a categorical distribution
over the actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous, the parameters are a tuple (mean, log_std) of a Gaussian
distribution over actions. log_std should just be a trainable
variable, not a network output.
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
Hint: use the 'build_mlp' function to output the logits (in the discrete case)
and the mean (in the continuous case).
Pass in self.n_layers for the 'n_layers' argument, and
pass in self.size for the 'size' argument.
"""
if self.discrete:
sy_logits_na = build_mlp(sy_ob_no, self.ac_dim, "network",
self.n_layers, self.size)
return sy_logits_na
else:
sy_mean = build_mlp(sy_ob_no, self.ac_dim, "network",
self.n_layers, self.size)
sy_logstd = tf.get_variable("logstd", shape=[self.ac_dim], trainable=True,
dtype=tf.float32)
return (sy_mean, sy_logstd)
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def sample_action(self, policy_parameters):
""" Constructs a symbolic operation for stochastically sampling from the policy
distribution
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
returns:
sy_sampled_ac:
if discrete: (batch_size,)
if continuous: (batch_size, self.ac_dim)
Hint: for the continuous case, use the reparameterization trick:
The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
mu + sigma * z, z ~ N(0, I)
This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
"""
if self.discrete:
sy_logits_na = policy_parameters
actions = tf.random.categorical(sy_logits_na, 1, dtype=tf.int32)
sy_sampled_ac = tf.squeeze(actions, 1)
else:
sy_mean, sy_logstd = policy_parameters
normal_sample = tf.random_normal(tf.shape(sy_mean))
sy_sampled_ac = sy_mean + tf.exp(sy_logstd) * normal_sample
return sy_sampled_ac
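# Note on the continuous branch above: this is the reparameterization trick
# from the hint -- draw z ~ N(0, I) with tf.random_normal and return
# mu + exp(logstd) * z, which is distributed as N(mu, exp(logstd)^2).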
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def get_log_prob(self, policy_parameters, sy_ac_na):
""" Constructs a symbolic operation for computing the log probability of a set of actions
that were actually taken according to the policy
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
sy_ac_na:
if discrete: (batch_size,)
if continuous: (batch_size, self.ac_dim)
returns:
sy_logprob_n: (batch_size,)
Hint:
For the discrete case, use the log probability under a categorical distribution.
For the continuous case, use the log probability under a multivariate gaussian.
"""
if self.discrete:
sy_logits_na = policy_parameters
# log-softmax is numerically more stable than tf.log(tf.math.softmax(...))
logprobs = tf.nn.log_softmax(sy_logits_na, axis=1)
range_tensor = tf.reshape(tf.range(tf.shape(logprobs)[0]), [-1, 1])
action_indices = tf.concat([range_tensor, tf.reshape(sy_ac_na, [-1, 1])], 1)
sy_logprob_n = tf.gather_nd(logprobs, action_indices)
else:
sy_mean, sy_logstd = policy_parameters
logprob_na = (
    -0.5 * tf.square((sy_ac_na - sy_mean) / tf.exp(sy_logstd))
    - 0.5 * np.log(2 * np.pi)
    - sy_logstd
)
sy_logprob_n = tf.reduce_sum(logprob_na, axis=1)
return sy_logprob_n
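# Worked example for the discrete branch (illustrative shapes): with a batch
# of 3 and ac_dim = 2,
#   logprobs  = [[-0.1, -2.3], [-1.2, -0.4], [-0.7, -0.7]]
#   sy_ac_na  = [0, 1, 1]
#   action_indices = [[0, 0], [1, 1], [2, 1]]
# so tf.gather_nd picks one log-probability per row:
#   sy_logprob_n = [-0.1, -0.4, -0.7]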
def build_computation_graph(self):
"""
Notes on notation:
Symbolic variables have the prefix sy_, to distinguish them from the numerical values
that are computed later in the function
Prefixes and suffixes:
ob - observation
ac - action
_no - this tensor should have shape (batch self.size /n/, observation dim)
_na - this tensor should have shape (batch self.size /n/, action dim)
_n - this tensor should have shape (batch self.size /n/)
Note: batch self.size /n/ is defined at runtime, and until then, the shape for that axis
is None
----------------------------------------------------------------------------------
loss: a function of self.sy_logprob_n and self.sy_adv_n that we will differentiate
to get the policy gradient.
"""
self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders()
# The policy takes in an observation and produces a distribution over the action space
self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)
# We can sample actions from this action distribution.
# This will be called in Agent.sample_trajectory() where we generate a rollout.
self.sy_sampled_ac = self.sample_action(self.policy_parameters)
# We can also compute the logprob of the actions that were actually taken by the policy
# This is used in the loss function.
self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)
#========================================================================================#
# ----------PROBLEM 2----------
# Loss Function and Training Operation
#========================================================================================#
self.loss = -tf.reduce_mean(self.sy_logprob_n * self.sy_adv_n)
self.update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
#========================================================================================#
# ----------PROBLEM 6----------
# Optional Baseline
#
# Define placeholders for targets, a loss function and an update op for fitting a
# neural network baseline. These will be used to fit the neural network baseline.
#========================================================================================#
if self.nn_baseline:
self.baseline_prediction = tf.squeeze(build_mlp(
self.sy_ob_no,
1,
"nn_baseline",
n_layers=self.n_layers,
size=self.size), 1)
self.sy_target_n = tf.placeholder(shape=[None], name="targets", dtype=tf.float32)
baseline_loss = tf.reduce_mean(tf.square(self.sy_target_n - self.baseline_prediction))
self.baseline_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(baseline_loss)
def sample_trajectories(self, itr, env):
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and self.animate)
path = self.sample_trajectory(env, animate_this_episode)
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > self.min_timesteps_per_batch:
break
return paths, timesteps_this_batch
def sample_trajectory(self, env, animate_this_episode):
ob = env.reset()
obs, acs, rewards = [], [], []
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.1)
obs.append(ob)
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
ac = self.sess.run(self.sy_sampled_ac, feed_dict={
self.sy_ob_no: ob.reshape(1, -1)})
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
rewards.append(rew)
steps += 1
if done or steps > self.max_path_length:
break
path = {"observation" : np.array(obs, dtype=np.float32),
"reward" : np.array(rewards, dtype=np.float32),
"action" : np.array(acs, dtype=np.float32)}
return path
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
def sum_of_rewards(self, re_n):
"""
Monte Carlo estimation of the Q function.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
----------------------------------------------------------------------------------
Your code should construct numpy arrays for Q-values which will be used to compute
advantages (which will in turn be fed to the placeholder you defined in
Agent.define_placeholders).
Recall that the expression for the policy gradient PG is
PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]
where
tau=(s_0, a_0, ...) is a trajectory,
Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
and b_t is a baseline which may depend on s_t.
You will write code for two cases, controlled by the flag 'reward_to_go':
Case 1: trajectory-based PG
(reward_to_go = False)
Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
entire trajectory (regardless of which time step the Q-value should be for).
For this case, the policy gradient estimator is
E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
where
Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
Thus, you should compute
Q_t = Ret(tau)
Case 2: reward-to-go PG
(reward_to_go = True)
Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
from time step t. Thus, you should compute
Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
like the 'ob_no' and 'ac_na' above.
"""
# YOUR_CODE_HERE
q_n = []
if self.reward_to_go:
for rew in re_n:
rew_tg = []
for r in rew[::-1]:
if len(rew_tg) == 0:
rew_tg.append(r)
else:
rew_tg.append(r + self.gamma * rew_tg[-1])
rew_tg.reverse()
q_n.append(np.array(rew_tg))
else:
for rew in re_n:
    # accumulate the full discounted return Ret(tau) back to front;
    # note plain assignment, not +=, for the recurrence r + gamma * rew_sum
    rew_sum = 0
    for r in rew[::-1]:
        rew_sum = r + self.gamma * rew_sum
    q_n.append(rew_sum * np.ones_like(rew))
q_n = np.concatenate(q_n)
return q_n
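# Worked example (illustrative): for one path with rewards [1, 1, 1] and
# gamma = 0.5, reward_to_go=True gives q = [1.75, 1.5, 1.0] (built back to
# front), while reward_to_go=False repeats the full discounted return:
# q = [1.75, 1.75, 1.75].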
def compute_advantage(self, ob_no, q_n):
"""
Computes advantages by (possibly) subtracting a baseline from the estimated Q values
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
returns:
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
#====================================================================================#
# ----------PROBLEM 6----------
# Computing Baselines
#====================================================================================#
if self.nn_baseline:
# If nn_baseline is True, use your neural network to predict reward-to-go
# at each timestep for each trajectory, and save the result in a variable 'b_n'
# like 'ob_no', 'ac_na', and 'q_n'.
#
# Hint #bl1: rescale the output from the nn_baseline to match the statistics
# (mean and std) of the current batch of Q-values. (Goes with Hint
# #bl2 in Agent.update_parameters.
b_n = self.sess.run(self.baseline_prediction, feed_dict={self.sy_ob_no: ob_no})
b_n = np.std(q_n) * b_n + np.mean(q_n)
adv_n = q_n - b_n
else:
adv_n = q_n.copy()
return adv_n
def estimate_return(self, ob_no, re_n):
"""
Estimates the returns over a set of trajectories.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
q_n = self.sum_of_rewards(re_n)
adv_n = self.compute_advantage(ob_no, q_n)
#====================================================================================#
# ----------PROBLEM 3----------
# Advantage Normalization
#====================================================================================#
if self.normalize_advantages:
# On the next line, implement a trick which is known empirically to reduce variance
# in policy gradient methods: normalize adv_n to have mean zero and std=1.
adv_n = (adv_n - np.mean(adv_n)) / (np.std(adv_n) + 1e-9)
return q_n, adv_n
def update_parameters(self, ob_no, ac_na, q_n, adv_n):
"""
Update the parameters of the policy and (possibly) the neural network baseline,
which is trained to approximate the value function.
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
ac_na: shape: (sum_of_path_lengths).
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
returns:
nothing
"""
#====================================================================================#
# ----------PROBLEM 6----------
# Optimizing Neural Network Baseline
#====================================================================================#
if self.nn_baseline:
# If a neural network baseline is used, set up the targets and the inputs for the
# baseline.
#
# Fit it to the current batch in order to use for the next iteration. Use the
# baseline_update_op you defined earlier.
#
# Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
# targets to have mean zero and std=1. (Goes with Hint #bl1 in
# Agent.compute_advantage.)
target_n = (q_n - np.mean(q_n)) / (np.std(q_n) + 1e-9)
self.sess.run(self.baseline_update_op, feed_dict={
self.sy_ob_no: ob_no,
self.sy_target_n: target_n
})
#====================================================================================#
# ----------PROBLEM 3----------
# Performing the Policy Update
#====================================================================================#
# Call the update operation necessary to perform the policy gradient update based on
# the current batch of rollouts.
#
# For debug purposes, you may wish to save the value of the loss function before
# and after an update, and then log them below.
feed_dict = {
self.sy_ob_no: ob_no,
self.sy_ac_na: ac_na,
self.sy_adv_n: adv_n
}
loss_before = self.sess.run(self.loss, feed_dict=feed_dict)
logz.log_tabular('LossBeforeUpdate', loss_before)
self.sess.run(self.update_op, feed_dict=feed_dict)
loss_after = self.sess.run(self.loss, feed_dict=feed_dict)
logz.log_tabular('LossAfterUpdate', loss_after)
def train_PG(
exp_name,
env_name,
n_iter,
gamma,
min_timesteps_per_batch,
max_path_length,
learning_rate,
reward_to_go,
animate,
logdir,
normalize_advantages,
nn_baseline,
seed,
n_layers,
size):
start = time.time()
#========================================================================================#
# Set Up Logger
#========================================================================================#
setup_logger(logdir, locals())
#========================================================================================#
# Set Up Env
#========================================================================================#
# Make the gym environment
env = gym.make(env_name)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
env.seed(seed)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
# Is this env continuous, or discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# Initialize Agent
#========================================================================================#
computation_graph_args = {
'n_layers': n_layers,
'ob_dim': ob_dim,
'ac_dim': ac_dim,
'discrete': discrete,
'size': size,
'learning_rate': learning_rate,
}
sample_trajectory_args = {
'animate': animate,
'max_path_length': max_path_length,
'min_timesteps_per_batch': min_timesteps_per_batch,
}
estimate_return_args = {
'gamma': gamma,
'reward_to_go': reward_to_go,
'nn_baseline': nn_baseline,
'normalize_advantages': normalize_advantages,
}
agent = Agent(computation_graph_args, sample_trajectory_args, estimate_return_args)
# build computation graph
agent.build_computation_graph()
# tensorflow: config, session, variable initialization
agent.init_tf_sess()
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
re_n = [path["reward"] for path in paths]
q_n, adv_n = agent.estimate_return(ob_no, re_n)
agent.update_parameters(ob_no, ac_na, q_n, adv_n)
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vpg')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--reward_to_go', '-rtg', action='store_true')
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_baseline', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=2)
parser.add_argument('--size', '-s', type=int, default=64)
args = parser.parse_args()
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
processes = []
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
reward_to_go=args.reward_to_go,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_baseline=args.nn_baseline,
seed=seed,
n_layers=args.n_layers,
size=args.size
)
# # Awkward hacky process runs, because Tensorflow does not like
# # repeatedly calling train_PG in the same thread.
if args.n_experiments == 1:
train_func()
else:
p = Process(target=train_func, args=tuple())
p.start()
processes.append(p)
# if you comment in the line below, then the loop will block
# until this process finishes
# p.join()
for p in processes:
p.join()
if __name__ == "__main__":
main()
|
test_device.py
|
import logging
import time
import threading
import scpidev
FORMAT = "%(levelname)s: %(message)s"
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
# logging.basicConfig(format=FORMAT, level=logging.INFO)
# Define our callback functions
def test_function(*args, **kwargs):
print("## Execute. ##")
i = 0
for arg in args:
time.sleep(1)
i += 1
print("Got arg: {}".format(str(arg)))
return i
def test_function2(test):
print("## Execute. ##" + str(test))
# Define some test command strings
command_strings = [
# "*RST",
# "*IDN?",
"MEASure:CURRent[:DC]? [{<range>|AUTO|MIN|MAX|DEF} [,{<resolution>|MIN|MAX|DEF}] ]",
"MEASure[:VOLTage][:DC]? [{<range>|AUTO|MIN|MAX|DEF} [,{<resolution>|MIN|MAX|DEF}] ]",
"[SENSe:]VOLTage[:DC]:NULL[:STATe] {ON|OFF}",
":VOLTage[:DC]:NULL[:STATe] {ON|OFF}",
"CALCulate:FUNCtion {NULL | DB | DBM | AVERage | LIMit}",
"""
MEASure[:VOLTage][:DC]?
[{<range>|AUTO|MIN|MAX|DEF} [, {<resolution>|MIN|MAX|DEF}] ]
""",
]
# # Define some test commands, which will be sent to our device
# test_commands = [
# # "*RST",
# # "*IDN?",
# "CONF AUTO",
# "MEAS:CURREnt? 10 A, MAX",
# "XXX?",
# ]
# # Create the instance of our SCPI device
# dev = scpidev.SCPIDevice(
# name="My SCPI Device",
# )
# # Create commands
# for cmd in command_strings:
# dev.add_command(
# scpi_string=cmd,
# callback=test_function,
# )
# # Create the communication interfaces
# # dev.create_interface("tcp")
# dev.create_interface("udp")
# # dev.create_interface("serial", port="COM7", baudrate="500000", dsrdtr=1)
# t = threading.Thread(target=dev.run)
# t.start()
# time.sleep(5)
# dev.stop()
# t.join()
# exit()
# # try:
# # dev.run()
# # except KeyboardInterrupt:
# # dev.stop()
# # exit()
# print("\n-- LIST COMMANDS: -------")
# print(dev.list_commands())
# print("\n-- EXECUTE: -------------")
# for cmd_str in test_commands:
# print(cmd_str)
# dev.execute(cmd_str)
# print("\n-- COMMAND HISTORY: ------")
# for c in dev.get_command_history():
# print(c)
# print("\n-- ALARMS: --------------")
# while True:
# alarm = dev.get_last_alarm()
# if alarm is None:
# break
# print(alarm)
|
myRL_2_server_no_training.py
|
#!/usr/bin/env python
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import SocketServer
import base64
import urllib
import sys
import os
import json
os.environ['CUDA_VISIBLE_DEVICES']=''
import numpy as np
import tensorflow as tf
import time
import a3c
import multiprocessing
import copy
import socket
import fcntl
import matplotlib.pyplot as plt
S_INFO = 6 # number of state features (e.g. bit_rate, buffer_size, throughput, download time, next chunk sizes, chunk_til_video_end)
S_LEN = 8 # take how many frames in the past
A_DIM = 6
VIDEO_BIT_RATE = [300,750,1200,1850,2850,4300] # Kbps
BITRATE_REWARD = [1, 2, 3, 12, 15, 20]
BITRATE_REWARD_MAP = {0: 0, 300: 1, 750: 2, 1200: 3, 1850: 12, 2850: 15, 4300: 20}
M_IN_K = 1000.0
BUFFER_NORM_FACTOR = 10.0
CHUNK_TIL_VIDEO_END_CAP = 48.0
TOTAL_VIDEO_CHUNKS = 448
DEFAULT_QUALITY = 0 # default video quality without agent
REBUF_PENALTY = 4.3 # 1 sec rebuffering -> this number of Mbps
SMOOTH_PENALTY = 1
ACTOR_LR_RATE = 0.0001
CRITIC_LR_RATE = 0.001
TRAIN_SEQ_LEN = 100 # take as a train batch
MODEL_SAVE_INTERVAL = 100
RANDOM_SEED = 42
RAND_RANGE = 1000
SUMMARY_DIR = './results'
LOG_FILE = './results/log'
# in format of time_stamp bit_rate buffer_size rebuffer_time video_chunk_size download_time reward
# NN_MODEL = None
NN_MODEL = '../rl_server/results/pretrain_linear_reward.ckpt'
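# A sketch of the linear QoE reward that these constants typically parameterize
# in Pensieve-style servers (an assumption here -- the reward computation itself
# is not shown in this excerpt): for a chosen bitrate `br` in Kbps, rebuffering
# time `rebuf` in seconds and previous bitrate `last_br`,
#   reward = br / M_IN_K \
#            - REBUF_PENALTY * rebuf \
#            - SMOOTH_PENALTY * abs(br - last_br) / M_IN_K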
################################
#multiprocessing share variables
manager=multiprocessing.Manager()
Que1=manager.list()
Que2=manager.list()
Dict1=manager.dict()
Dict2=manager.dict()
begin_time=time.time()
QueOnline=manager.list()
DictOnline={}#remember last quality for each IP
MultiClientState={}# format:{"IP":[(int)heartbeat_time_not_request_time,(int)quality]}
################################
# video chunk sizes
size_video1=[1756806,3091206,2210154,1845731,1817275,2069902,2117640,2061264,2237179,2132441,2044975,3789197,3250223,2487213,2149619,1765477,
2505293,2673223,2084351,2069989,1855189,2478422,2580412,2065841,2585352,1351167,1398486,1725385,2897186,4738096,1670320,1756062,
3048206,4866144,1843384,1584205,1884317,1858789,1038538,798577,2117675,2528940,1398909,3205655,2983891,2201743,2366969,2553838,
1501437,1267022,1644497,1367567,1203298,3427696,1968656,3096706,2066317,2634682,1694746,1434945,3173242,1693021,1682424,2113373,
3103217,2462552,2256905,2226073,1980055,2037901,2470135,2128194,2434345,1714265,1330462,2102803,1015863,865084,1634635,1229781,
1227461,1383375,1572941,1624467,1260343,2100804,1782111,3028204,1845902,1283138,1529032,1782594,1613129,1621860,1702228,1935782,
1908470,1820040,1542276,2025509,1672002,1681633,3771816,5057431,3537995,2803543,3831917,2709325,3627028,2349666,2466424,2215131,
2249004,1704399,1689433,1362005,1565350,2242939,2378241,2021904,2019269,1054954,2328052,2211591,2104177,2280895,1991117,1857563,
2209717,1711273,1293338,1289551,1976534,2284536,1925431,2869117,2469558,1435620,1240014,1811217,2988838,2552354,2365918,2065200,
2555376,1779921,2281000,2856816,2252510,1331370,1565606,1548717,3429310,1957226,1744946,1736383,2170169,2128049,2573352,2262965,
2877128,2632416,2110319,2309152,2087447,2303868,3110829,4470951,4276187,2646215,2596715,1701057,2932345,2622505,2362883,2360631,
3172401,3599259,2951048,1968506,2345232,1739989,1303134,1273197,1463247,1841675,2594747,3307177,1289034,2849319,2067334,1658999,
1451249,2074198,1510216,2665640,2975156,1903333,3534501,4269578,4256969,4212087,983135,527732,391690,1403108,1438881,1452557,
1917879,1613934,1871217,1188694,2512090,2858958,1531701,1008854,1003379,1815850,1586467,980482,1439500,2289989,2449550,3404941,
3365845,2830877,4573090,2648680,4028108,5273438,3649905,3386154,2446726,2965108,2245612,1832510,2071735,1755973,2019058,1360561,
1039489,1894295,1999107,1666014,2242594,1746466,2870885,2284279,1714119,2282746,1999381,2436148,1828361,2833893,2132959,1585105,
2275927,2131090,2951419,2197713,2049883,1657043,2195265,2978021,2007940,1712613,1729774,1533013,3056849,3034214,3327704,3120601,
2265234,1983515,2468537,2171814,1750435,1885298,2056222,2409637,1384308,1073859,1993041,2524543,2684433,2749667,1487433,2299203,
1711371,1882897,1979814,2600016,2829907,2024223,2435121,1745680,1733204,2311748,2360093,2962846,2530685,2333345,2573975,2688803,
1674837,2328829,2654846,2177220,1983637,1826992,1554600,1742047,1015182,1327517,1392909,1997961,2777906,2151277,1385355,1841831,
2576036,2248077,1670266,1921688,2513568,2592109,1866077,2254994,3076104,2892882,2637278,2258700,1223635,905654,900966,532695,
678430,1684441,1272715,1174559,1071726,1261171,1574531,1726304,1393375,1612197,1577541,1178594,1331352,1471475,1258708,1417142,
1337069,1753784,3098761,1712958,1487216,1749591,2094655,1655374,1838915,1632130,4455112,1103313,4325538,4260027,3363232,1966800,
2387229,2734086,2389536,2457011,2795839,2917015,2516264,2127460,2593348,3241121,3966814,3003788,1984507,2589085,2196063,1610600,
1378770,2396778,1976157,1717434,669393,1027820,1375132,1464032,1326640,1729066,1534541,1787945,2596315,3393474,2786962,3161567,
2753054,2801599,3086005,2440861,3156653,4016406,3399126,3785131,4186971,3408842,2612351,2792930,2184320,1364863,1291497,958698,
1640227,1815859,1795500,2069010,2016002,1406199,1373710,1718790,980021,862871,990244,1247321,1934872,1727416,1281950,1283997,
2167162,1437622,911988,1208836,1855819,1746139,2142901,3077141,2097075,1667617,2375729,1176383,1534788,2019092,1649060,1119606,
2066820]
size_video2=[1248166,1909948,1437354,1206293,1202036,1374260,1394562,1352039,1499553,1420399,1360662,2352325,2206383,1618768,
1455386,1204706,1713574,1864652,1448970,1398569,1293903,1662378,1778570,1405415,1767145,754576,789631,1047145,1830919,3283497,
1110502,1143921,2082236,3252018,1219923,1071692,1295207,1266141,656576,503078,1354183,1699087,927720,2208172,2011759,1494987,
1602807,1716281,996382,808047,975928,884332,755695,2258444,1301747,2091230,1441438,1791927,1142314,948784,2118602,1134808,1088077,
1419506,2094634,1666971,1573121,1445975,1315146,1393944,1676874,1438847,1587400,1082750,855365,1309596,616101,522811,1009092,
755328,744447,856311,990560,994122,741287,1350804,1149553,2095051,1184299,762583,968586,1179001,1003173,998496,1057590,1243591,
1237504,1117387,937314,1261624,1166183,1171457,2696482,3460711,2432287,1831251,2639863,1888769,2576440,1610171,1708230,1492094,
1538209,1132001,1123038,874553,1004636,1426699,1544177,1349606,1360880,645082,1354293,1398892,1451433,1504901,1328553,1263252,
1509891,1153670,855640,864167,1392355,1511324,1301036,1948238,1647259,955411,816968,1185012,2007860,1648783,1522896,1335718,
1707248,1085428,1457959,1994052,1475727,828972,948348,933982,2382507,1225258,1097507,1118835,1448416,1390061,1695141,1496810,
1954410,1774003,1366911,1524592,1368957,1501570,2095420,3114760,2838416,1502515,1694876,1053663,2100929,1903225,1667629,1663218,
2248474,2551140,2051397,1347603,1626107,1164880,871909,857484,973494,1264289,1741906,2304449,845899,1950152,1361535,1096620,
956379,1374366,979791,1713882,1980346,1253742,2331705,2782848,2771738,2807548,644361,352430,247261,924748,983983,978337,1273457,
1072491,1233180,753303,1719760,1976297,1020941,643472,632199,1212648,1033471,622503,954344,1418860,1581120,2280953,2273723,
1722839,3004290,1786110,2762113,3508086,2471169,2290623,1631933,2022588,1501694,1221686,1392053,1162530,1350142,916630,692591,
1272848,1376995,1130650,1511110,1188451,1956043,1553905,1190117,1536041,1334153,1620445,1229638,1904189,1437879,1043343,1484736,
1389038,1962114,1379569,1348907,1083199,1464620,1986660,1331590,1086919,1129684,1020726,2049670,2077307,2244912,2092287,1502555,
1329093,1638317,1432601,1186820,1259056,1378272,1592067,894118,702494,1328338,1707818,1858005,1814721,965118,1491287,1130946,
1245095,1297373,1761282,1887826,1337368,1614799,1121034,1145238,1497043,1606601,2025110,1710529,1583480,1723662,1810776,1113208,
1547386,1774950,1421925,1206322,1187183,1004007,1147471,676151,894621,880733,1266385,1848743,1457129,887321,1185256,1683346,
1454053,1091702,1298560,1702106,1712364,1162421,1518078,2105991,1963481,1783520,1462072,721990,579786,589643,344866,427515,
1117244,806288,741042,675112,787869,1011434,1126209,885267,1055611,1018506,773227,870077,912214,776772,883886,862865,1150468,
2067548,1099289,945530,1150026,1362064,1050127,1197301,1075450,2836687,702922,2875327,2778004,2245324,1287876,1575207,1779274,
1563888,1703575,1879597,1981220,1706876,1336949,1679947,2160617,2693480,2009306,1332161,1758489,1457012,1054975,926778,1589787,
1315164,1139932,406770,664625,936523,928176,835472,1167407,994739,1185573,1740000,2319760,1837859,2103152,1854032,1873751,2125146,
1614715,2116308,2777412,2292582,2515009,2837060,2395144,1790486,1913686,1448776,902340,828891,617586,1081453,1195033,1179707,
1339413,1300244,935908,880962,1098413,618451,537171,620261,773863,1240249,1093356,802481,790748,1415323,837047,545014,773276,
1225405,1133886,1437142,2045825,1351366,1027020,1495764,704275,989618,1287214,1087634,718747,1318691]
size_video3=[846285,1168830,924155,782361,776921,896171,904410,867529,987852,931970,884019,1388977,1386547,1061921,985293,802234,
1169255,1286193,995130,938950,889120,1097258,1199443,945496,1179962,430164,436726,624519,1113671,2138958,731588,732163,1371730,
2110792,788301,712212,865112,846544,419881,323168,854227,1103578,603737,1462476,1328702,995325,1062304,1130531,658362,515203,
564263,576357,481669,1439148,831514,1345162,986175,1206557,761735,621067,1371358,739751,691765,948480,1381127,1093177,1075045,
921199,858138,939164,1113522,952278,1002220,678313,545154,794368,368560,322602,627633,465516,454419,527661,617205,601121,418400,
838302,720424,1421671,743692,444613,590983,767637,605102,586040,629895,773906,770305,679673,553179,767895,798336,717269,1749944,
2232941,1630935,1191422,1750938,1335785,1831757,1108036,1190875,1006044,1040709,746704,736186,559337,646623,884342,996032,902282,
898520,388061,706020,837590,997771,984903,869629,841845,1003621,765322,549112,567129,962434,983686,849944,1297068,1068550,630926,
534534,753751,1297143,1033674,972729,860044,1146757,643290,916479,1371688,950221,503853,565079,558122,1579179,764904,684818,714375,
958026,897292,1095530,976392,1284670,1157384,849960,983202,885117,949242,1378747,2093615,1794015,892920,1070196,636000,1427417,
1358293,1161687,1148764,1556485,1755196,1391857,901239,1101441,767029,575457,571960,640246,852139,1153342,1551623,552146,1303983,
884697,728329,631483,890909,629541,1057592,1264644,812359,1495774,1802682,1794299,1809999,421592,234510,162002,598631,660455,
650412,831883,704816,796782,469916,1141450,1332339,673944,405808,393579,790772,668101,391316,620897,855778,987162,1437210,1494618,
1000189,1977624,1160710,1853267,2272158,1620476,1512714,1065616,1349832,985649,800298,916009,747151,878787,611733,458891,824552,
936781,763908,1005463,805397,1309198,1027202,824776,1018133,878999,1059264,816116,1245755,950480,675165,934743,881605,1262539,
836769,868241,689535,960324,1290799,875221,677750,719309,673009,1332185,1381609,1467929,1364835,972063,879023,1062308,925128,
796868,822789,917077,1038227,572879,460030,870647,1135715,1267450,1170787,608866,932475,718075,794316,835131,1173614,1226376,
873792,1039123,698256,744176,962960,1076340,1357311,1134278,1063750,1129502,1193512,731147,1008405,1172782,916351,714909,746975,
628955,733798,452985,599131,547008,788141,1187992,947166,556402,745185,1072325,919245,703608,867170,1130427,1110818,720520,1007762,
1397415,1311440,1185457,919927,415043,381670,384138,221070,272611,721164,508382,463087,418721,476494,636457,721220,555097,676089,
633209,496792,565895,553631,472079,531680,549381,738800,1333841,682133,579828,733952,859037,656064,756593,693793,1828137,431863,
1810452,1836670,1447052,837477,1007940,1130632,997037,1164277,1231827,1316193,1135411,817342,1051188,1391898,1762282,1306967,
877949,1172156,944666,677181,614653,1029902,861520,751279,251924,434194,637408,585673,517743,779377,624265,767662,1141932,1552512,
1182714,1350835,1216575,1221492,1437167,1047801,1352884,1866550,1498852,1594916,1933364,1666636,1216493,1299406,946556,587152,
523357,398282,698490,768546,747186,839672,816283,609526,551500,685818,385510,332617,384081,472836,784876,681576,495325,478054,
910864,486727,327909,490384,787676,714464,934579,1322102,836378,608941,898288,419176,631361,777189,710660,463377,848825]
size_video4=[547035,706404,596043,524098,504228,582524,590858,552807,649725,609806,581924,835167,856359,720885,648993,549888,798544,
890208,680375,621228,612247,714936,801526,636640,781633,258480,256981,381833,668878,1316285,483083,470324,887319,1346096,488576,
476883,574255,560775,275294,213942,544631,721930,394905,956401,866807,668112,707053,752293,439005,328990,332676,381240,315599,
905000,536920,856841,676222,814761,511744,406110,872426,478738,441067,638082,902857,705191,735017,597647,564153,640146,744700,
634426,622405,429916,348023,473333,223233,207060,398798,297699,289124,338019,386894,376068,247323,529278,458771,954008,469848,
268451,367008,510493,384351,336696,365757,469230,466878,397890,306208,392038,480889,427503,1061331,1462570,1107397,788212,1201905,
958934,1296354,764232,834022,684159,703462,494688,476757,358278,421053,547982,651712,605673,604769,247633,362988,500301,679289,
636811,569262,554524,657393,500344,353603,370888,654913,640820,555403,854536,682544,425652,353977,482904,831613,646249,623250,
570778,781138,395629,591756,919672,608636,315279,348908,341251,1028395,493213,433388,461614,633669,582445,710571,635445,829185,
740760,520948,625161,572429,587024,885619,1366909,1096009,549068,693014,384613,967739,961765,802806,786390,1063204,1193221,938432,
594814,738128,514183,385394,386211,419937,569630,759702,1035614,363332,867267,584199,495296,418710,579747,407271,643695,793432,
532780,953519,1181184,1173164,1150240,278260,158326,109243,391560,447495,432372,541903,462974,514903,297437,746687,889772,446977,
261064,245091,514842,433432,248997,401709,510992,623671,875583,954252,565854,1282428,760254,1230934,1471145,1041466,1007408,
700685,908906,647372,531923,604648,480567,571680,415481,311725,528791,648577,526915,676767,544984,877852,681274,584479,682400,
587249,697584,541523,819236,635454,439248,575534,558134,795960,507237,560309,435884,630696,842280,584377,418701,452008,447495,
855620,910486,955619,874290,634816,588917,688253,601008,545601,546370,622967,696809,377403,307085,582646,767567,881993,759744,
380057,569142,450995,500151,533009,787180,796757,579408,665424,428991,486141,634709,724968,910350,755342,723301,744499,791097,
486696,650661,775896,589564,417632,460207,386577,461058,309090,401728,335814,488570,758867,599018,354581,449831,677583,583268,
452635,579431,752699,725899,457825,661835,924337,879308,792148,572914,236078,252664,248583,143285,173576,464535,323435,290071,
259483,286196,396866,459208,346403,429612,379429,317461,373328,334657,285622,316216,347387,474325,846736,421261,358587,460670,
540837,418151,473605,443747,1142146,266099,1139106,1226865,912006,544488,637168,726559,633507,783324,803464,874546,749552,490660,
644883,880869,1134430,839081,575502,778336,608858,437231,411106,666015,563343,500243,160495,290749,441946,380307,327141,528851,
386873,499151,742431,1004036,756402,854695,798836,797035,965829,672367,837390,1234139,962167,972983,1314591,1183313,847271,900132,
623507,383196,331639,259707,448397,491216,470078,535948,506772,404948,343057,429095,241972,208979,237532,289286,502020,428997,
308660,291778,588501,298147,204497,313212,504692,445722,619353,831848,511452,357941,535866,252048,403999,477594,454970,301303,551953]
size_video5=[323113,418441,382004,337817,318822,366200,363903,346976,404249,383861,369141,500281,492772,467460,412406,364336,530546,
595068,453373,400416,406242,447605,508492,416723,492336,153985,149450,221825,389137,790219,302059,288733,540456,825815,285915,
304614,354511,356853,174974,139405,344879,446520,249322,594647,540016,434577,456950,491623,284629,206793,194787,245465,201172,
543139,328951,533104,446793,532154,333255,259306,513006,294784,273182,414589,562032,426081,491024,375053,356030,434816,485000,
415484,363173,267232,217152,268349,130234,129844,244414,183197,181289,211852,230048,232458,147458,323339,286466,621150,292710,
157388,224852,330448,244658,189794,208443,272864,272767,219585,160716,199810,281265,234643,623111,905443,715137,496016,757193,
653100,866715,509267,565709,439095,458179,317013,299723,220237,265702,326004,406891,398108,396428,161148,189747,289152,438311,
391808,350823,342642,404291,312421,215746,231048,419638,401633,350467,540680,413555,274948,226952,298374,504645,399332,385815,
376112,518000,240102,380381,592007,379115,193082,217973,203101,629581,312102,266984,289355,406154,364723,444534,405512,503590,
445920,301669,381944,350196,336701,533864,849909,638562,325653,440403,227952,636997,657734,525502,518535,689114,782104,610917,
376978,476526,340219,251135,252753,267845,366877,470621,673027,231795,555250,369340,329086,269267,364173,255834,373785,469492,
336207,598436,747522,744086,688734,182335,102883,71090,251738,294267,277818,338245,291138,317642,182073,467537,572660,290618,
163813,145742,323299,269998,151414,247136,299386,379185,511734,583799,309771,794744,474007,777870,916062,639704,663002,444759,
596148,410568,350269,389119,296238,363553,277452,211307,324543,445667,365955,459618,364370,580715,438804,412688,454548,384954,
449872,351636,532810,418362,278056,331408,337389,468421,287027,339677,265929,405248,543069,387402,240196,265294,288498,506694,
574841,596509,526249,403785,389295,440901,377555,376321,360924,424678,470015,246729,206103,391925,514724,604960,481393,227540,
320553,265336,292954,332903,526009,509974,379518,402580,247420,307887,399296,490999,599427,493224,485382,474936,511692,327348,
403054,509642,370452,220414,268766,223824,273431,210187,260215,194459,282781,458374,361910,222321,254376,398627,353268,277424,
373952,485170,458908,283968,415847,594244,581598,513771,336212,123056,159415,152039,84419,96964,296357,197550,174412,150205,
163490,234384,276420,206155,251134,207262,189865,234699,190492,162133,172192,208515,294919,506806,243271,208423,266189,317494,
252397,272579,266038,626921,160573,687288,805076,516668,334312,382256,432601,382803,509989,497589,559731,472280,271315,372954,
517170,690202,505692,358051,497198,379108,274271,264254,417412,356246,329139,100180,192502,302659,248706,201499,350511,223655,
308401,454270,637270,464928,511545,498959,503850,626394,410515,466441,761200,580059,554024,831652,823388,590577,625131,404481,
242549,205265,168423,277268,309949,278503,325049,292610,262838,201999,257126,143254,124497,137758,167697,308527,256226,182915,
168765,363172,179420,124656,188561,300983,262333,396335,493415,295359,207622,306053,145571,246429,285851,275563,186508,346649]
size_video6=[122566,141690,156437,151455,131958,141687,134848,143568,169611,155749,144962,187567,189741,191607,169931,160854,236280,
279955,203736,174786,193874,187167,207081,196253,203820,58306,52004,70463,114188,248094,120126,105738,187819,288450,107224,132126,
132775,150099,72040,63120,142264,179063,113063,237672,222641,210179,206024,226841,125166,86270,76277,111752,86103,186977,124488,
195494,209856,232665,151864,114023,181418,107522,111914,191996,231947,145572,228523,165245,154746,217987,232697,199480,132247,
114355,92243,101533,44432,46959,92051,64667,69881,82966,70706,91967,52126,115033,106804,257487,110329,52198,86248,137809,98071,
59563,67579,89812,87619,65049,51508,66553,97090,69339,218786,350602,282395,196655,294150,274147,350502,229885,264751,188592,194004,
138597,129254,90055,113934,119577,163598,176947,176958,64953,63686,94317,174842,133878,119038,116797,143402,114567,79187,85619,
158887,158149,136588,211814,149475,111228,90166,110685,182666,164383,153601,193728,240841,89363,172541,249048,155912,72714,96738,
76146,210967,138516,104483,112952,166011,143486,173754,163990,184907,157542,102142,138713,132187,103266,186551,302474,233690,114527,
183684,86990,275527,303484,247110,243197,306068,333494,259092,161551,219694,163689,115479,115867,110157,165717,206413,316094,106605,
258595,167706,161871,126251,164223,106360,140197,171683,142022,226802,274115,317194,289925,80931,38396,28340,124143,139033,128434,
145168,122302,127194,68553,208520,246036,119157,62046,49114,123744,104524,56056,81724,107806,129717,178197,219082,87764,309996,
175234,291302,381763,260114,311747,197184,285496,184984,171407,180922,127859,167708,142347,108401,127627,229023,194597,231589,
188967,293808,207290,225385,222372,182989,208632,165647,262519,198122,119059,136057,151258,207737,126195,142675,116189,196934,
273298,169687,80087,89952,116953,203808,258544,276055,251654,191358,176143,185613,174725,183381,183890,208329,222059,115871,103659,
194619,263618,323870,232819,101175,148358,120409,137639,169775,286516,266060,186239,185178,111048,131835,191865,248460,308506,
263337,268120,252697,279984,174154,193877,250368,165544,97614,128553,106663,133692,98249,131557,84157,120094,191725,157144,106115,
103896,189100,153325,105096,185534,243798,242423,135512,204760,313395,292357,286477,158682,36035,72722,58693,21160,29201,149424,
93095,73211,52395,60533,84569,100012,78060,95461,63814,66318,90387,64036,46982,48426,64363,108625,183411,70708,64343,82518,105266,
82540,70162,71644,64605,51629,207652,169915,122208,106258,133986,162789,140802,190933,160253,206255,174223,70660,113933,173128,
261541,173884,115544,179952,131746,92096,84877,151907,131972,127129,27791,55798,115167,97179,63504,113963,41194,72340,149359,
210948,145277,142456,148052,171092,235134,102985,129884,278803,214629,183098,306658,352088,282790,309863,185129,100329,81350,
64536,120000,135855,104350,136764,97760,99442,67417,84531,36782,30662,33807,40182,96727,72553,43191,38019,107349,45983,30115,
45931,84315,65096,123915,152798,77492,43261,76665,36196,69589,62195,61628,33154,80528]
def get_chunk_size(quality, index):
if ( index < 0 or index > 448 ):
return 0
    # note that the quality and video labels are inverted (i.e., quality 5 is highest and maps to size_video1)
sizes = {5: size_video1[index], 4: size_video2[index], 3: size_video3[index], 2: size_video4[index], 1: size_video5[index], 0: size_video6[index]}
return sizes[quality]
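# Illustrative note (not part of the original logic): the inverted mapping
# above means get_chunk_size(5, i) reads size_video1[i] (highest bitrate)
# while get_chunk_size(0, i) reads size_video6[i] (lowest bitrate).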
class my_socketserver(SocketServer.ThreadingTCPServer):
allow_reuse_address=True
daemon_threads=True
def __init__(self,server_address,RequestHandlerClass):
SocketServer.ThreadingTCPServer.__init__(self,server_address,RequestHandlerClass)
def make_request_handler(input_dict):
class Request_Handler(BaseHTTPRequestHandler):
def __init__(self, *args, **kwargs):
self.input_dict = input_dict
self.sess = input_dict['sess']
self.log_file = input_dict['log_file']
self.actor = input_dict['actor']
self.critic = input_dict['critic']
self.saver = input_dict['saver']
self.s_batch = input_dict['s_batch']
self.a_batch = input_dict['a_batch']
self.r_batch = input_dict['r_batch']
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def do_POST(self):
content_length = int(self.headers['Content-Length'])
post_data = json.loads(self.rfile.read(content_length))
#print post_data
if ( 'pastThroughput' in post_data ):
# @Hongzi: this is just the summary of throughput/quality at the end of the load
# so we don't want to use this information to send back a new quality
print "Summary: ", post_data
elif('heartbeat' in post_data):
if self.client_address[0] not in Que1:
Que1.append(self.client_address[0])
#print('Que1',Que1[:])
#print self.client_address
send_data="receive hb"
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.send_header('Content-Length', len(send_data))
self.send_header('Access-Control-Allow-Origin', "*")
self.end_headers()
self.wfile.write(send_data)
else:
                ########## Write the ABR algorithm here. All request info is now available. #########
global begin_time
t = float(time.time() - begin_time)
q = int(post_data['lastquality'])
global Dict1,Dict2
if self.client_address[0] in Dict1.keys():
tmp=Dict1[self.client_address[0]]
tmp.append(t)
Dict1[self.client_address[0]]=tmp
tmp=Dict2[self.client_address[0]]
tmp.append(q)
Dict2[self.client_address[0]]=tmp
else:
Dict1[self.client_address[0]]=[t]
Dict2[self.client_address[0]]=[q]
#print(Dict1[self.client_address[0]],Dict2[self.client_address[0]])
global DictOnline,QueOnline
                # iterate over a snapshot of the keys; popping while iterating the dict itself raises RuntimeError
                for k in list(DictOnline.keys()):
if k not in QueOnline[:]:
DictOnline.pop(k)
DictOnline[self.client_address[0]]=q
# option 1. reward for just quality
# reward = post_data['lastquality']
# option 2. combine reward for quality and rebuffer time
# tune up the knob on rebuf to prevent it more
# reward = post_data['lastquality'] - 0.1 * (post_data['RebufferTime'] - self.input_dict['last_total_rebuf'])
# option 3. give a fixed penalty if video is stalled
# this can reduce the variance in reward signal
# reward = post_data['lastquality'] - 10 * ((post_data['RebufferTime'] - self.input_dict['last_total_rebuf']) > 0)
# option 4. use the metric in SIGCOMM MPC paper
rebuffer_time = float(post_data['RebufferTime'] -self.input_dict['last_total_rebuf'])
# --linear reward--
reward = VIDEO_BIT_RATE[post_data['lastquality']] / M_IN_K \
- REBUF_PENALTY * rebuffer_time / M_IN_K \
- SMOOTH_PENALTY * np.abs(VIDEO_BIT_RATE[post_data['lastquality']] -
self.input_dict['last_bit_rate']) / M_IN_K
# --log reward--
# log_bit_rate = np.log(VIDEO_BIT_RATE[post_data['lastquality']] / float(VIDEO_BIT_RATE[0]))
# log_last_bit_rate = np.log(self.input_dict['last_bit_rate'] / float(VIDEO_BIT_RATE[0]))
# reward = log_bit_rate \
# - 4.3 * rebuffer_time / M_IN_K \
# - SMOOTH_PENALTY * np.abs(log_bit_rate - log_last_bit_rate)
# --hd reward--
# reward = BITRATE_REWARD[post_data['lastquality']] \
# - 8 * rebuffer_time / M_IN_K - np.abs(BITRATE_REWARD[post_data['lastquality']] - BITRATE_REWARD_MAP[self.input_dict['last_bit_rate']])
self.input_dict['last_bit_rate'] = VIDEO_BIT_RATE[post_data['lastquality']]
self.input_dict['last_total_rebuf'] = post_data['RebufferTime']
# retrieve previous state
if len(self.s_batch) == 0:
state = [np.zeros((S_INFO, S_LEN))]
else:
state = np.array(self.s_batch[-1], copy=True)
# compute bandwidth measurement
video_chunk_fetch_time = post_data['lastChunkFinishTime'] - post_data['lastChunkStartTime']
video_chunk_size = post_data['lastChunkSize']
# compute number of video chunks left
                video_chunk_remain = TOTAL_VIDEO_CHUNKS - self.input_dict['video_chunk_count']
                self.input_dict['video_chunk_count'] += 1
# dequeue history record
state = np.roll(state, -1, axis=1)
next_video_chunk_sizes = []
for i in xrange(A_DIM):
                    next_video_chunk_sizes.append(get_chunk_size(i, self.input_dict['video_chunk_count']))
# this should be S_INFO number of terms
try:
state[0, -1] = VIDEO_BIT_RATE[post_data['lastquality']] / float(np.max(VIDEO_BIT_RATE))
state[1, -1] = post_data['buffer'] / BUFFER_NORM_FACTOR
state[2, -1] = float(video_chunk_size) / float(video_chunk_fetch_time) / M_IN_K # kilo byte / ms
state[3, -1] = float(video_chunk_fetch_time) / M_IN_K / BUFFER_NORM_FACTOR # 10 sec
state[4, :A_DIM] = np.array(next_video_chunk_sizes) / M_IN_K / M_IN_K # mega byte
state[5, -1] = np.minimum(video_chunk_remain, CHUNK_TIL_VIDEO_END_CAP) / float(CHUNK_TIL_VIDEO_END_CAP)
except ZeroDivisionError:
                    # this should occur VERY rarely (roughly 1 out of 3000 chunks), likely a dash.js issue
                    # in this case we ignore the observation and roll back to an earlier one
if len(self.s_batch) == 0:
state = [np.zeros((S_INFO, S_LEN))]
else:
state = np.array(self.s_batch[-1], copy=True)
# log wall_time, bit_rate, buffer_size, rebuffer_time, video_chunk_size, download_time, reward
self.log_file.write(str(time.time()) + '\t' +
str(VIDEO_BIT_RATE[post_data['lastquality']]) + '\t' +
str(post_data['buffer']) + '\t' +
str(rebuffer_time / M_IN_K) + '\t' +
str(video_chunk_size) + '\t' +
str(video_chunk_fetch_time) + '\t' +
str(reward) + '\n')
self.log_file.flush()
action_prob = self.actor.predict(np.reshape(state, (1, S_INFO, S_LEN)))
action_cumsum = np.cumsum(action_prob)
bit_rate = (action_cumsum > np.random.randint(1, RAND_RANGE) / float(RAND_RANGE)).argmax()
# Note: we need to discretize the probability into 1/RAND_RANGE steps,
# because there is an intrinsic discrepancy in passing single state and batch states
# send data to html side
send_data = str(bit_rate)
end_of_video = False
if ( post_data['lastRequest'] == TOTAL_VIDEO_CHUNKS ):
send_data = "REFRESH"
end_of_video = True
self.input_dict['last_total_rebuf'] = 0
self.input_dict['last_bit_rate'] = DEFAULT_QUALITY
                    self.input_dict['video_chunk_count'] = 0
self.log_file.write('\n') # so that in the log we know where video ends
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.send_header('Content-Length', len(send_data))
self.send_header('Access-Control-Allow-Origin', "*")
self.end_headers()
self.wfile.write(send_data)
# record [state, action, reward]
# put it here after training, notice there is a shift in reward storage
if end_of_video:
self.s_batch = [np.zeros((S_INFO, S_LEN))]
else:
self.s_batch.append(state)
def do_GET(self):
print >> sys.stderr, 'GOT REQ'
self.send_response(200)
#self.send_header('Cache-Control', 'Cache-Control: no-cache, no-store, must-revalidate max-age=0')
self.send_header('Cache-Control', 'max-age=3000')
self.send_header('Content-Length', 20)
self.end_headers()
self.wfile.write("console.log('here');")
def log_message(self, format, *args):
return
return Request_Handler
###### onlineCheck #######
def onlineCheck(Que1_,Que2_,QueOL):
while True:
#print('updateQue')
updateQue(Que1_,Que2_,QueOL)
global Dict1,Dict2,MultiClientState,begin_time
f=open("OLlist.json",'r')
fcntl.flock(f,fcntl.LOCK_EX)
try:
MultiClientState=json.load(f)
print(MultiClientState)
except:
MultiClientState={}
for ip in MultiClientState.keys():
if int(time.time())-MultiClientState[ip][0]-10>0:
MultiClientState.pop(ip)
tmp={}
try:
            tmp[QueOL[:][0]] = [time.time(), max(max(v) for v in Dict2.values())]
except:
pass
MultiClientState.update(tmp)
print(MultiClientState)
fcntl.flock(f,fcntl.LOCK_UN)
f.close()
f=open("OLlist.json",'w')
fcntl.flock(f,fcntl.LOCK_EX)
json.dump(MultiClientState,f)
fcntl.flock(f,fcntl.LOCK_UN)
f.close()
plot(Dict1,Dict2)
print(multi_agent_reward(Dict2))######### print when onlinecheck or when do_post
time.sleep(5)
def updateQue(Que1_,Que2_,QueOL):
#print('_Que1',Que1_[:])
#print('_Que2',Que2_[:])
#print('_QueOnline',QueOL[:])
QueOL[:]=Que1_[:]+[item for item in Que2_[:] if item not in Que1_[:]]
Que2_[:]=copy.copy(Que1_[:])
Que1_[:]=[]
#print('Que1_',Que1_[:])
#print('Que2_',Que2_[:])
print('QueOnline_',QueOL[:])
##########################
########## plot ##########
def plot(Dictt,Dictq):
color_ = ['black', 'red', 'blue', 'green', 'gold', 'm']
c=0
for k in Dictt.keys():
plt.plot(Dictt[k], Dictq[k], color=color_[c%6])
#print(Dictt[k],Dictq[k])
plt.scatter(Dictt[k], Dictq[k], color=color_[c%6])
plt.title("RL_2")
plt.axis([-1,max(Dictt[k])*1.1,0,6])
c=c+1
plt.pause(1)
##########################
### multi agent reward ###
def multi_agent_reward(Dictq):
#VIDEO_BIT_RATE
total_reward=0
miu=0
sigma=0
lastQ=[]
for k in Dictq.keys():
lastQ.append((Dictq[k])[-1])
miu=np.mean(lastQ)
sigma=np.std(lastQ)
total_reward=miu-sigma
return total_reward
##########################
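# Illustrative example (hypothetical numbers): if three clients last played
# qualities [4, 4, 1], multi_agent_reward returns mean - std
# = 3.0 - 1.414... ~= 1.59, so uneven quality across clients lowers the reward.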
def run(server_class=HTTPServer, port=8335, log_file_path=LOG_FILE):
np.random.seed(RANDOM_SEED)
assert len(VIDEO_BIT_RATE) == A_DIM
if not os.path.exists(SUMMARY_DIR):
os.makedirs(SUMMARY_DIR)
with tf.Session() as sess, open(log_file_path, 'wb') as log_file:
actor = a3c.ActorNetwork(sess,
state_dim=[S_INFO, S_LEN], action_dim=A_DIM,
learning_rate=ACTOR_LR_RATE)
critic = a3c.CriticNetwork(sess,
state_dim=[S_INFO, S_LEN],
learning_rate=CRITIC_LR_RATE)
sess.run(tf.initialize_all_variables())
saver = tf.train.Saver() # save neural net parameters
# restore neural net parameters
nn_model = NN_MODEL
if nn_model is not None: # nn_model is the path to file
saver.restore(sess, nn_model)
print("Model restored.")
init_action = np.zeros(A_DIM)
init_action[DEFAULT_QUALITY] = 1
s_batch = [np.zeros((S_INFO, S_LEN))]
a_batch = [init_action]
r_batch = []
train_counter = 0
last_bit_rate = DEFAULT_QUALITY
last_total_rebuf = 0
        # need this storage, because observation only contains total rebuffering time
        # we compute the difference to get the per-chunk rebuffering time
video_chunk_count = 0
input_dict = {'sess': sess, 'log_file': log_file,
'actor': actor, 'critic': critic,
'saver': saver, 'train_counter': train_counter,
'last_bit_rate': last_bit_rate,
'last_total_rebuf': last_total_rebuf,
                      'video_chunk_count': video_chunk_count,
's_batch': s_batch, 'a_batch': a_batch, 'r_batch': r_batch}
# interface to abr_rl server
handler_class = make_request_handler(input_dict=input_dict)
server_address = ('', port)
#httpd = server_class(server_address, handler_class)
httpd = my_socketserver(server_address, handler_class)
print 'Listening on port ' + str(port)
####### onlineCheck ######
global Que1
global Que2
global QueOnline
        p = multiprocessing.Process(target=onlineCheck, args=(Que1, Que2, QueOnline))
        p.daemon = True  # must be set before start(); the original "p.deamon" after start() had no effect
        p.start()
##########################
httpd.serve_forever()
def main():
if len(sys.argv) == 2:
trace_file = sys.argv[1]
run(log_file_path=LOG_FILE + '_RL_' + trace_file)
else:
run()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print "Keyboard interrupted."
try:
sys.exit(0)
except SystemExit:
os._exit(0)
|
utils.py
|
# Code copied from https://github.com/github/CodeSearchNet for backward-compatible experimentations
import multiprocessing
from typing import List, Iterable, Callable, TypeVar, Dict, Any, Union
from dpu_utils.utils import RichPath
from pathlib import Path
JobType = TypeVar("JobType")
ResultType = TypeVar("ResultType")
def read_file_samples(file_path: Union[Path, str]) -> List[Dict[str, Any]]:
return RichPath.create(str(file_path)).read_by_file_suffix()
def __parallel_queue_worker(
worker_id: int,
job_queue: multiprocessing.Queue,
result_queue: multiprocessing.Queue,
worker_fn: Callable[[int, JobType], Iterable[ResultType]],
):
while True:
job = job_queue.get()
# "None" is the signal for last job, put that back in for other workers and stop:
if job is None:
job_queue.put(job)
break
for result in worker_fn(worker_id, job):
result_queue.put(result)
result_queue.put(None)
def run_jobs_in_parallel(
all_jobs: List[JobType],
worker_fn: Callable[[int, JobType], Iterable[ResultType]],
received_result_callback: Callable[[ResultType], None],
finished_callback: Callable[[], None],
result_queue_size: int = 100,
) -> None:
"""
Run jobs in parallel and uses callbacks to collect results.
    :param all_jobs: Job descriptions; one at a time will be passed to worker_fn.
:param worker_fn: Worker function receiving a job; many copies may run in parallel.
Can yield results, which will be processed (one at a time) by received_result_callback.
:param received_result_callback: Called when a result was produced by any worker. Only one will run at a time.
:param finished_callback: Called when all jobs have been processed.
"""
job_queue: multiprocessing.Queue = multiprocessing.Queue(len(all_jobs) + 1)
for job in all_jobs:
job_queue.put(job)
job_queue.put(None) # Marker that we are done
# This will hold the actual results:
result_queue: multiprocessing.Queue = multiprocessing.Queue(result_queue_size)
# Create workers:
    num_workers = max(1, multiprocessing.cpu_count() - 1)  # keep at least one worker on single-core machines
workers = [
multiprocessing.Process(target=__parallel_queue_worker, args=(worker_id, job_queue, result_queue, worker_fn))
for worker_id in range(num_workers)
]
for worker in workers:
worker.start()
num_workers_finished = 0
while True:
result = result_queue.get()
if result is None:
num_workers_finished += 1
if num_workers_finished == len(workers):
finished_callback()
break
else:
received_result_callback(result)
for worker in workers:
worker.join()
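# A minimal usage sketch (not part of the original module; the worker and
# callbacks below are illustrative only). Each job is squared by a pool of
# worker processes and results are collected in the parent process.
def _example_square_worker(worker_id: int, job: int) -> Iterable[int]:
    # A worker may yield any number of results per job; here exactly one.
    yield job * job

if __name__ == "__main__":
    collected: List[int] = []
    run_jobs_in_parallel(
        all_jobs=list(range(10)),
        worker_fn=_example_square_worker,
        received_result_callback=collected.append,
        finished_callback=lambda: print("done:", sorted(collected)),
    )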
|
face2rec2.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
#curr_path = os.path.abspath(os.path.dirname(__file__))
#sys.path.append(os.path.join(curr_path, "../python"))
import mxnet as mx
import random
import argparse
import cv2
import time
import traceback
#from builtins import range
from easydict import EasyDict as edict
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
import face_preprocess
import face_image
try:
import multiprocessing
except ImportError:
multiprocessing = None
def read_list(path_in):
with open(path_in) as fin:
identities = []
last = [-1, -1]
_id = 1
while True:
line = fin.readline()
if not line:
break
item = edict()
item.flag = 0
item.image_path, label, item.bbox, item.landmark, item.aligned = face_preprocess.parse_lst_line(line)
if not item.aligned and item.landmark is None:
#print('ignore line', line)
continue
item.id = _id
item.label = [label, item.aligned]
yield item
if label!=last[0]:
if last[1]>=0:
identities.append( (last[1], _id) )
last[0] = label
last[1] = _id
_id+=1
identities.append( (last[1], _id) )
item = edict()
item.flag = 2
item.id = 0
item.label = [float(_id), float(_id+len(identities))]
yield item
for identity in identities:
item = edict()
item.flag = 2
item.id = _id
_id+=1
item.label = [float(identity[0]), float(identity[1])]
yield item
def image_encode(args, i, item, q_out):
oitem = [item.id]
#print('flag', item.flag)
if item.flag==0:
fullpath = item.image_path
header = mx.recordio.IRHeader(item.flag, item.label, item.id, 0)
#print('write', item.flag, item.id, item.label)
if item.aligned:
with open(fullpath, 'rb') as fin:
img = fin.read()
s = mx.recordio.pack(header, img)
q_out.put((i, s, oitem))
else:
img = cv2.imread(fullpath, args.color)
assert item.landmark is not None
img = face_preprocess.preprocess(img, bbox = item.bbox, landmark=item.landmark, image_size='%d,%d'%(args.image_h, args.image_w))
s = mx.recordio.pack_img(header, img, quality=args.quality, img_fmt=args.encoding)
q_out.put((i, s, oitem))
else:
header = mx.recordio.IRHeader(item.flag, item.label, item.id, 0)
#print('write', item.flag, item.id, item.label)
s = mx.recordio.pack(header, b'')
q_out.put((i, s, oitem))
def read_worker(args, q_in, q_out):
while True:
deq = q_in.get()
if deq is None:
break
i, item = deq
image_encode(args, i, item, q_out)
def write_worker(q_out, fname, working_dir):
pre_time = time.time()
count = 0
fname = os.path.basename(fname)
fname_rec = os.path.splitext(fname)[0] + '.rec'
fname_idx = os.path.splitext(fname)[0] + '.idx'
record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, fname_idx),
os.path.join(working_dir, fname_rec), 'w')
buf = {}
more = True
while more:
deq = q_out.get()
if deq is not None:
i, s, item = deq
buf[i] = (s, item)
else:
more = False
while count in buf:
s, item = buf[count]
del buf[count]
if s is not None:
#print('write idx', item[0])
record.write_idx(item[0], s)
if count % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', count)
pre_time = cur_time
count += 1
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Create an image list or \
make a record database by reading from an image list')
parser.add_argument('prefix', help='prefix of input/output lst and rec files.')
#parser.add_argument('root', help='path to folder containing images.')
cgroup = parser.add_argument_group('Options for creating image lists')
    # argparse's type=bool treats any non-empty string as True, so boolean
    # switches below use store_true / explicit string parsing instead.
    cgroup.add_argument('--list', action='store_true',
                        help='If this is set im2rec will create image list(s) by traversing root folder\
        and output to <prefix>.lst.\
        Otherwise im2rec will read <prefix>.lst and create a database at <prefix>.rec')
cgroup.add_argument('--exts', nargs='+', default=['.jpeg', '.jpg'],
help='list of acceptable image extensions.')
cgroup.add_argument('--chunks', type=int, default=1, help='number of chunks.')
cgroup.add_argument('--train-ratio', type=float, default=1.0,
help='Ratio of images to use for training.')
cgroup.add_argument('--test-ratio', type=float, default=0,
help='Ratio of images to use for testing.')
    cgroup.add_argument('--recursive', action='store_true',
                        help='If true recursively walk through subdirs and assign a unique label\
        to images in each folder. Otherwise only include images in the root folder\
        and give them label 0.')
    cgroup.add_argument('--shuffle', type=lambda v: v.lower() in ('true', '1', 'yes'), default=True,
                        help='If this is set as True, \
        im2rec will randomize the image order in <prefix>.lst')
rgroup = parser.add_argument_group('Options for creating database')
rgroup.add_argument('--quality', type=int, default=95,
help='JPEG quality for encoding, 1-100; or PNG compression for encoding, 1-9')
    rgroup.add_argument('--num-thread', type=int, default=1,
                        help='number of threads to use for encoding. order of images will be different\
        from the input list if >1. the input list will be modified to match the\
        resulting order.')
rgroup.add_argument('--color', type=int, default=1, choices=[-1, 0, 1],
help='specify the color mode of the loaded image.\
1: Loads a color image. Any transparency of image will be neglected. It is the default flag.\
0: Loads image in grayscale mode.\
-1:Loads image as such including alpha channel.')
rgroup.add_argument('--encoding', type=str, default='.jpg', choices=['.jpg', '.png'],
help='specify the encoding of the images.')
    rgroup.add_argument('--pack-label', action='store_true',
                        help='Whether to also pack multi dimensional label in the record file')
args = parser.parse_args()
args.prefix = os.path.abspath(args.prefix)
#args.root = os.path.abspath(args.root)
return args
if __name__ == '__main__':
args = parse_args()
if args.list:
pass
#make_list(args)
else:
if os.path.isdir(args.prefix):
working_dir = args.prefix
else:
working_dir = os.path.dirname(args.prefix)
prop = face_image.load_property(working_dir)
image_size = prop.image_size
print('image_size', image_size)
args.image_h = image_size[0]
args.image_w = image_size[1]
files = [os.path.join(working_dir, fname) for fname in os.listdir(working_dir)
if os.path.isfile(os.path.join(working_dir, fname))]
count = 0
for fname in files:
if fname.startswith(args.prefix) and fname.endswith('.lst'):
print('Creating .rec file from', fname, 'in', working_dir)
count += 1
image_list = read_list(fname)
# -- write_record -- #
if args.num_thread > 1 and multiprocessing is not None:
q_in = [multiprocessing.Queue(1024) for i in range(args.num_thread)]
q_out = multiprocessing.Queue(1024)
read_process = [multiprocessing.Process(target=read_worker, args=(args, q_in[i], q_out)) \
for i in range(args.num_thread)]
for p in read_process:
p.start()
write_process = multiprocessing.Process(target=write_worker, args=(q_out, fname, working_dir))
write_process.start()
for i, item in enumerate(image_list):
q_in[i % len(q_in)].put((i, item))
for q in q_in:
q.put(None)
for p in read_process:
p.join()
q_out.put(None)
write_process.join()
else:
                    print('multiprocessing not available or --num-thread is 1, falling back to single-threaded encoding')
try:
import Queue as queue
except ImportError:
import queue
q_out = queue.Queue()
fname = os.path.basename(fname)
fname_rec = os.path.splitext(fname)[0] + '.rec'
fname_idx = os.path.splitext(fname)[0] + '.idx'
record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, fname_idx),
os.path.join(working_dir, fname_rec), 'w')
cnt = 0
pre_time = time.time()
for i, item in enumerate(image_list):
image_encode(args, i, item, q_out)
if q_out.empty():
continue
_, s, item = q_out.get()
#header, _ = mx.recordio.unpack(s)
#print('write header label', header.label)
record.write_idx(item[0], s)
if cnt % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', cnt)
pre_time = cur_time
cnt += 1
        if not count:
            print('Did not find any .lst file with prefix %s' % args.prefix)
|
active_connection.py
|
from datetime import datetime
import select
import json
import threading
from flask import Blueprint, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from flask_socketio import SocketIO
from blueprints.connection import Connection, connection_schema, connections_schema
db = SQLAlchemy()
ma = Marshmallow()
active_connection_bpr = Blueprint('active_connection_blueprint', __name__)
socketio = SocketIO(cors_allowed_origins='*')
event_index = 0
active_connections = [] # Array containing all currently active connections
def notify(payload):
global event_index
payload["timestamp"] = datetime.now().isoformat()[11:-4]
payload["index"] = event_index
socketio.emit('databaseEvent', payload)
event_index += 1
def events(connection):
con = connection.con
while True:
        # If the connection is closed from outside, an exception is thrown on con.poll()
        try:
            if select.select([con], [], [], 5) != ([], [], []):
con.poll()
con.commit()
while con.notifies:
payload = con.notifies.pop().payload
data = json.loads(payload)
data["database"] = connection.database
notify(data)
except Exception as e:
break
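# Note (added remark): con.poll() / con.notifies above matches psycopg2's
# asynchronous notification API; each popped notify carries the JSON payload
# sent on the "pg_change" channel that listen_start() below subscribes to via LISTEN.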
def is_connection_valid(connection):
"""
Checks if connection can be established
"""
try:
connection.get_connection()
return True
except:
return False
@active_connection_bpr.route("/connection/<int:id>/listen-start")
def listen_start(id):
print("Listen Start")
connection = Connection.query.get(id)
if is_connection_valid(connection):
if not any(active_connection.id == connection.id for active_connection in active_connections):
pg_connection = connection.get_connection()
pg_connection.create_general_notify_event()
pg_connection.cur.execute("LISTEN pg_change;")
connection.pg_connection = pg_connection
active_connections.append(connection)
thread = threading.Thread(target=events, args=(pg_connection,))
thread.start()
return jsonify({"status": "success"})
else:
return jsonify({
"status": "error",
"message": "Connection already active"
}), 400
else:
return jsonify({
"status": "error",
"message": "Connection can't be established"
}), 400
@active_connection_bpr.route("/connection/<int:id>/listen-end")
def listen_end(id):
conEnd = None
    # The connection has to be fetched from the active connections:
    # running connections are not persisted in the database
for connection in active_connections:
if connection.id == id:
connection.pg_connection.cur.execute("UNLISTEN pg_change;")
connection.pg_connection.cur.close()
connection.pg_connection.con.close()
conEnd = connection
if conEnd:
active_connections.remove(conEnd)
return jsonify({
"status": "success",
"message": f"Connection {conEnd.id} successfully unlistened."
})
else:
return jsonify({
"status": "warning",
"message": f"Connection {id} was not active."
}), 400
@active_connection_bpr.route("/connection/<int:id>/status")
def connection_status(id):
for connection in active_connections:
if connection.id == id:
return jsonify({"connected": True})
return jsonify({"connected": False})
@active_connection_bpr.route("/connection/all/active")
def all_active_connections():
return connections_schema.jsonify(active_connections)
@active_connection_bpr.route('/connection/all', methods=['GET'])
def get_all_connections():
all_connections = Connection.query.all()
return connections_schema.jsonify(all_connections)
@active_connection_bpr.route("/connection/<int:id>/trigger", methods=["GET"])
def get_triggers(id):
for connection in active_connections:
if connection.id == id:
con = connection.get_connection()
response_data = jsonify(con.get_all_tables_with_trigger())
con.close()
return response_data
return jsonify({
"status": "error",
"message": "Triggers could not be fetched"
}), 400
@active_connection_bpr.route("/connection/<int:id>/trigger", methods=["POST"])
def create_trigger(id):
for connection in active_connections:
if connection.id == id:
con = connection.get_connection()
response_data = jsonify(con.set_trigger_for_tables(request.json))
con.close()
return response_data
return jsonify({
"status": "error",
"message": "Triggers could not be created"
}), 400
@active_connection_bpr.route("/connection/reset-index", methods=["GET"])
def reset_index():
global event_index
event_index = 0
return jsonify({
"status": "success",
"message": "Index reset successful."
}), 200
|
opencti_connector_helper.py
|
import datetime
import threading
import queue
import uuid
import requests
import pika
import logging
import json
import time
import base64
import os
from typing import Callable, Dict, Optional, Union
from sseclient import SSEClient
from pika.exceptions import UnroutableError, NackError
from pycti.api.opencti_api_client import OpenCTIApiClient
from pycti.connector.opencti_connector import OpenCTIConnector
from pycti.utils.opencti_stix2_splitter import OpenCTIStix2Splitter
EVENTS_QUEUE = queue.Queue()
def get_config_variable(
env_var: str,
yaml_path: list,
config: Dict = {},
isNumber: Optional[bool] = False,
default=None,
) -> Union[bool, int, None, str]:
"""[summary]
:param env_var: environnement variable name
:param yaml_path: path to yaml config
:param config: client config dict, defaults to {}
:param isNumber: specify if the variable is a number, defaults to False
"""
if os.getenv(env_var) is not None:
result = os.getenv(env_var)
elif yaml_path is not None:
if yaml_path[0] in config and yaml_path[1] in config[yaml_path[0]]:
result = config[yaml_path[0]][yaml_path[1]]
else:
return default
else:
return default
if result == "yes" or result == "true" or result == "True":
return True
elif result == "no" or result == "false" or result == "False":
return False
elif isNumber:
return int(result)
else:
return result
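# Illustrative usage (not part of the original module): values resolve in
# priority order environment variable > yaml config > default, e.g.
#   config = {"connector": {"log_level": "info"}}
#   get_config_variable("CONNECTOR_LOG_LEVEL", ["connector", "log_level"], config)
# returns os.environ["CONNECTOR_LOG_LEVEL"] if set, otherwise "info".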
class ListenQueue(threading.Thread):
"""Main class for the ListenQueue used in OpenCTIConnectorHelper
:param helper: instance of a `OpenCTIConnectorHelper` class
:type helper: OpenCTIConnectorHelper
:param config: dict containing client config
:type config: dict
:param callback: callback function to process queue
:type callback: callable
"""
def __init__(self, helper, config: dict, callback):
threading.Thread.__init__(self)
self.pika_credentials = None
self.pika_parameters = None
self.pika_connection = None
self.channel = None
self.helper = helper
self.callback = callback
self.host = config["connection"]["host"]
self.port = config["connection"]["port"]
self.user = config["connection"]["user"]
self.password = config["connection"]["pass"]
self.queue_name = config["listen"]
# noinspection PyUnusedLocal
def _process_message(self, channel, method, properties, body):
"""process a message from the rabbit queue
:param channel: channel instance
:type channel: callable
:param method: message methods
:type method: callable
:param properties: unused
:type properties: str
:param body: message body (data)
:type body: str or bytes or bytearray
"""
json_data = json.loads(body)
thread = threading.Thread(target=self._data_handler, args=[json_data])
thread.start()
while thread.is_alive(): # Loop while the thread is processing
self.pika_connection.sleep(1.0)
logging.info(
"Message (delivery_tag="
+ str(method.delivery_tag)
+ ") processed, thread terminated"
)
channel.basic_ack(delivery_tag=method.delivery_tag)
def _data_handler(self, json_data):
# Set the API headers
work_id = json_data["internal"]["work_id"]
applicant_id = json_data["internal"]["applicant_id"]
self.helper.work_id = work_id
if applicant_id is not None:
self.helper.applicant_id = applicant_id
self.helper.api.set_applicant_id_header(applicant_id)
# Execute the callback
try:
self.helper.api.work.to_received(
work_id, "Connector ready to process the operation"
)
message = self.callback(json_data["event"])
self.helper.api.work.to_processed(work_id, message)
except Exception as e:
logging.exception("Error in message processing, reporting error to API")
try:
self.helper.api.work.to_processed(work_id, str(e), True)
except:
logging.error("Failing reporting the processing")
def run(self):
while True:
try:
# Connect the broker
self.pika_credentials = pika.PlainCredentials(self.user, self.password)
self.pika_parameters = pika.ConnectionParameters(
self.host, self.port, "/", self.pika_credentials
)
self.pika_connection = pika.BlockingConnection(self.pika_parameters)
self.channel = self.pika_connection.channel()
self.channel.basic_consume(
queue=self.queue_name, on_message_callback=self._process_message
)
self.channel.start_consuming()
except (KeyboardInterrupt, SystemExit):
self.helper.log_info("Connector stop")
exit(0)
except Exception as e:
self.helper.log_error(str(e))
time.sleep(10)
class PingAlive(threading.Thread):
def __init__(self, connector_id, api, get_state, set_state):
threading.Thread.__init__(self)
self.connector_id = connector_id
self.in_error = False
self.api = api
self.get_state = get_state
self.set_state = set_state
def ping(self):
while True:
try:
initial_state = self.get_state()
result = self.api.connector.ping(self.connector_id, initial_state)
remote_state = (
json.loads(result["connector_state"])
if len(result["connector_state"]) > 0
else None
)
if initial_state != remote_state:
self.set_state(result["connector_state"])
logging.info(
'Connector state has been remotely reset to: "'
+ self.get_state()
+ '"'
)
if self.in_error:
self.in_error = False
logging.error("API Ping back to normal")
except Exception:
self.in_error = True
logging.error("Error pinging the API")
time.sleep(40)
def run(self):
logging.info("Starting ping alive thread")
self.ping()
class StreamCatcher(threading.Thread):
def __init__(
self,
opencti_url,
opencti_token,
connector_last_event_id,
last_event_id,
stream_connection_id,
):
threading.Thread.__init__(self)
self.opencti_url = opencti_url
self.opencti_token = opencti_token
self.connector_last_event_id = connector_last_event_id
self.last_event_id = last_event_id
self.stream_connection_id = stream_connection_id
def get_range(self, from_id):
payload = {
"from": from_id,
"size": 2000,
"connectionId": self.stream_connection_id,
}
headers = {"Authorization": "Bearer " + self.opencti_token}
r = requests.post(
self.opencti_url + "/stream/history", json=payload, headers=headers
)
return r.json()["lastEventId"]
def run(self):
from_event_id = self.connector_last_event_id
from_event_timestamp = 0
last_event_timestamp = int(self.last_event_id.split("-")[0])
while (
from_event_timestamp <= last_event_timestamp
and from_event_id != self.last_event_id
):
from_event_id = self.get_range(from_event_id)
from_event_timestamp = int(from_event_id.split("-")[0])
logging.info("Events catchup requests done.")
class StreamProcessor(threading.Thread):
def __init__(self, message_callback, get_state, set_state):
threading.Thread.__init__(self)
self.message_callback = message_callback
self.get_state = get_state
self.set_state = set_state
def run(self):
logging.info("All old events processed, consuming is now LIVE!")
while True:
msg = EVENTS_QUEUE.get(block=True, timeout=None)
self.message_callback(msg)
state = self.get_state()
if state is not None:
state["connectorLastEventId"] = msg.id
self.set_state(state)
else:
self.set_state({"connectorLastEventId": msg.id})
class OpenCTIConnectorHelper:
"""Python API for OpenCTI connector
:param config: Dict standard config
:type config: dict
"""
def __init__(self, config: dict):
# Load API config
self.opencti_url = get_config_variable(
"OPENCTI_URL", ["opencti", "url"], config
)
self.opencti_token = get_config_variable(
"OPENCTI_TOKEN", ["opencti", "token"], config
)
self.opencti_ssl_verify = get_config_variable(
"OPENCTI_SSL_VERIFY", ["opencti", "ssl_verify"], config, False, True
)
# Load connector config
self.connect_id = get_config_variable(
"CONNECTOR_ID", ["connector", "id"], config
)
self.connect_type = get_config_variable(
"CONNECTOR_TYPE", ["connector", "type"], config
)
self.connect_name = get_config_variable(
"CONNECTOR_NAME", ["connector", "name"], config
)
self.connect_confidence_level = get_config_variable(
"CONNECTOR_CONFIDENCE_LEVEL",
["connector", "confidence_level"],
config,
True,
)
self.connect_scope = get_config_variable(
"CONNECTOR_SCOPE", ["connector", "scope"], config
)
self.connect_auto = get_config_variable(
"CONNECTOR_AUTO", ["connector", "auto"], config, False, False
)
self.log_level = get_config_variable(
"CONNECTOR_LOG_LEVEL", ["connector", "log_level"], config
)
# Configure logger
numeric_level = getattr(logging, self.log_level.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError("Invalid log level: " + self.log_level)
logging.basicConfig(level=numeric_level)
# Initialize configuration
self.api = OpenCTIApiClient(
self.opencti_url, self.opencti_token, self.log_level
)
# Register the connector in OpenCTI
self.connector = OpenCTIConnector(
self.connect_id,
self.connect_name,
self.connect_type,
self.connect_scope,
self.connect_auto,
)
connector_configuration = self.api.connector.register(self.connector)
logging.info("Connector registered with ID:" + self.connect_id)
self.connector_id = connector_configuration["id"]
self.work_id = None
self.applicant_id = connector_configuration["connector_user"]["id"]
self.connector_state = connector_configuration["connector_state"]
self.config = connector_configuration["config"]
# Start ping thread
self.ping = PingAlive(
self.connector.id, self.api, self.get_state, self.set_state
)
self.ping.start()
def get_name(self):
return self.connect_name
def set_state(self, state) -> None:
"""sets the connector state
:param state: state object
:type state: dict
"""
self.connector_state = json.dumps(state)
def get_state(self):
"""get the connector state
        :return: the current state of the connector, if any
        :rtype: dict or None
"""
try:
return (
None
if self.connector_state is None
else json.loads(self.connector_state)
)
except:
return None
    def listen(self, message_callback: Callable[[Dict], str]) -> None:
        """listen for messages and register callback function
        :param message_callback: callback function to process messages
        :type message_callback: Callable[[Dict], str]
        """
listen_queue = ListenQueue(self, self.config, message_callback)
listen_queue.start()
def listen_stream(
self, message_callback, url=None, token=None, verify=None
) -> None:
"""listen for messages and register callback function
:param message_callback: callback function to process messages
"""
current_state = self.get_state()
if current_state is None:
current_state = {"connectorLastEventId": "-"}
# Get the last event ID with the "connected" event msg
if url is not None and token is not None:
opencti_ssl_verify = verify if verify is not None else True
logging.info(
"Starting listening stream events with SSL verify to: "
+ str(opencti_ssl_verify)
)
messages = SSEClient(
url + "/stream",
headers={"Authorization": "Bearer " + token},
verify=opencti_ssl_verify,
)
else:
logging.info(
"Starting listening stream events with SSL verify to: "
+ str(self.opencti_ssl_verify)
)
messages = SSEClient(
self.opencti_url + "/stream",
headers={"Authorization": "Bearer " + self.opencti_token},
verify=self.opencti_ssl_verify,
)
# Create processor thread
processor_thread = StreamProcessor(
message_callback, self.get_state, self.set_state
)
last_event_id = None
for msg in messages:
try:
data = json.loads(msg.data)
except:
logging.error("Failed to load JSON: " + msg.data)
continue
if msg.event == "heartbeat":
logging.info("HEARTBEAT:" + str(msg))
continue
elif msg.event == "connected":
last_event_id = data["lastEventId"]
stream_connection_id = data["connectionId"]
# Launch processor if up to date
if current_state["connectorLastEventId"] == last_event_id:
processor_thread.start()
# Launch catcher if not up to date
if last_event_id != current_state["connectorLastEventId"]:
logging.info(
"Some events have not been processed, catching them..."
)
if url is not None and token is not None:
catcher_thread = StreamCatcher(
url,
token,
current_state["connectorLastEventId"],
last_event_id,
stream_connection_id,
)
else:
catcher_thread = StreamCatcher(
self.opencti_url,
self.opencti_token,
current_state["connectorLastEventId"],
last_event_id,
stream_connection_id,
)
catcher_thread.start()
else:
# If receiving the last message, launch processor
if msg.id == last_event_id:
message_callback(msg)
processor_thread.start()
elif "catchup" not in data:
EVENTS_QUEUE.put(msg)
else:
message_callback(msg)
state = self.get_state()
if state is not None:
state["connectorLastEventId"] = msg.id
self.set_state(state)
else:
self.set_state({"connectorLastEventId": msg.id})
def get_opencti_url(self):
return self.opencti_url
def get_opencti_token(self):
return self.opencti_token
def get_connector(self):
return self.connector
def log_error(self, msg):
logging.error(msg)
def log_info(self, msg):
logging.info(msg)
def date_now(self) -> str:
"""get the current date (UTC)
:return: current datetime for utc
:rtype: str
"""
return (
datetime.datetime.utcnow()
.replace(microsecond=0, tzinfo=datetime.timezone.utc)
.isoformat()
)
# Push Stix2 helper
def send_stix2_bundle(self, bundle, **kwargs) -> list:
"""send a stix2 bundle to the API
:param work_id: a valid work id
:param bundle: valid stix2 bundle
:type bundle:
:param entities_types: list of entities, defaults to None
:type entities_types: list, optional
:param update: whether to updated data in the database, defaults to False
:type update: bool, optional
:raises ValueError: if the bundle is empty
:return: list of bundles
:rtype: list
"""
work_id = kwargs.get("work_id", self.work_id)
entities_types = kwargs.get("entities_types", None)
update = kwargs.get("update", False)
if entities_types is None:
entities_types = []
stix2_splitter = OpenCTIStix2Splitter()
bundles = stix2_splitter.split_bundle(bundle)
if len(bundles) == 0:
raise ValueError("Nothing to import")
if work_id is not None:
self.api.work.add_expectations(work_id, len(bundles))
pika_credentials = pika.PlainCredentials(
self.config["connection"]["user"], self.config["connection"]["pass"]
)
pika_parameters = pika.ConnectionParameters(
self.config["connection"]["host"],
self.config["connection"]["port"],
"/",
pika_credentials,
)
pika_connection = pika.BlockingConnection(pika_parameters)
channel = pika_connection.channel()
for sequence, bundle in enumerate(bundles, start=1):
self._send_bundle(
channel,
bundle,
work_id=work_id,
entities_types=entities_types,
sequence=sequence,
update=update,
)
channel.close()
return bundles
def _send_bundle(self, channel, bundle, **kwargs) -> None:
"""send a STIX2 bundle to RabbitMQ to be consumed by workers
:param channel: RabbitMQ channel
:type channel: callable
:param bundle: valid stix2 bundle
:type bundle:
:param entities_types: list of entity types, defaults to None
:type entities_types: list, optional
:param update: whether to update data in the database, defaults to False
:type update: bool, optional
"""
work_id = kwargs.get("work_id", None)
sequence = kwargs.get("sequence", 0)
update = kwargs.get("update", False)
entities_types = kwargs.get("entities_types", None)
if entities_types is None:
entities_types = []
# Validate the STIX 2 bundle
# validation = validate_string(bundle)
# if not validation.is_valid:
# raise ValueError('The bundle is not a valid STIX2 JSON')
# Prepare the message
# if self.current_work_id is None:
# raise ValueError('The job id must be specified')
message = {
"applicant_id": self.applicant_id,
"action_sequence": sequence,
"entities_types": entities_types,
"content": base64.b64encode(bundle.encode("utf-8")).decode("utf-8"),
"update": update,
}
if work_id is not None:
message["work_id"] = work_id
# Send the message
try:
routing_key = "push_routing_" + self.connector_id
channel.basic_publish(
exchange=self.config["push_exchange"],
routing_key=routing_key,
body=json.dumps(message),
properties=pika.BasicProperties(
delivery_mode=2, # make message persistent
),
)
logging.info("Bundle has been sent")
except (UnroutableError, NackError) as e:
logging.error("Unable to send bundle, retry...", e)
self._send_bundle(channel, bundle, **kwargs)
def split_stix2_bundle(self, bundle) -> list:
"""splits a valid stix2 bundle into a list of bundles
:param bundle: valid stix2 bundle
:type bundle:
:raises Exception: if data is not valid JSON
:return: returns a list of bundles
:rtype: list
"""
self.cache_index = {}
self.cache_added = []
try:
bundle_data = json.loads(bundle)
except:
raise Exception("File data is not a valid JSON")
# validation = validate_parsed_json(bundle_data)
# if not validation.is_valid:
# raise ValueError('The bundle is not a valid STIX2 JSON:' + bundle)
# Index all objects by id
for item in bundle_data["objects"]:
self.cache_index[item["id"]] = item
bundles = []
# Reports must be handled because of object_refs
for item in bundle_data["objects"]:
if item["type"] == "report":
items_to_send = self.stix2_deduplicate_objects(
self.stix2_get_report_objects(item)
)
for item_to_send in items_to_send:
self.cache_added.append(item_to_send["id"])
bundles.append(self.stix2_create_bundle(items_to_send))
# Relationships not added in previous reports
for item in bundle_data["objects"]:
if item["type"] == "relationship" and item["id"] not in self.cache_added:
items_to_send = self.stix2_deduplicate_objects(
self.stix2_get_relationship_objects(item)
)
for item_to_send in items_to_send:
self.cache_added.append(item_to_send["id"])
bundles.append(self.stix2_create_bundle(items_to_send))
# Entities not added in previous reports and relationships
for item in bundle_data["objects"]:
if item["type"] != "relationship" and item["id"] not in self.cache_added:
items_to_send = self.stix2_deduplicate_objects(
self.stix2_get_entity_objects(item)
)
for item_to_send in items_to_send:
self.cache_added.append(item_to_send["id"])
bundles.append(self.stix2_create_bundle(items_to_send))
return bundles
def stix2_get_embedded_objects(self, item) -> dict:
"""gets created and marking refs for a stix2 item
:param item: valid stix2 item
:type item:
        :return: a dict with "object_marking_refs" and "created_by_ref"
:rtype: dict
"""
# Marking definitions
object_marking_refs = []
if "object_marking_refs" in item:
for object_marking_ref in item["object_marking_refs"]:
if object_marking_ref in self.cache_index:
object_marking_refs.append(self.cache_index[object_marking_ref])
# Created by ref
created_by_ref = None
if "created_by_ref" in item and item["created_by_ref"] in self.cache_index:
created_by_ref = self.cache_index[item["created_by_ref"]]
return {
"object_marking_refs": object_marking_refs,
"created_by_ref": created_by_ref,
}
def stix2_get_entity_objects(self, entity) -> list:
"""process a stix2 entity
:param entity: valid stix2 entity
:type entity:
:return: entity objects as list
:rtype: list
"""
items = [entity]
# Get embedded objects
embedded_objects = self.stix2_get_embedded_objects(entity)
# Add created by ref
if embedded_objects["created_by_ref"] is not None:
items.append(embedded_objects["created_by_ref"])
# Add marking definitions
if len(embedded_objects["object_marking_refs"]) > 0:
items = items + embedded_objects["object_marking_refs"]
return items
def stix2_get_relationship_objects(self, relationship) -> list:
"""get a list of relations for a stix2 relationship object
:param relationship: valid stix2 relationship
:type relationship:
:return: list of relations objects
:rtype: list
"""
items = [relationship]
# Get source ref
if relationship["source_ref"] in self.cache_index:
items.append(self.cache_index[relationship["source_ref"]])
# Get target ref
if relationship["target_ref"] in self.cache_index:
items.append(self.cache_index[relationship["target_ref"]])
# Get embedded objects
embedded_objects = self.stix2_get_embedded_objects(relationship)
# Add created by ref
if embedded_objects["created_by"] is not None:
items.append(embedded_objects["created_by"])
# Add marking definitions
if len(embedded_objects["object_marking_refs"]) > 0:
items = items + embedded_objects["object_marking_refs"]
return items
def stix2_get_report_objects(self, report) -> list:
"""get a list of items for a stix2 report object
:param report: valid stix2 report object
:type report:
:return: list of items for a stix2 report object
:rtype: list
"""
items = [report]
# Add all object refs
for object_ref in report["object_refs"]:
items.append(self.cache_index[object_ref])
for item in items:
if item["type"] == "relationship":
items = items + self.stix2_get_relationship_objects(item)
else:
items = items + self.stix2_get_entity_objects(item)
return items
@staticmethod
def stix2_deduplicate_objects(items) -> list:
"""deduplicate stix2 items
:param items: valid stix2 items
:type items:
:return: de-duplicated list of items
:rtype: list
"""
ids = []
final_items = []
for item in items:
if item["id"] not in ids:
final_items.append(item)
ids.append(item["id"])
return final_items
@staticmethod
def stix2_create_bundle(items):
"""create a stix2 bundle with items
:param items: valid stix2 items
:type items:
:return: JSON of the stix2 bundle
:rtype:
"""
bundle = {
"type": "bundle",
"id": "bundle--" + str(uuid.uuid4()),
"spec_version": "2.0",
"objects": items,
}
return json.dumps(bundle)
@staticmethod
def check_max_tlp(tlp, max_tlp) -> bool:
"""check the allowed TLP levels for a TLP string
:param tlp: string for TLP level to check
:type tlp: str
:param max_tlp: the highest allowed TLP level
:type max_tlp: str
        :return: True if tlp is allowed under max_tlp
:rtype: bool
"""
allowed_tlps = ["TLP:WHITE"]
if max_tlp == "TLP:RED":
allowed_tlps = ["TLP:WHITE", "TLP:GREEN", "TLP:AMBER", "TLP:RED"]
elif max_tlp == "TLP:AMBER":
allowed_tlps = ["TLP:WHITE", "TLP:GREEN", "TLP:AMBER"]
elif max_tlp == "TLP:GREEN":
allowed_tlps = ["TLP:WHITE", "TLP:GREEN"]
return tlp in allowed_tlps
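# Illustrative examples (not part of the original file):
#   check_max_tlp("TLP:GREEN", "TLP:AMBER") -> True   (GREEN is within AMBER)
#   check_max_tlp("TLP:AMBER", "TLP:GREEN") -> False  (AMBER exceeds the maximum)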
|
main.py
|
import socket
import threading
import pickle
import enum
import random
import math
import time
import sys
import pygame
BUFFER_SIZE = 4096
class MessageType(enum.Enum):
GAME_INFO_REQUEST = 0
GAME_INFO_SEND = 1
PLAYER_INFO_BROADCAST = 2
PLAYER_INFO = 3
NEW_PLAYER_INFO = 4
class Timer:
def __init__(self, start=False):
self.paused = not start
self.time_elapsed = 0
self.__time_point = time.time()
def reset(self):
self.time_elapsed = 0
def pause(self):
self.paused = True
def resume(self):
self.paused = False
def start(self):
self.reset()
self.resume()
def stop(self):
self.reset()
self.pause()
def update(self):
if not self.paused:
self.time_elapsed += time.time() - self.__time_point
self.__time_point = time.time()
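# Usage sketch (illustrative, not part of the original file): a Timer only
# accumulates wall-clock time while unpaused, and expects update() to be
# called regularly, e.g. once per game-loop frame:
#   t = Timer(start=True)
#   ...per-frame work...
#   t.update(); print(t.time_elapsed)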
class Message:
def __init__(self, type, data=None):
self.type = type
self.data = data
class PlayerState(enum.Enum):
CURRENT = 0
ONLINE = 1
OFFLINE = 2
DEAD = 3
class PlayerConnection:
def __init__(self, sock, addr, player):
self.sock = sock
self.address = addr
self.player = player
self.__active = True
def disconnect(self):
self.sock.close()
self.player.state = PlayerState.OFFLINE
self.__active = False
def is_active(self):
return self.__active
class Bullet:
def __init__(self,
owner_id,
position=pygame.math.Vector2(),
velocity=pygame.math.Vector2(),
angle=0):
self.owner_id = owner_id
self.position = pygame.math.Vector2(position)
self.velocity = pygame.math.Vector2(velocity)
self.angle = angle
self.speed = 5
self.rect = pygame.Rect(self.position[0], self.position[1], 5, 5)
self.destroyed = False
self.start_position = pygame.math.Vector2(self.position)
def traveled_distance(self):
vector = self.start_position - self.position
distance = (vector[0] ** 2 + vector[1] ** 2) ** 0.5
return distance
def update(self, game):
self.position += self.velocity
self.rect = pygame.Rect(self.position[0], self.position[1], 5, 5)
if self.traveled_distance() > 500:
self.destroyed = True
players = [p for p in game.players if p.id != self.owner_id]
for player in players:
if self.rect.colliderect(player.rect):
player.state = PlayerState.DEAD
player.velocity += self.velocity / 2
self.destroyed = True
def draw(self, surface):
size = (5, 5)
image = pygame.Surface(size, pygame.SRCALPHA, 32)
image.fill((255, 0, 255))
image = pygame.transform.rotate(image, self.angle)
surface.blit(image, (self.position[0] - image.get_size()[0] / 2,
self.position[1] - image.get_size()[1] / 2))
def dump_info(self):
info = {
'position': self.position,
'velocity': self.velocity,
'angle': self.angle,
'destroyed': self.destroyed,
'owner_id': self.owner_id
}
return info
def load_info(self, info):
self.position = info['position']
self.velocity = info['velocity']
self.angle = info['angle']
self.destroyed = info['destroyed']
self.owner_id = info['owner_id']
class Player:
def __init__(self, id):
self.id = id
self.position = pygame.math.Vector2(0, 0)
self.velocity = pygame.math.Vector2(0, 0)
self.angle = 0
self.speed = 2
self.state = PlayerState.ONLINE
self.rect = pygame.Rect(self.position[0], self.position[1], 32, 32)
self.control_left = False
self.control_right = False
self.control_up = False
self.control_down = False
self.control_lmbutton = False
self.attacking = False
self.attack_cooldown = 0.2
self.attack_timer = Timer()
def update(self, game):
self.position += self.velocity
self.rect.center = self.position
self.velocity *= 0.9
self.attack_timer.update()
if self.attack_timer.time_elapsed > self.attack_cooldown:
self.attack_timer.stop()
if self.control_lmbutton:
if not self.attacking and self.attack_timer.paused:
self.attacking = True
self.attack_timer.start()
else:
self.attacking = False
if self.control_left:
self.velocity[0] = -self.speed
if self.control_right:
self.velocity[0] = self.speed
if self.control_up:
self.velocity[1] = -self.speed
if self.control_down:
self.velocity[1] = self.speed
def draw(self, surface, pivot):
if self.state == PlayerState.CURRENT:
color = (255, 0, 0)
elif self.state == PlayerState.ONLINE:
color = (0, 0, 255)
elif self.state == PlayerState.OFFLINE:
color = (96, 96, 96)
elif self.state == PlayerState.DEAD:
color = (50, 50, 50)
image = pygame.Surface((32, 32), pygame.SRCALPHA, 32)
image.fill(color=color)
#screen_center = pygame.math.Vector2(surface.get_size()) / 2
#vector = self.position - pivot
#relative_position = screen_center + vector
image = pygame.transform.rotate(image, self.angle)
surface.blit(image, (self.position[0] - image.get_size()[0] / 2,
self.position[1] - image.get_size()[1] / 2))
def dump_info(self):
info = {
'id': self.id,
'position': self.position,
'velocity': self.velocity,
'angle': self.angle,
'state': self.state,
'attacking': self.attacking
}
return info
def load_info(self, info):
self.id = info['id']
self.position = info['position']
self.velocity = info['velocity']
self.angle = info['angle']
self.state = info['state']
self.attacking = info['attacking']
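    # Note on turn_to below: math.atan2 operates in screen coordinates where
    # +y points down, so the result is negated to get the counter-clockwise
    # degree convention that pygame.transform.rotate expects.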
def turn_to(self, point):
rel_x, rel_y = point - self.position
self.angle = -math.degrees(math.atan2(rel_y, rel_x))
def control(self, event):
if self.state != PlayerState.DEAD:
if event.type == pygame.KEYDOWN:
if event.key == ord('a'):
self.control_left = True
elif event.key == ord('d'):
self.control_right = True
elif event.key == ord('w'):
self.control_up = True
elif event.key == ord('s'):
self.control_down = True
elif event.type == pygame.KEYUP:
if event.key == ord('a'):
self.control_left = False
elif event.key == ord('d'):
self.control_right = False
elif event.key == ord('w'):
self.control_up = False
elif event.key == ord('s'):
self.control_down = False
elif event.type == pygame.MOUSEBUTTONDOWN:
self.control_lmbutton = True
elif event.type == pygame.MOUSEBUTTONUP:
self.control_lmbutton = False
class Game:
def __init__(self):
self.players = []
self.bullets = []
def update(self):
for player in self.players:
player.update(self)
for bullet in self.bullets:
bullet.update(self)
self.bullets = [b for b in self.bullets if not b.destroyed]
def draw(self, surface, center):
surface.fill((30, 30, 30))
for player in self.players:
player.draw(surface, center)
for bullet in self.bullets:
bullet.draw(surface)
def dump_info(self):
info = {
'players': [p.dump_info() for p in self.players],
'bullets': [b.dump_info() for b in self.bullets],
}
return info
def load_info(self, info):
for pi in info['players']:
player_exists = False
for p in self.players:
if p.id == pi['id']:
player_exists = True
p.load_info(pi)
break
if not player_exists:
player = Player(len(self.players))
player.load_info(pi)
self.players.append(player)
self.bullets = []
for bi in info['bullets']:
b = Bullet(bi['owner_id'])
b.load_info(bi)
self.bullets.append(b)
class GameServer:
def __init__(self, server_address):
self.game = Game()
self.connections = []
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server_socket.bind(server_address)
self.server_socket.listen(1)
threading.Thread(target=self.accept_clients).start()
def accept_clients(self):
while True:
sock, address = self.server_socket.accept()
print("Client {} connected.".format(address))
player = Player(len(self.connections))
self.connections.append(PlayerConnection(sock, address, player))
self.game.players.append(player)
player.position = pygame.math.Vector2(random.randint(0, 400),
random.randint(0, 300))
player_info = player.dump_info()
message = Message(MessageType.NEW_PLAYER_INFO, player_info)
data = pickle.dumps(message)
sock.sendall(data)
def handle_message(self, player_conn, message):
if message.type == MessageType.GAME_INFO_REQUEST:
game_info = self.game.dump_info()
message = Message(MessageType.GAME_INFO_SEND, game_info)
data = pickle.dumps(message)
player_conn.sock.sendall(data)
elif message.type == MessageType.PLAYER_INFO:
player_info = message.data
if player_info['attacking']:
bullet = Bullet(player_info['id'])
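                # The throwaway Bullet above is created only to read the
                # default bullet speed; the -270 offset below maps the
                # player's facing angle onto the (sin, cos) unit heading used
                # to build the bullet's velocity.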
angle = player_info['angle'] - 270
angle_radians = math.radians(angle)
velocity = pygame.math.Vector2(math.sin(angle_radians),
math.cos(angle_radians))
velocity *= bullet.speed
position = player_info['position'] + velocity
bullet = Bullet(owner_id=player_info['id'],
position=position,
velocity=velocity,
angle=angle)
self.game.bullets.append(bullet)
for player in self.game.players:
if player.state == PlayerState.CURRENT:
player.state = PlayerState.ONLINE
if player.id == player_info['id']:
player.load_info(player_info)
game_info = self.game.dump_info()
message = Message(MessageType.GAME_INFO_SEND, game_info)
data = pickle.dumps(message)
player_conn.sock.sendall(data)
def loop(self):
while True:
self.game.update()
for conn in [c for c in self.connections if c.is_active()]:
try:
data = conn.sock.recv(BUFFER_SIZE)
message = pickle.loads(data)
self.handle_message(conn, message)
                except (pickle.UnpicklingError, EOFError):
                    pass
except ConnectionResetError:
print("Client {} disconnected.".format(conn.address))
conn.disconnect()
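            # NOTE: each recv() here is assumed to deliver exactly one pickled
            # Message. TCP is a byte stream, so large or coalesced payloads
            # could fail to unpickle; length-prefix framing would make this
            # loop robust.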
class GameClient:
def __init__(self, server_address):
self.player = None
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client_socket.connect(server_address)
self.game = Game()
data = self.client_socket.recv(BUFFER_SIZE)
message = pickle.loads(data)
self.handle_message(message)
pygame.init()
screen_size = (800, 600)
self.clock = pygame.time.Clock()
self.display = pygame.display.set_mode(screen_size)
pygame.display.set_caption('2D Multiplayer Test')
self.done = False
self.listening_thread = threading.Thread(target=self.listen_to_server)
self.listening_thread.daemon = True
self.listening_thread.start()
def listen_to_server(self):
while not self.done:
data = self.client_socket.recv(BUFFER_SIZE)
if data:
message = pickle.loads(data)
self.handle_message(message)
def handle_message(self, message):
if message.type == MessageType.NEW_PLAYER_INFO:
player_info = message.data
self.player = Player(player_info['id'])
self.player.load_info(player_info)
self.player.state = PlayerState.CURRENT
self.game.players.append(self.player)
elif message.type == MessageType.GAME_INFO_SEND:
player_info = self.player.dump_info()
if player_info['state'] == PlayerState.CURRENT:
player_info['state'] = PlayerState.ONLINE
game_info = message.data
self.game.load_info(game_info)
if player_info['state'] == PlayerState.ONLINE:
player_info['state'] = PlayerState.CURRENT
self.player.load_info(player_info)
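        # The state swap above keeps this client's player authoritative: the
        # server snapshot marks every player ONLINE, so the local CURRENT flag
        # is stashed before load_info and restored afterwards.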
def loop(self):
while not self.done:
mouse = pygame.mouse.get_pos()
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.done = True
self.player.control(event)
#self.game.draw(self.display,
# self.player.rect.center + self.player.position)
self.game.draw(self.display, pygame.math.Vector2(0, 0))
self.game.update()
if self.player.state != PlayerState.DEAD:
self.player.turn_to(mouse)
pygame.display.update()
self.clock.tick(60)
message = Message(MessageType.PLAYER_INFO, self.player.dump_info())
data = pickle.dumps(message)
self.client_socket.sendall(data)
pygame.quit()
def main():
server_address = ('localhost', 5000)
if len(sys.argv) > 1:
game_server = GameServer(server_address)
game_server.loop()
else:
game_client = GameClient(server_address)
game_client.loop()
if __name__ == "__main__":
main()
|
server.py
|
import socket
import threading
import sys
import psutil
import os
import sched
import time
import ipaddress
bind_ip = socket.gethostname()
bind_port = 9991
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def start_server():
server.bind((bind_ip, bind_port))
server.listen(5)
print("[*] Escutando no endereço: %s:%d" % (bind_ip, bind_port))
def list_directories_files(path):
list_files = []
for file in os.listdir(path):
list_files.append(file)
files_string = " | ".join((list_files))
return files_string
def list_file_permissions(path):
mask = oct(os.stat(path).st_mode)[-3:]
return mask
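# Example (illustrative): list_file_permissions("/etc/hosts") typically
# returns "644" on a default Linux install.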
def list_process(name):
process_list = []
for proc in psutil.process_iter():
if proc.name() == name:
process_list.append("Nome: " + proc.name() + " Executável: " + proc.exe() + " PID: " + str(proc.pid))
process_string = " | ".join((process_list))
return process_string
def list_process_resources_consumption(pid):
consumption_list = []
consumption_list.append("Memória: " + str(round((psutil.Process(pid).memory_info().rss))/(1024*1024)) + "MB" + " | " +
"CPU: " + str(round((psutil.Process(pid).cpu_percent()), 2)) + "%" + " | " + "Tempo de CPU: " + str(psutil.Process(pid).cpu_times()))
consumption_string = " | ".join((consumption_list))
return consumption_string
def staggered_scheduling():
    start = time.time()
    print("Starting the scheduled calls.")
    scheduler = sched.scheduler(time.time, time.sleep)
    scheduler.enter(1, 1, get_network_interfaces, ())
    scheduler.enter(2, 1, list_process, ("python3",))
    scheduler.run()
    end = time.time()
    total = abs(end - start)
    data = "Finishing the scheduled calls..." + f" Total time: {total:.0f} seconds"
    return data
def list_all_processes():
process_list = []
for proc in psutil.process_iter():
process_list.append("Nome: " + proc.name() + " Executável: " + proc.exe() + " PID: " + str(proc.pid))
process_string = " | ".join((process_list))
return process_string
def get_network_interfaces():
interfaces = []
for interface in psutil.net_if_addrs():
interfaces.append(interface)
interfaces_string = " | ".join((interfaces))
return interfaces_string
def get_network_interfaces_details(interface):
data = []
data.append("IPv4: " + psutil.net_if_addrs()[interface][0][1] + " | " + "IPv6: " + psutil.net_if_addrs()[interface][1][1] + " | " + "Máscara: " + psutil.net_if_addrs()[interface][0][2])
data_string = " ".join((data))
return data_string
def get_ports(ip):
    # os.system only returns the exit status; os.popen captures nmap's output
    data = os.popen(f"nmap -p- {ip}").read()
    return data
def get_subnet_mask(ip):
data = []
net = ipaddress.ip_interface(ip)
data.append("IP: " + str(net.ip) + " | " + "Mascara: " + str(net.netmask) + " | " + "CIDR: " + str(net.network).split('/')[1] + " | " + "Rede: " + str(net.network).split('/')[0] + " | " + "Broadcast: " + str(net.network.broadcast_address))
data_string = " ".join((data))
return data_string
def scan_devices_in_network(subnet):
    # os.system only returns the exit status; os.popen captures nmap's output
    data = os.popen(f"nmap -sP {subnet}").read()
    return data
def get_interface_consumption(interface):
data = []
ipv4 = psutil.net_if_addrs()[interface][0][1]
ipv6 = psutil.net_if_addrs()[interface][1][1]
bytesecv = psutil.net_io_counters(pernic=True)[interface].bytes_recv
bytesecs = psutil.net_io_counters(pernic=True)[interface].bytes_sent
data.append("IPv4: " + ipv4 + " | " + "IPv6: " + ipv6 + " | " + "Bytes Recebidos: " + str(bytesecv) + " | " + "Bytes Enviados: " + str(bytesecs))
data_string = " ".join((data))
return data_string
def get_process_consumption(pid):
    data = []
    memory = "Memory: " + str(round(psutil.Process(pid).memory_info().rss / (1024 * 1024), 2)) + "MB"
    cpu = "CPU: " + str(psutil.Process(pid).cpu_percent()) + "%"
    data.append(memory + " | " + cpu)
    data_string = " ".join(data)
    return data_string
def handle_client(client_socket):
request = client_socket.recv(1024)
print("[*] Recebido: %s" % request)
request_data = request.decode('utf-8')
data = request_data.split(",")
if data[0] == "1":
data = staggered_scheeduling()
client_socket.send(bytes(data, 'utf-8'))
elif data[0] == "2":
interfaces = get_network_interfaces()
client_socket.send(bytes(interfaces, 'utf-8'))
sys.exit()
print(client_socket.getpeername())
client_socket.close()
def main():
start_server()
while True:
client, addr = server.accept()
print("[*] Nova conexão realizada pelo cliente %s:%d" % (addr[0], addr[1]))
client_handler = threading.Thread(target=handle_client, args=(client,))
client_handler.start()
if __name__ == "__main__":
sys.exit(main())
|
datasets.py
|
import glob
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import math
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.general import xyxy2xywh, xywh2xyxy, torch_distributed_zero_first
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(files):
# Returns a single hash value of a list of files
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
    except Exception:  # image has no EXIF data or no orientation tag
        pass
return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
rank=-1, world_size=1, workers=8):
# Make sure only the first process in DDP process the dataset first, and the following others can use the cache.
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad,
rank=rank)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
dataloader = InfiniteDataLoader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn) # torch.utils.data.DataLoader()
return dataloader, dataset
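# InfiniteDataLoader below keeps its worker processes alive across epochs: the
# batch sampler is wrapped in a _RepeatSampler that never raises StopIteration,
# and __iter__ draws exactly len(self) batches per epoch from one persistent
# iterator, avoiding the worker respawn cost of a vanilla DataLoader.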
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
""" Dataloader that reuses workers.
Uses same syntax as vanilla DataLoader.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever.
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadImages: # for inference
def __init__(self, path, img_size=640):
p = str(Path(path)) # os-agnostic
p = os.path.abspath(p) # absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception('ERROR: %s does not exist' % p)
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
(p, img_formats, vid_formats)
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nf, path), end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=640):
self.img_size = img_size
if pipe == '0':
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, 'Camera Error %s' % self.pipe
img_path = 'webcam.jpg'
print('webcam %g: ' % self.count, end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640):
self.mode = 'images'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, rank=-1):
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = str(Path(p)) # os-agnostic
parent = str(Path(p).parent) + os.sep
if os.path.isfile(p): # file
with open(p, 'r') as t:
t = t.read().splitlines()
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
elif os.path.isdir(p): # folder
f += glob.iglob(p + os.sep + '*.*')
else:
raise Exception('%s does not exist' % p)
self.img_files = sorted(
[x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats])
except Exception as e:
raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))
n = len(self.img_files)
assert n > 0, 'No images found in %s. See %s' % (path, help_url)
        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index
nb = bi[-1] + 1 # number of batches
self.n = n # number of images
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
# Define labels
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
self.label_files = [x.replace(sa, sb, 1).replace(os.path.splitext(x)[-1], '.txt') for x in self.img_files]
# Check cache
cache_path = str(Path(self.label_files[0]).parent) + '.cache' # cached labels
if os.path.isfile(cache_path):
cache = torch.load(cache_path) # load
if cache['hash'] != get_hash(self.label_files + self.img_files): # dataset changed
cache = self.cache_labels(cache_path) # re-cache
else:
cache = self.cache_labels(cache_path) # cache
# Get labels
labels, shapes = zip(*[cache[x] for x in self.img_files])
self.shapes = np.array(shapes, dtype=np.float64)
self.labels = list(labels)
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
# Cache labels
create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
pbar = enumerate(self.label_files)
if rank in [-1, 0]:
pbar = tqdm(pbar)
for i, file in pbar:
l = self.labels[i] # label
if l is not None and l.shape[0]:
assert l.shape[1] == 5, '> 5 label columns: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
                        b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
if rank in [-1, 0]:
pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
cache_path, nf, nm, ne, nd, n)
if nf == 0:
s = 'WARNING: No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
print(s)
assert not augment, '%s. Can not train without labels.' % s
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
def cache_labels(self, path='labels.cache'):
# Cache dataset labels, check images and read shapes
x = {} # dict
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for (img, label) in pbar:
try:
l = []
image = Image.open(img)
image.verify() # PIL verify
# _ = io.imread(img) # skimage verify (from skimage import io)
shape = exif_size(image) # image size
assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
if os.path.isfile(label):
with open(label, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32) # labels
if len(l) == 0:
l = np.zeros((0, 5), dtype=np.float32)
x[img] = [l, shape]
except Exception as e:
x[img] = [None, None]
print('WARNING: %s: %s' % (img, e))
x['hash'] = get_hash(self.label_files + self.img_files)
torch.save(x, path) # save for next time
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp https://arxiv.org/pdf/1710.09412.pdf
if random.random() < hyp['mixup']:
img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
img = (img * r + img2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
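                # np.random.beta(8.0, 8.0) is symmetric and concentrated near
                # 0.5, so the two mosaics usually blend with near-equal weights.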
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
x = self.labels[index]
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not mosaic:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
if self.augment:
# flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
# flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
def load_mosaic(self, index):
# loads images in a mosaic
labels4 = []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_perspective
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4 = random_perspective(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
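# Note: with mosaic_border = [-s//2, -s//2], the mosaic center (xc, yc) is
# sampled from [s/2, 3s/2] of the 2s x 2s canvas, and random_perspective is
# called with border=mosaic_border so the result is cropped back to s x s.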
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
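# Worked example (illustrative): letterboxing a 1280x720 image to 640 with
# auto=True gives r = 0.5 and a 640x360 resize, leaving 280 px of height
# padding; mod 32 reduces that to 24 px, split as 12 px top and 12 px bottom,
# for a final 640x384 image.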
def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(img[:, :, ::-1]) # base
# ax[1].imshow(img2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
if perspective:
xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
else: # affine
xy = xy[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# clip boxes
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr) & (ar < ar_thr) # candidates
def cutout(image, labels):
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
def reduce_img_size(path='path/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
path_new = path + '_reduced' # reduced images path
create_folder(path_new)
for f in tqdm(glob.glob('%s/*.*' % path)):
try:
img = cv2.imread(f)
h, w = img.shape[:2]
r = img_size / max(h, w) # size ratio
if r < 1.0:
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
cv2.imwrite(fnew, img)
        except Exception:
print('WARNING: image failure %s' % f)
def recursive_dataset2bmp(dataset='path/dataset_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
# Converts dataset to bmp (for faster training)
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
for a, b, files in os.walk(dataset):
for file in tqdm(files, desc=a):
p = a + '/' + file
s = Path(file).suffix
if s == '.txt': # replace text
with open(p, 'r') as f:
lines = f.read()
for f in formats:
lines = lines.replace(f, '.bmp')
with open(p, 'w') as f:
f.write(lines)
elif s in formats: # replace image
cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
if s != '.bmp':
os.system("rm '%s'" % p)
def imagelist2folder(path='path/images.txt'): # from utils.datasets import *; imagelist2folder()
# Copies all the images in a text file (list of images) into a folder
create_folder(path[:-4])
with open(path, 'r') as f:
for line in f.read().splitlines():
os.system('cp "%s" %s' % (line, path[:-4]))
print(line)
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
|
server.py
|
import threading
import os
import sys
import time
import datetime
import io
import re
import base64
import uuid
import queue
import logging
import traceback
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import lightgbm as lgb
import pickle
from flask import Flask, send_from_directory, jsonify, request
from flask_cors import CORS, cross_origin
# Queue of pending prediction requests
prediction_queue = queue.Queue()
# Dict of finished prediction results, keyed by request id
result_dict = dict()
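# The worker thread below polls prediction_queue, runs predict() for each
# request, and stores the outcome (or traceback) in result_dict; entries older
# than 300 seconds are purged, so clients should fetch results promptly.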
def predict_worker():
while True:
# process predict request
if not prediction_queue.empty():
predict_request = prediction_queue.get()
print('found queue')
if 'id' in predict_request and 'data' in predict_request:
print('process:' + predict_request['id'])
try:
prediction_result = predict(predict_request['data'])
result_dict[predict_request['id']] = {'result': prediction_result.to_csv(index=False),
'time': datetime.datetime.now(),
'error': None }
print('success:' + predict_request['id'])
except Exception as e:
result_dict[predict_request['id']] = {'result': None,
'time': datetime.datetime.now(),
'error': traceback.format_exc() }
print('failure:' + predict_request['id'])
# clean up old results
for key in list(result_dict.keys()):
result_time = result_dict[key]['time']
if (datetime.datetime.now() - result_time).total_seconds() > 300:
del result_dict[key]
print('result of request id[' + key + '] now removed')
time.sleep(0.1)
prediction_thread = threading.Thread(target=predict_worker)
prediction_thread.daemon = True  # do not block interpreter shutdown
prediction_thread.start()
# Load the trained LightGBM model
with open('lgb_model.pickle', mode='rb') as fp:
model = pickle.load(fp)
def predict(test_data):
    # Load the input data (fall back to the bundled test.csv when none is given)
df_test = None
if test_data is None:
df_test = pd.read_csv("test.csv")
else:
df_test = pd.read_csv(io.StringIO(test_data))
df_test.index = df_test["Id"]
df_test.drop("Id", axis = 1, inplace = True)
df_test = pd.get_dummies(df_test, drop_first=True)
    # Predict with the LightGBM model
prediction_LG = model.predict(df_test, predict_disable_shape_check = True)
    # Round the predictions to whole numbers
prediction_LG = np.round(prediction_LG)
results = pd.DataFrame({"id": df_test.index, "SalePrice": prediction_LG})
return results
app = Flask(__name__, static_folder='node_modules')
app.logger.addHandler(logging.StreamHandler(sys.stdout))
app.logger.setLevel(logging.INFO)
CORS(app)
@app.route('/')
def root_html():
return send_from_directory(os.path.abspath(os.path.dirname(__file__)),'predict.html')
@app.route('/predict.js')
def root_js():
return send_from_directory(os.path.abspath(os.path.dirname(__file__)),'predict.js')
@app.route('/default_data')
def default_data():
return send_from_directory(os.path.abspath(os.path.dirname(__file__)),'test.csv')
@app.route('/predict', methods=['POST'])
@cross_origin(origin='*')
def process_predict():
try:
data = request.get_json()['data']
predict_request_id = 'predict_id-' + str(uuid.uuid4())
print('request id:' + predict_request_id + ' created')
prediction_queue.put({'id': predict_request_id,
'data': data})
return jsonify({'status': 'success',
'requestid': predict_request_id})
except Exception as e:
return jsonify({'status': 'error',
'requestid': None})
@app.route('/result')
@cross_origin(origin='*')
def process_result():
id = request.args['requestid']
if id in result_dict:
if result_dict[id]['result'] is not None:
return jsonify({'status': 'success',
'message': '',
'result': result_dict[id]['result']})
else:
return jsonify({'status': 'error',
'message': result_dict[id]['error'],
'result': None})
else:
return jsonify({'status': 'not found'})
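# Typical client flow (illustrative): POST JSON {"data": <csv text>} to
# /predict to obtain a requestid, then poll /result?requestid=<id> until the
# worker reports 'success' or 'error'.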
# Bind to PORT if defined, otherwise default to 5000.
port = int(os.environ.get('PORT', 5000))
if __name__ == '__main__':
app.run(host='0.0.0.0', port=port, threaded=True)
|
mock_athenamp.py
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Taylor Childers (john.taylor.childers@cern.ch)
import os
import time
import logging
import multiprocessing
from pandayoda.common import serializer
logger = logging.getLogger(__name__)
try:
import yampl
except Exception:
logger.exception("Failed to import yampl")
raise
ATHENA_READY_FOR_EVENTS = 'Ready for events'
NO_MORE_EVENTS = 'No more events'
class AthenaCommunicator:
""" small class to handle yampl communication exception handling """
def __init__(self, socketname='EventService_EventRanges', context='local'):
# create server socket for yampl
try:
self.socket = yampl.ClientSocket(socketname, context)
except Exception:
logger.exception('failed to create yampl client socket')
raise
def send(self, message):
# send message using yampl
try:
self.socket.send_raw(message)
except Exception:
logger.exception("Failed to send yampl message: %s", message)
raise
def recv(self):
# receive yampl message
size, buf = self.socket.try_recv_raw()
if size == -1:
return ''
return str(buf)
def recv_block(self):
size, buf = self.socket.recv_raw()
if size == -1:
return ''
return str(buf)
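    # recv() polls with try_recv_raw() and returns '' when nothing is queued,
    # while recv_block() blocks in recv_raw() until a message arrives.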
def athenamp_worker():
logger.info('start athenamp_worker')
comm = AthenaCommunicator()
while True:
logger.info('start loop, athenamp worker')
logger.info('sending ready for events')
comm.send(ATHENA_READY_FOR_EVENTS)
logger.info('waiting for response')
msg = comm.recv_block()
logger.info('received: %s', msg)
if msg.startswith('['):
try:
                _l = serializer.deserialize(msg)[0]  # msg is a serialized list of event ranges; this mock uses only the first
except Exception:
logger.error('failed to deserialize msg')
continue
# received event ranges, sleep for a bit and return the file
time.sleep(5)
# return file info
# "/build1/tsulaia/20.3.7.5/run-es/athenaMP-workers-AtlasG4Tf-sim/worker_1/myHITS.pool.root_000.Range-6,ID:Range-6,CPU:1,WALL:1"
outputfilename = os.path.join(os.getcwd(), 'TEST' + _l['eventRangeID'] + '.ROOT')
msg = outputfilename + ',ID:' + _l['eventRangeID'] + ',CPU:1,WALL:1'
logger.info('sending output file: %s', msg)
comm.send(msg)
elif NO_MORE_EVENTS in msg:
break
logger.info('worker exiting')
def athenamp():
    # get the number of workers that are supposed to be running
workers = int(os.environ['ATHENA_PROC_NUMBER'])
logger.info('workers %d', workers)
procs = []
for i in range(workers):
p = multiprocessing.Process(target=athenamp_worker)
p.start()
procs.append(p)
for p in procs:
p.join()
logger.info('exiting')
# testing this thread
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s|%(process)s|%(thread)s|%(levelname)s|%(name)s|%(funcName)s|%(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
logging.info('Start mock athenamp')
#import argparse
#oparser = argparse.ArgumentParser()
#oparser.add_argument('-l','--jobWorkingDir', dest="jobWorkingDir", default=None, help="Job's working directory.",required=True)
#args = oparser.parse_args()
athenamp()
logger.info('exit mock')
'''
job_def = {
"GUID": "BEA4C016-E37E-0841-A448-8D664E8CD570",
"PandaID": 3298217817,
"StatusCode": 0,
"attemptNr": 3,
"checksum": "ad:363a57ab",
"cloud": "WORLD",
"cmtConfig": "x86_64-slc6-gcc47-opt",
"coreCount": 8,
"currentPriority": 851,
"ddmEndPointIn": "NERSC_DATADISK",
"ddmEndPointOut": "LRZ-LMU_DATADISK,NERSC_DATADISK",
"destinationDBlockToken": "dst:LRZ-LMU_DATADISK,dst:NERSC_DATADISK",
"destinationDblock": "mc15_13TeV.362002.Sherpa_CT10_Znunu_Pt0_70_CVetoBVeto_fac025.simul.HITS.e4376_s3022_
tid10919503_00_sub0384058277,mc15_13TeV.362002.Sherpa_CT10_Znunu_Pt0_70_CVetoBVeto_fac025.simul.log.e4376_s3022_
tid10919503_00_sub0384058278",
"destinationSE": "LRZ-LMU_C2PAP_MCORE",
"dispatchDBlockToken": "NULL",
"dispatchDBlockTokenForOut": "NULL,NULL",
"dispatchDblock": "panda.10919503.03.15.GEN.c2a897d6-ea51-4054-83a5-ce0df170c6e1_dis003287071386",
"eventService": "True",
"fileDestinationSE": "LRZ-LMU_C2PAP_MCORE,NERSC_Edison",
"fsize": "24805997",
"homepackage": "AtlasProduction/19.2.5.3",
"inFilePaths": "/scratch2/scratchdirs/dbenjami/harvester_edison/test-area/test-18/EVNT.06402143._000615.pool.root.1",
"inFiles": "EVNT.06402143._000615.pool.root.1",
"jobDefinitionID": 0,
"jobName": "mc15_13TeV.362002.Sherpa_CT10_Znunu_Pt0_70_CVetoBVeto_fac025.simul.e4376_s3022.3268661856",
"jobPars": "--inputEVNTFile=EVNT.06402143._000615.pool.root.1 --AMITag=s3022 --DBRelease=\"default:current\"
--DataRunNumber=222525 --conditionsTag \"default:OFLCOND-RUN12-SDR-19\" --firstEvent=1 --geometryVersion=
\"default:ATLAS-R2-2015-03-01-00_VALIDATION\" --maxEvents=1000 --outputHITSFile=HITS.10919503._000051.pool.root.1
--physicsList=FTFP_BERT --postInclude \"default:PyJobTransforms/UseFrontier.py\" --preInclude
\"EVNTtoHITS:SimulationJobOptions/preInclude.BeamPipeKill.py,SimulationJobOptions/preInclude.FrozenShowersFCalOnly.py,
AthenaMP/AthenaMP_EventService.py\" --randomSeed=611 --runNumber=362002 --simulator=MC12G4 --skipEvents=0 --truthStrategy=MC15aPlus",
"jobsetID": 3287071385,
"logFile": "log.10919503._000051.job.log.tgz.1.3298217817",
"logGUID": "6872598f-658b-4ecb-9a61-0e1945e44dac",
"maxCpuCount": 46981,
"maxDiskCount": 323,
"maxWalltime": 46981,
"minRamCount": 23750,
"nSent": 1,
"nucleus": "LRZ-LMU",
"outFiles": "HITS.10919503._000051.pool.root.1,log.10919503._000051.job.log.tgz.1.3298217817",
"processingType": "validation",
"prodDBlockToken": "NULL",
"prodDBlockTokenForOutput": "NULL,NULL",
"prodDBlocks": "mc15_13TeV:mc15_13TeV.362002.Sherpa_CT10_Znunu_Pt0_70_CVetoBVeto_fac025.evgen.EVNT.e4376/",
"prodSourceLabel": "managed",
"prodUserID": "glushkov",
"realDatasets": "mc15_13TeV.362002.Sherpa_CT10_Znunu_Pt0_70_CVetoBVeto_fac025.simul.HITS.e4376_s3022_tid10919503_00,
mc15_13TeV.362002.Sherpa_CT10_Znunu_Pt0_70_CVetoBVeto_fac025.simul.log.e4376_s3022_tid10919503_00",
"realDatasetsIn": "mc15_13TeV:mc15_13TeV.362002.Sherpa_CT10_Znunu_Pt0_70_CVetoBVeto_fac025.evgen.EVNT.e4376/",
"scopeIn": "mc15_13TeV",
"scopeLog": "mc15_13TeV",
"scopeOut": "mc15_13TeV",
"sourceSite": "NULL",
"swRelease": "Atlas-19.2.5",
"taskID": 10919503,
"transferType": "NULL",
"transformation": "Sim_tf.py"
}
eventranges = [
{
"GUID": "BEA4C016-E37E-0841-A448-8D664E8CD570",
"LFN": "EVNT.06402143._000615.pool.root.1",
"eventRangeID": "10919503-3298217817-8731829857-1-49",
"lastEvent": 1,
"scope": "mc15_13TeV",
"startEvent": 1
},
{
"GUID": "BEA4C016-E37E-0841-A448-8D664E8CD570",
"LFN": "EVNT.06402143._000615.pool.root.1",
"eventRangeID": "10919503-3298217817-8731829857-2-49",
"lastEvent": 2,
"scope": "mc15_13TeV",
"startEvent": 2
},
{
"GUID": "BEA4C016-E37E-0841-A448-8D664E8CD570",
"LFN": "EVNT.06402143._000615.pool.root.1",
"eventRangeID": "10919503-3298217817-8731829857-3-49",
"lastEvent": 3,
"scope": "mc15_13TeV",
"startEvent": 3
},
{
"GUID": "BEA4C016-E37E-0841-A448-8D664E8CD570",
"LFN": "EVNT.06402143._000615.pool.root.1",
"eventRangeID": "10919503-3298217817-8731829857-4-49",
"lastEvent": 4,
"scope": "mc15_13TeV",
"startEvent": 4
},
{
"GUID": "BEA4C016-E37E-0841-A448-8D664E8CD570",
"LFN": "EVNT.06402143._000615.pool.root.1",
"eventRangeID": "10919503-3298217817-8731829857-5-49",
"lastEvent": 5,
"scope": "mc15_13TeV",
"startEvent": 5
},
{
"GUID": "BEA4C016-E37E-0841-A448-8D664E8CD570",
"LFN": "EVNT.06402143._000615.pool.root.1",
"eventRangeID": "10919503-3298217817-8731829857-6-49",
"lastEvent": 6,
"scope": "mc15_13TeV",
"startEvent": 6
},
{
"GUID": "BEA4C016-E37E-0841-A448-8D664E8CD570",
"LFN": "EVNT.06402143._000615.pool.root.1",
"eventRangeID": "10919503-3298217817-8731829857-7-49",
"lastEvent": 7,
"scope": "mc15_13TeV",
"startEvent": 7
},
{
"GUID": "BEA4C016-E37E-0841-A448-8D664E8CD570",
"LFN": "EVNT.06402143._000615.pool.root.1",
"eventRangeID": "10919503-3298217817-8731829857-8-49",
"lastEvent": 8,
"scope": "mc15_13TeV",
"startEvent": 8
}
]
'''
|
testboxtasks.py
|
# -*- coding: utf-8 -*-
# $Id: testboxtasks.py $
"""
TestBox Script - Async Tasks.
"""
__copyright__ = \
"""
Copyright (C) 2012-2015 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 100880 $"
# Standard python imports.
from datetime import datetime
import os
import re
import signal;
import sys
import subprocess
import threading
import time
# Validation Kit imports.
from common import constants
from common import utils;
from common import webutils;
import testboxcommons
# Figure where we are.
try: __file__
except: __file__ = sys.argv[0];
g_ksTestScriptDir = os.path.dirname(os.path.abspath(__file__));
class TestBoxBaseTask(object):
"""
Asynchronous task employing a thread to do the actual work.
"""
## Time to wait for a task to terminate.
kcSecTerminateTimeout = 60
def __init__(self, oTestBoxScript, cSecTimeout, fnThreadProc):
self._oTestBoxScript = oTestBoxScript;
self._cSecTimeout = cSecTimeout;
self._tsSecStarted = utils.timestampSecond();
self.__oRLock = threading.RLock();
self._oCv = threading.Condition(self.__oRLock);
self._fRunning = True; # Protected by lock.
self._fShouldTerminate = False; # Protected by lock.
# Spawn the worker thread.
self._oThread = threading.Thread(target=fnThreadProc);
self._oThread.daemon = True;
self._oThread.start();
def _lock(self):
""" Take the CV lock. """
self._oCv.acquire();
def _unlock(self):
""" Release the CV lock. """
self._oCv.release();
def _complete(self):
"""
Indicate that the task is complete, waking up the main thread.
Usually called at the end of the thread procedure.
"""
self._lock();
self._fRunning = False;
self._oCv.notifyAll();
self._unlock();
def isRunning(self):
""" Check if the task is still running. """
self._lock();
fRunning = self._fRunning;
self._unlock();
return fRunning;
def wait(self, cSecTimeout):
""" Wait for the task to complete. """
self._lock();
fRunning = self._fRunning;
if fRunning is True and cSecTimeout > 0:
self._oCv.wait(cSecTimeout)
self._unlock();
return fRunning;
def terminate(self, cSecTimeout = kcSecTerminateTimeout):
""" Terminate the task. """
self._lock();
self._fShouldTerminate = True;
self._unlock();
return self.wait(cSecTimeout);
def _shouldTerminate(self):
"""
Returns True if we should terminate, False if not.
"""
self._lock();
fShouldTerminate = self._fShouldTerminate == True;
self._unlock();
return fShouldTerminate;
class TestBoxTestDriverTask(TestBoxBaseTask):
"""
Base class for tasks involving test drivers.
"""
## When to flush the backlog of log messages.
kcchMaxBackLog = 32768;
## The backlog sync time (seconds).
kcSecBackLogFlush = 30;
## The timeout for the cleanup job (5 mins).
kcSecCleanupTimeout = 300;
## The timeout to wait for the abort command before killing it.
kcSecAbortTimeout = 300;
## The timeout to wait for the final output to be processed.
kcSecFinalOutputTimeout = 180;
## The timeout to wait for the abort command output to be processed.
kcSecAbortCmdOutputTimeout = 30;
## The timeout to wait for the terminate output to be processed.
kcSecTerminateOutputTimeout = 30;
## The timeout to wait for the kill output to be processed.
kcSecKillOutputTimeout = 30;
## The timeout for talking to the test manager.
ksecTestManagerTimeout = 60;
def __init__(self, oTestBoxScript, fnThreadProc, cSecTimeout, idResult, sScriptCmdLine):
"""
Class instance init
"""
# Init our instance data.
self._idResult = idResult;
self._sScriptCmdLine = sScriptCmdLine;
self._oChild = None;
self._oBackLogLock = threading.RLock();
self._oBackLogFlushLock = threading.RLock();
self._asBackLog = [];
self._cchBackLog = 0;
self._secTsBackLogFlush = utils.timestampSecond();
# Init super.
TestBoxBaseTask.__init__(self, oTestBoxScript, cSecTimeout, fnThreadProc);
def terminate(self, cSecTimeout = kcSecCleanupTimeout):
""" Reimplement with higher default timeout. """
return TestBoxBaseTask.terminate(self, cSecTimeout);
def _logFlush(self, oGivenConnection = None):
"""
Flushes the log to the test manager.
No exceptions.
"""
fRc = True;
self._oBackLogFlushLock.acquire();
# Grab the current back log.
self._oBackLogLock.acquire();
asBackLog = self._asBackLog;
self._asBackLog = [];
self._cchBackLog = 0;
self._secTsBackLogFlush = utils.timestampSecond();
self._oBackLogLock.release();
# If there is anything to flush, flush it.
if len(asBackLog) > 0:
sBody = '';
for sLine in asBackLog:
sBody += sLine + '\n';
oConnection = None;
try:
if oGivenConnection is None:
oConnection = self._oTestBoxScript.openTestManagerConnection();
oConnection.postRequest(constants.tbreq.LOG_MAIN, {constants.tbreq.LOG_PARAM_BODY: sBody});
oConnection.close();
else:
oGivenConnection.postRequest(constants.tbreq.LOG_MAIN, {constants.tbreq.LOG_PARAM_BODY: sBody});
except Exception, oXcpt:
testboxcommons.log('_logFlush error: %s' % (oXcpt,));
if len(sBody) < self.kcchMaxBackLog * 4:
self._oBackLogLock.acquire();
asBackLog.extend(self._asBackLog);
self._asBackLog = asBackLog;
# Don't restore _cchBackLog as there is no point in retrying immediately.
self._oBackLogLock.release();
if oConnection is not None: # Be kind to apache.
try: oConnection.close();
except: pass;
fRc = False;
self._oBackLogFlushLock.release();
return fRc;
def flushLogOnConnection(self, oConnection):
"""
        Attempts to flush the log on the given connection.
No exceptions.
"""
return self._logFlush(oConnection);
def _logInternal(self, sMessage, fPrefix = True, fFlushCheck = False):
"""
Internal logging.
Won't flush the backlog, returns a flush indicator so the caller can
do it instead.
"""
if fPrefix:
try:
oNow = datetime.utcnow();
sTs = '%02u:%02u:%02u.%06u ' % (oNow.hour, oNow.minute, oNow.second, oNow.microsecond);
except Exception, oXcpt:
sTs = 'oXcpt=%s ' % (oXcpt);
sFullMsg = sTs + sMessage;
else:
sFullMsg = sMessage;
self._oBackLogLock.acquire();
self._asBackLog.append(sFullMsg);
cchBackLog = self._cchBackLog + len(sFullMsg) + 1;
self._cchBackLog = cchBackLog;
secTsBackLogFlush = self._secTsBackLogFlush;
self._oBackLogLock.release();
testboxcommons.log(sFullMsg);
return fFlushCheck \
and ( cchBackLog >= self.kcchMaxBackLog \
or utils.timestampSecond() - secTsBackLogFlush >= self.kcSecBackLogFlush);
def _log(self, sMessage):
"""
General logging function, will flush.
"""
if self._logInternal(sMessage, fFlushCheck = True):
self._logFlush();
return True;
def _reportDone(self, sResult):
"""
Report EXEC job done to the test manager.
sResult is a value from constants.result.
"""
## @todo optimize this to use one server connection.
#
# Log it.
#
assert sResult in constants.result.g_kasValidResults;
self._log('Done %s' % (sResult,));
#
# Report it.
#
fRc = True;
secStart = utils.timestampSecond();
while True:
self._logFlush(); ## @todo Combine this with EXEC_COMPLETED.
oConnection = None;
try:
oConnection = self._oTestBoxScript.openTestManagerConnection();
oConnection.postRequest(constants.tbreq.EXEC_COMPLETED, {constants.tbreq.EXEC_COMPLETED_PARAM_RESULT: sResult});
oConnection.close();
except Exception, oXcpt:
if utils.timestampSecond() - secStart < self.ksecTestManagerTimeout:
self._log('_reportDone exception (%s) - retrying...' % (oXcpt,));
time.sleep(2);
continue;
self._log('_reportDone error: %s' % (oXcpt,));
if oConnection is not None: # Be kind to apache.
try: oConnection.close();
except: pass;
fRc = False;
break;
#
# Mark the task as completed.
#
self._complete();
return fRc;
def _assembleArguments(self, sAction, fWithInterpreter = True):
"""
Creates an argument array for subprocess.Popen, splitting the
sScriptCmdLine like bourne shell would.
fWithInterpreter is used (False) when checking that the script exists.
Returns None on bad input.
"""
#
# This is a good place to export the test set id to the environment.
#
os.environ['TESTBOX_TEST_SET_ID'] = str(self._idResult);
cTimeoutLeft = utils.timestampSecond() - self._tsSecStarted;
cTimeoutLeft = 0 if cTimeoutLeft >= self._cSecTimeout else self._cSecTimeout - cTimeoutLeft;
os.environ['TESTBOX_TIMEOUT'] = str(cTimeoutLeft);
os.environ['TESTBOX_TIMEOUT_ABS'] = str(self._tsSecStarted + self._cSecTimeout);
#
# Do replacements and split the command line into arguments.
#
if self._sScriptCmdLine.find('@ACTION@') >= 0:
sCmdLine = self._sScriptCmdLine.replace('@ACTION@', sAction);
else:
sCmdLine = self._sScriptCmdLine + ' ' + sAction;
for sVar in [ 'TESTBOX_PATH_BUILDS', 'TESTBOX_PATH_RESOURCES', 'TESTBOX_PATH_SCRATCH', 'TESTBOX_PATH_SCRIPTS',
'TESTBOX_PATH_UPLOAD', 'TESTBOX_UUID', 'TESTBOX_REPORTER', 'TESTBOX_ID', 'TESTBOX_TEST_SET_ID',
'TESTBOX_TIMEOUT', 'TESTBOX_TIMEOUT_ABS' ]:
if sCmdLine.find('${' + sVar + '}') >= 0:
sCmdLine = sCmdLine.replace('${' + sVar + '}', os.environ[sVar]);
asArgs = utils.argsSplit(sCmdLine);
#
# Massage argv[0]:
# - Convert portable slashes ('/') to the flavor preferred by the
# OS we're currently running on.
        # - Run python scripts through the current python interpreter (important
        #   on systems that don't sport native hash-bang script execution).
#
asArgs[0] = asArgs[0].replace('/', os.path.sep);
if not os.path.isabs(asArgs[0]):
asArgs[0] = os.path.join(self._oTestBoxScript.getPathScripts(), asArgs[0]);
if asArgs[0].endswith('.py') and fWithInterpreter:
if sys.executable is not None and len(sys.executable) > 0:
asArgs.insert(0, sys.executable);
else:
asArgs.insert(0, 'python');
return asArgs;
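    # Illustrative example (added commentary): with a script command line of
    # 'testdriver/tst.py --quick @ACTION@' and sAction 'all', the method above
    # yields roughly [sys.executable, '<scripts>/testdriver/tst.py',
    # '--quick', 'all'] after slash conversion and interpreter insertion.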
def _outputThreadProc(self, oChild, oStdOut, sAction):
"""
Thread procedure for the thread that reads the output of the child
process. We use a dedicated thread for this purpose since non-blocking
I/O may be hard to keep portable according to hints around the web...
"""
oThread = oChild.oOutputThread;
while not oThread.fPleaseQuit:
# Get a line.
try:
sLine = oStdOut.readline();
except Exception, oXcpt:
self._log('child (%s) pipe I/O error: %s' % (sAction, oXcpt,));
break;
# EOF?
if len(sLine) == 0:
break;
# Strip trailing new line (DOS and UNIX).
if sLine.endswith("\r\n"):
sLine = sLine[0:-2];
elif sLine.endswith("\n"):
sLine = sLine[0:-1];
# Log it.
if self._logInternal(sLine, fPrefix = False, fFlushCheck = True):
self._logFlush();
# Close the stdout pipe in case we were told to get lost.
try:
oStdOut.close();
except Exception, oXcpt:
self._log('warning: Exception closing stdout pipe of "%s" child: %s' % (sAction, oXcpt,));
        # This is a bit hacky, but try to reap the child so it won't hang
        # around as defunct during abort/timeout.
if oChild.poll() is None:
for _ in range(15):
time.sleep(0.2);
if oChild.poll() is not None:
break;
oChild = None;
return None;
def _spawnChild(self, sAction):
"""
Spawns the child process, returning success indicator + child object.
"""
# Argument list.
asArgs = self._assembleArguments(sAction)
if asArgs is None:
self._log('Malformed command line: "%s"' % (self._sScriptCmdLine,));
return (False, None);
# Spawn child.
try:
oChild = subprocess.Popen(asArgs,
shell = False,
bufsize = -1,
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT,
cwd = self._oTestBoxScript.getPathSpill(),
universal_newlines = True,
close_fds = (False if utils.getHostOs() == 'win' else True),
preexec_fn = (None if utils.getHostOs() in ['win', 'os2']
else os.setsid)); # pylint: disable=E1101
except Exception, oXcpt:
self._log('Error creating child process %s: %s' % (asArgs, oXcpt));
return (False, None);
oChild.sTestBoxScriptAction = sAction;
# Start output thread, extending the child object to keep track of it.
oChild.oOutputThread = threading.Thread(target=self._outputThreadProc, args=(oChild, oChild.stdout, sAction))
oChild.oOutputThread.daemon = True;
oChild.oOutputThread.fPleaseQuit = False; # Our extension.
oChild.oOutputThread.start();
return (True, oChild);
def _monitorChild(self, cSecTimeout, fTryKillCommand = True, oChild = None):
"""
        Monitors the child process. If the child executes longer than
        cSecTimeout allows, we'll terminate it.
        Returns a success indicator and a constants.result value.
"""
if oChild is None:
oChild = self._oChild;
iProcGroup = oChild.pid;
if utils.getHostOs() in ['win', 'os2'] or iProcGroup <= 0:
iProcGroup = -2;
#
# Do timeout processing and check the health of the child.
#
sResult = constants.result.PASSED;
seStarted = utils.timestampSecond();
while True:
# Check status.
iRc = oChild.poll();
if iRc is not None:
self._log('Child doing "%s" completed with exit code %d' % (oChild.sTestBoxScriptAction, iRc));
oChild.oOutputThread.join(self.kcSecFinalOutputTimeout);
if oChild is self._oChild:
self._oChild = None;
if iRc == constants.rtexitcode.SKIPPED:
return (True, constants.result.SKIPPED);
if iRc != constants.rtexitcode.SUCCESS:
return (False, constants.result.FAILED);
return (True, constants.result.PASSED);
# Check for abort first, since that has less of a stigma.
if self._shouldTerminate() is True:
sResult = constants.result.ABORTED;
break;
# Check timeout.
cSecElapsed = utils.timestampSecond() - seStarted;
if cSecElapsed > cSecTimeout:
self._log('Timeout: %u secs (limit %u secs)' % (cSecElapsed, cSecTimeout));
sResult = constants.result.TIMED_OUT;
break;
# Wait.
cSecLeft = cSecTimeout - cSecElapsed;
oChild.oOutputThread.join(15 if cSecLeft > 15 else (cSecLeft + 1));
#
# If the child is still alive, try use the abort command to stop it
        # very gently. This lets the testdriver clean up daemon processes
# and such that our code below won't catch.
#
if fTryKillCommand and oChild.poll() is None:
self._log('Attempting to abort child...');
(fRc2, oAbortChild) = self._spawnChild('abort');
if oAbortChild is not None and fRc2 is True:
self._monitorChild(self.kcSecAbortTimeout, False, oAbortChild);
oAbortChild = None;
#
# If the child is still alive, try the polite way.
#
if oChild.poll() is None:
self._log('Attempting to terminate child doing "%s"...' % (oChild.sTestBoxScriptAction,));
if iProcGroup > 0:
try:
os.killpg(iProcGroup, signal.SIGTERM); # pylint: disable=E1101
except Exception, oXcpt:
self._log('killpg() failed: %s' % (oXcpt,));
try:
self._oChild.terminate();
oChild.oOutputThread.join(self.kcSecTerminateOutputTimeout);
except Exception, oXcpt:
self._log('terminate() failed: %s' % (oXcpt,));
#
        # If the child doesn't respond to the polite approach, kill it. Always
        # do a killpg should there be any processes left in the group.
#
if iProcGroup > 0:
try:
os.killpg(iProcGroup, signal.SIGKILL); # pylint: disable=E1101
except Exception, oXcpt:
self._log('killpg() failed: %s' % (oXcpt,));
if oChild.poll() is None:
            self._log('Attempting to kill child doing "%s"...' % (oChild.sTestBoxScriptAction,));
try:
self._oChild.kill();
oChild.oOutputThread.join(self.kcSecKillOutputTimeout);
except Exception, oXcpt:
self._log('kill() failed: %s' % (oXcpt,));
#
        # Give the whole mess a couple more seconds to respond in case the
        # output thread exited prematurely for some weird reason.
#
if oChild.poll() is None:
time.sleep(2);
time.sleep(2);
time.sleep(2);
iRc = oChild.poll();
if iRc is not None:
self._log('Child doing "%s" aborted with exit code %d' % (oChild.sTestBoxScriptAction, iRc));
else:
self._log('Child doing "%s" is still running, giving up...' % (oChild.sTestBoxScriptAction,));
            ## @todo In this case we should probably try to reboot the testbox...
oChild.oOutputThread.fPleaseQuit = True;
if oChild is self._oChild:
self._oChild = None;
return (False, sResult);
def _terminateChild(self):
"""
Terminates the child forcefully.
"""
if self._oChild is not None:
pass;
def _cleanupAfter(self):
"""
Cleans up after a test failure. (On success, cleanup is implicit.)
"""
assert self._oChild is None;
#
# Tell the script to clean up.
#
if len(self._sScriptCmdLine) > 0: # can be empty if cleanup crashed.
(fRc, self._oChild) = self._spawnChild('cleanup-after');
if fRc is True:
(fRc, _) = self._monitorChild(self.kcSecCleanupTimeout, False);
self._terminateChild();
else:
fRc = False;
#
# Wipe the stuff clean.
#
fRc2 = self._oTestBoxScript.reinitScratch(fnLog = self._log);
return fRc and fRc2;
class TestBoxCleanupTask(TestBoxTestDriverTask):
"""
Special asynchronous task for cleaning up a stale test when starting the
testbox script. It's assumed that the reason for the stale test lies in
    it causing a panic, reboot, or similar, so we'll also try to collect some
    info about recent system crashes and reboots.
"""
def __init__(self, oTestBoxScript):
# Read the old state, throwing a fit if it's invalid.
sScriptState = oTestBoxScript.getPathState();
sScriptCmdLine = self._readStateFile(os.path.join(sScriptState, 'script-cmdline.txt'));
sResultId = self._readStateFile(os.path.join(sScriptState, 'result-id.txt'));
try:
idResult = int(sResultId);
if idResult <= 0 or idResult >= 0x7fffffff:
raise Exception('');
except:
raise Exception('Invalid id value "%s" found in %s' % (sResultId, os.path.join(sScriptState, 'result-id.txt')));
sTestBoxId = self._readStateFile(os.path.join(sScriptState, 'testbox-id.txt'));
try:
self.idTestBox = int(sTestBoxId);
if self.idTestBox <= 0 or self.idTestBox >= 0x7fffffff:
raise Exception('');
except:
raise Exception('Invalid id value "%s" found in %s' % (sTestBoxId, os.path.join(sScriptState, 'testbox-id.txt')));
self.sTestBoxName = self._readStateFile(os.path.join(sScriptState, 'testbox-name.txt'));
# Init super.
TestBoxTestDriverTask.__init__(self, oTestBoxScript, self._threadProc, self.kcSecCleanupTimeout,
idResult, sScriptCmdLine);
@staticmethod
def _readStateFile(sPath):
"""
Reads a state file, returning a string on success and otherwise raising
an exception.
"""
try:
oFile = open(sPath, "rb");
sStr = oFile.read();
oFile.close();
return sStr.strip();
except Exception, oXcpt:
raise Exception('Failed to read "%s": %s' % (sPath, oXcpt));
def _threadProc(self):
"""
Perform the actual clean up on script startup.
"""
#
# First make sure we won't repeat this exercise should it turn out to
# trigger another reboot/panic/whatever.
#
sScriptCmdLine = os.path.join(self._oTestBoxScript.getPathState(), 'script-cmdline.txt');
try:
os.remove(sScriptCmdLine);
oFile = open(sScriptCmdLine, 'wb');
oFile.close();
except Exception, oXcpt:
self._log('Error truncating "%s": %s' % (sScriptCmdLine, oXcpt));
#
# Report the incident.
#
self._log('Seems we rebooted!');
self._log('script-cmdline="%s"' % (self._sScriptCmdLine));
self._log('result-id=%d' % (self._idResult));
self._log('testbox-id=%d' % (self.idTestBox));
self._log('testbox-name=%s' % (self.sTestBoxName));
self._logFlush();
# System specific info.
sOs = utils.getHostOs();
if sOs == 'darwin':
self._log('NVRAM Panic Info:\n%s\n' % (self.darwinGetPanicInfo(),));
self._logFlush();
## @todo Add some special command for reporting this situation so we get something
# useful in the event log.
#
# Do the cleaning up.
#
self._cleanupAfter();
self._reportDone(constants.result.REBOOTED);
return False;
def darwinGetPanicInfo(self):
"""
Returns a string with the aapl,panic-info content.
"""
        # Retrieve the info.
try:
sRawInfo = utils.processOutputChecked(['nvram', 'aapl,panic-info']);
except Exception, oXcpt:
return 'exception running nvram: %s' % (oXcpt,);
# Decode (%xx) and decompact it (7-bit -> 8-bit).
ahDigits = \
{
'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7,
'8': 8, '9': 9, 'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15,
};
sInfo = '';
off = len('aapl,panic-info') + 1;
iBit = 0;
bLow = 0;
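        # (Added commentary) The panic text is 7-bit ASCII packed back to back
        # into 8-bit bytes: each round emits one character from the low
        # (7 - iBit) bits of the current byte plus the iBit bits carried in
        # bLow; every 7th input byte completes two characters at once.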
while off < len(sRawInfo):
# isprint is used to determine whether to %xx or %c it, so we have to
# be a little careful before assuming % sequences are hex bytes.
if sRawInfo[off] == '%' \
and off + 3 <= len(sRawInfo) \
and sRawInfo[off + 1] in ahDigits \
and sRawInfo[off + 2] in ahDigits:
bCur = ahDigits[sRawInfo[off + 1]] * 0x10 + ahDigits[sRawInfo[off + 2]];
off += 3;
else:
bCur = ord(sRawInfo[off]);
off += 1;
sInfo += chr(((bCur & (0x7f >> iBit)) << iBit) | bLow);
bLow = bCur >> (7 - iBit);
if iBit < 6:
iBit += 1;
else:
# Final bit in sequence.
sInfo += chr(bLow);
bLow = 0;
iBit = 0;
# Expand shorthand.
sInfo = sInfo.replace('@', 'com.apple.');
sInfo = sInfo.replace('>', 'com.apple.driver.');
sInfo = sInfo.replace('|', 'com.apple.iokit.');
sInfo = sInfo.replace('$', 'com.apple.security.');
sInfo = sInfo.replace('!A', 'Apple');
sInfo = sInfo.replace('!a', 'Action');
sInfo = sInfo.replace('!B', 'Bluetooth');
sInfo = sInfo.replace('!C', 'Controller');
sInfo = sInfo.replace('!F', 'Family');
sInfo = sInfo.replace('!I', 'Intel');
sInfo = sInfo.replace('!U', 'AppleUSB');
sInfo = sInfo.replace('!P', 'Profile');
# Done.
return sInfo
class TestBoxExecTask(TestBoxTestDriverTask):
"""
    Implementation of an asynchronous EXEC task.
This uses a thread for doing the actual work, i.e. starting and monitoring
the child process, processing its output, and more.
"""
def __init__(self, oTestBoxScript, idResult, sScriptZips, sScriptCmdLine, cSecTimeout):
"""
Class instance init
"""
# Init our instance data.
self._sScriptZips = sScriptZips;
# Init super.
TestBoxTestDriverTask.__init__(self, oTestBoxScript, self._threadProc, cSecTimeout, idResult, sScriptCmdLine);
@staticmethod
def _writeStateFile(sPath, sContent):
"""
Writes a state file, raising an exception on failure.
"""
try:
oFile = open(sPath, "wb");
oFile.write(sContent);
oFile.flush();
try: os.fsync(oFile.fileno());
except: pass;
oFile.close();
except Exception, oXcpt:
raise Exception('Failed to write "%s": %s' % (sPath, oXcpt));
return True;
def _saveState(self):
"""
Saves the task state on disk so we can launch a TestBoxCleanupTask job
if the test should cause system panic or similar.
        Note! May later be extended to support tests that reboot the host.
"""
sScriptState = self._oTestBoxScript.getPathState();
try:
self._writeStateFile(os.path.join(sScriptState, 'script-cmdline.txt'), self._sScriptCmdLine);
self._writeStateFile(os.path.join(sScriptState, 'result-id.txt'), str(self._idResult));
self._writeStateFile(os.path.join(sScriptState, 'testbox-id.txt'), str(self._oTestBoxScript.getTestBoxId()));
self._writeStateFile(os.path.join(sScriptState, 'testbox-name.txt'), self._oTestBoxScript.getTestBoxName());
except Exception, oXcpt:
self._log('Failed to write state: %s' % (oXcpt,));
return False;
return True;
def _downloadAndUnpackScriptZips(self):
"""
Downloads/copies the script ZIPs into TESTBOX_SCRIPT and unzips them to
the same directory.
Raises no exceptions, returns log + success indicator instead.
"""
sPathScript = self._oTestBoxScript.getPathScripts();
asArchives = self._sScriptZips.split(',');
for sArchive in asArchives:
sArchive = sArchive.strip();
if len(sArchive) == 0:
continue;
# Figure the destination name (in scripts).
sDstFile = webutils.getFilename(sArchive);
if len(sDstFile) < 1 \
or re.search('[^a-zA-Z0-9 !#$%&\'()@^_`{}~.-]', sDstFile) is not None: # FAT charset sans 128-255 + '.'.
self._log('Malformed script zip filename: %s' % (sArchive,));
return False;
sDstFile = os.path.join(sPathScript, sDstFile);
# Do the work.
if webutils.downloadFile(sArchive, sDstFile, self._oTestBoxScript.getPathBuilds(), self._log, self._log) is not True:
return False;
asFiles = utils.unpackFile(sDstFile, sPathScript, self._log, self._log);
if asFiles is None:
return False;
            # Since zip files don't always include mode masks, set the X bit
            # on all of them so we can execute binaries and hash-bang scripts.
for sFile in asFiles:
utils.chmodPlusX(sFile);
return True;
def _threadProc(self):
"""
Do the work of an EXEC command.
"""
sResult = constants.result.PASSED;
#
# Start by preparing the scratch directories.
#
        # Note! Failures at this stage are not treated as real errors since
        #       they may be caused by the previous test and other circumstances,
        #       so we don't want to fail a build because of this.
#
fRc = self._oTestBoxScript.reinitScratch(self._logInternal);
fNeedCleanUp = fRc;
if fRc is True:
fRc = self._downloadAndUnpackScriptZips();
testboxcommons.log2('_threadProc: _downloadAndUnpackScriptZips -> %s' % (fRc,));
if fRc is not True:
sResult = constants.result.BAD_TESTBOX;
#
# Make sure the script exists.
#
if fRc is True:
sScript = self._assembleArguments('none', fWithInterpreter = False)[0];
if not os.path.exists(sScript):
self._log('The test driver script "%s" cannot be found.' % (sScript,));
sDir = sScript;
while len(sDir) > 3:
sDir = os.path.dirname(sDir);
if os.path.exists(sDir):
self._log('First existing parent directory is "%s".' % (sDir,));
break;
fRc = False;
if fRc is True:
#
# Start testdriver script.
#
fRc = self._saveState();
if fRc:
(fRc, self._oChild) = self._spawnChild('all');
testboxcommons.log2('_threadProc: _spawnChild -> %s, %s' % (fRc, self._oChild));
if fRc:
(fRc, sResult) = self._monitorChild(self._cSecTimeout);
testboxcommons.log2('_threadProc: _monitorChild -> %s' % (fRc,));
# If the run failed, do explicit cleanup.
if fRc is not True:
testboxcommons.log2('_threadProc: explicit cleanups...');
self._terminateChild();
self._cleanupAfter();
fNeedCleanUp = False;
assert self._oChild is None;
#
# Clean up scratch.
#
if fNeedCleanUp:
if self._oTestBoxScript.reinitScratch(self._logInternal) is not True:
self._log('post run reinitScratch failed.');
fRc = False;
#
# Report status and everything back to the test manager.
#
if fRc is False and sResult == constants.result.PASSED:
sResult = constants.result.FAILED;
self._reportDone(sResult);
return fRc;
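# --- Illustrative sketch (added commentary; not part of the original file) ---
# A minimal, self-contained model of the TestBoxBaseTask wait()/_complete()
# handshake, reusing this module's threading/time imports: the worker flips
# _fRunning under the condition variable and wakes any waiter.  All names
# below (_MiniTask and friends) are hypothetical.
if __name__ == '__main__':
    class _MiniTask(object):
        def __init__(self):
            self._oCv = threading.Condition();
            self._fRunning = True;
            oThread = threading.Thread(target=self._threadProc);
            oThread.daemon = True;
            oThread.start();
        def _threadProc(self):
            time.sleep(0.1);            # Pretend to do some work.
            self._oCv.acquire();
            self._fRunning = False;     # Task done...
            self._oCv.notifyAll();      # ...wake up anyone in wait().
            self._oCv.release();
        def wait(self, cSecTimeout):
            self._oCv.acquire();
            if self._fRunning and cSecTimeout > 0:
                self._oCv.wait(cSecTimeout);
            fRunning = self._fRunning;
            self._oCv.release();
            return fRunning;
    print('still running after wait: %s' % (_MiniTask().wait(5),));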
|
bridge_gui.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from twisted.python import log
from twisted.internet import reactor
from twisted.internet.error import CannotListenError, ReactorNotRunning
from autobahn.twisted.websocket import WebSocketServerFactory, listenWS,WebSocketClientFactory
log.startLogging(sys.stdout)
from utils.rosbridge_websocket import Websocket
from utils.bridge_ui import Ui_MainWindow
from PyQt5.QtWidgets import QMainWindow,QWidget,QApplication,QHeaderView,QTableWidgetItem,QAbstractItemView,QLabel
from PyQt5.QtGui import QFont
from PyQt5.QtCore import QThread,pyqtSignal,QObject,QTimer,QStringListModel,QRect
import rospy
import time
import socket
import os
import signal
from multiprocessing import Process, Queue, Manager,Value, Lock
import psutil
def get_host_ip():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 80))
ip = s.getsockname()[0]
finally:
s.close()
return ip
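# Note (added commentary): connecting a UDP socket sends no packets; it only
# makes the kernel pick the outgoing interface, whose address getsockname()
# reports.  '8.8.8.8' serves purely as a routing target.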
class Message(QLabel):
def __init__(self,parent=None):
super(Message,self).__init__(parent)
self.color={'red':'255,0,0','green':'0,255,0','white':'255,255,255','black':'0,0,0','blue':'0,0,255','orange':'255,153,0'}
self.setGeometry(QRect(0, 805, 996, 25))
font = QFont()
font.setPointSize(15)
self.setFont(font)
self.setObjectName("INS_GPS_message")
self.time=QTimer()
self.time.timeout.connect(self.gradients)
self.transparent=0
self.backgroundColor='0,0,0'
self.textColor='0,0,0'
self.Text=''
def colorConstraint(self,color,Type):
if type(color) is str:
if color in self.color:
if Type=='background':
self.backgroundColor = self.color[color]
return True
elif Type=='text':
self.textColor=self.color[color]
return True
else:
return False
else:
return False
elif type(color) is list or type(color) is tuple:
if len(color) == 3 and max(color) <= 255 and min(color) >= 0:
if Type=='background':
self.backgroundColor = str(color)[1:-1]
return True
elif Type=='text':
self.textColor=str(color)[1:-1]
return True
else:
return False
else:
return False
def setStatusMessage(self,Text,backgroundColor,textColor):
self.transparent=250
self.setText(Text)
self.time.start(50)
        if not self.colorConstraint(backgroundColor,'background'):
            raise KeyError('Invalid color setting!')
        if not self.colorConstraint(textColor,'text'):
            raise KeyError('Invalid color setting!')
def gradients(self):
if self.transparent>=0:
self.setStyleSheet('background-color:'+'rgba('+self.backgroundColor+','+str(self.transparent)+
');color: rgba('+self.textColor+','+str(self.transparent)+');')
self.transparent-=10
else:
self.time.stop()
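    # Illustrative usage (added commentary): show a banner that fades out in
    # roughly 1.25 s -- the 50 ms timer steps the alpha down from 250 by 10:
    #   msg = Message(parent_widget)
    #   msg.setStatusMessage('Connection lost', 'red', 'white')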
def verify_IP_and_port(IP,port):
    if len(IP) and len(str(port)):
        ip=IP.split('.')
        # Strip a leading 'ws://' scheme from the first octet if present.
        if not ip[0].isdigit():
            ip[0]=ip[0][5:]
        for ip_ in ip:
            if not ip_.isdigit():
                return False
        # Every octet must be in 0..255 when compared numerically (a
        # lexicographic min/max on the strings would let e.g. '99.0.0.260' through).
        if len(ip)==4 and all(0 <= int(ip_) <= 255 for ip_ in ip):
            if int(port)>=0 and int(port)<=65535:
                return True
    return False
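# Illustrative examples (added commentary):
#   verify_IP_and_port('192.168.1.10', 9090)      -> True
#   verify_IP_and_port('ws://192.168.1.10', '80') -> True  (scheme stripped)
#   verify_IP_and_port('192.168.1.300', 9090)     -> False (octet out of range)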
class MyWebSocketClientFactory(WebSocketClientFactory):
def __init__(self,parent=None):
        super(MyWebSocketClientFactory,self).__init__(parent)
def doStop(self):
if self.numPorts == 0:
return
self.numPorts = self.numPorts - 1
if not self.numPorts:
os.kill(os.getpid(), signal.SIGTERM)
self.stopFactory()
class MyWindows(QMainWindow,Ui_MainWindow,QWidget):
def __init__(self,shared_data,parent=None):
        super(MyWindows,self).__init__(parent)
self.setupUi(self)
        # Set sensible row/column widths
self.topic_list.horizontalHeader().setStretchLastSection(True)
self.topic_list.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.request_topic.horizontalHeader().setStretchLastSection(True)
self.request_topic.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
        # Set the font size
font = QFont()
font.setPixelSize(15)
self.topic_list.setFont(font)
self.request_topic.setFont(font)
        # Make the tables non-editable
self.topic_list.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.topic_list.setSelectionBehavior(QAbstractItemView.SelectRows)
self.request_topic.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.request_topic.setSelectionBehavior(QAbstractItemView.SelectRows)
self.server_client_list.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.current_receive.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.current_send.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.message=Message(self.centralwidget)
self.slot_function_connect()
self.set_address()
self.shared_data=shared_data
self.proc_websocket=None
self.color={'red':'255,0,0','green':'0,255,0','white':'255,255,255','black':'0,0,0','blue':'0,0,255','orange':'255,153,0'}
        self.set_status_display(self.server_status,'white','red','Not started')
        self.set_status_display(self.client_status,'white','red','Not started')
self.server_stop.setEnabled(False)
self.client_stop.setEnabled(False)
self.current_topic_list=[]
self.current_client_list=[]
self.topic_list_update_timer=QTimer()
self.topic_list_update_timer.timeout.connect(self.list_update)
self.topic_list_update_timer.start(500)
self.client_start_connect=False
self.client_start_time=QTimer()
self.client_start_time.timeout.connect(self.websocket_client_connecting)
self.topic_list_dict={}
def list_update(self):
current_topic_list=[]
try:
current_topic_list=rospy.get_published_topics()
except Exception as e:
print(str(e))
if current_topic_list != self.current_topic_list and isinstance(current_topic_list,list):
self.current_topic_list=current_topic_list
self.topic_list.clear()
self.topic_list.setRowCount(len(self.current_topic_list))
for i,data in enumerate(self.current_topic_list):
topic_name_item=QTableWidgetItem(data[0])
topic_type_item=QTableWidgetItem(data[1])
self.topic_list.setItem(i,0,topic_name_item)
self.topic_list.setItem(i, 1, topic_type_item)
if self.current_client_list != self.shared_data['clients']:
self.current_client_list=self.shared_data['clients'].copy()
qlist=QStringListModel()
qlist.setStringList(self.current_client_list)
self.server_client_list.setModel(qlist)
#print(self.shared_data['topic_list'])
if self.topic_list_dict!=self.shared_data['topic_list']:
rows=0
self.topic_list_dict=self.shared_data['topic_list'].copy()
for op_list in self.topic_list_dict.values():
rows+=len(op_list)
self.request_topic.clear()
self.request_topic.setRowCount(rows)
its=0
for key,data in self.topic_list_dict.items():
if key == '9999':
for j,value in enumerate(data):
request_user=QTableWidgetItem(self.connected_server_url)
request_op=QTableWidgetItem(value['op'])
request_topic=QTableWidgetItem(value['topic'])
self.request_topic.setItem(its,0,request_user)
self.request_topic.setItem(its,1,request_op)
self.request_topic.setItem(its,2,request_topic)
its+=1
if self.shared_data['client_connected']:
            self.set_status_display(self.client_status, 'white', 'green', 'Connected')
def slot_function_connect(self):
self.server_begin.clicked.connect(self.websocket_server_start)
self.server_stop.clicked.connect(self.websocket_server_stop)
self.client_begin.clicked.connect(self.websocket_client_start)
self.client_stop.clicked.connect(self.websocket_client_stop)
def set_status_display(self,label,text_color,background_color,text):
label.setStyleSheet('background-color:' + 'rgb(' + self.color[background_color] + ');color: rgb(' + self.color[text_color] + ');')
label.setText(text)
def set_address(self):
self.server_IP_select.addItem(get_host_ip())
self.server_IP_select.addItem('127.0.0.1')
def websocket_server_stop(self):
try:
os.kill(self.shared_data['websocket_pid'],signal.SIGUSR1)
except:
pass
time.sleep(0.5)
self.proc_websocket.terminate()
        self.set_status_display(self.server_status, 'white', 'red', 'Not started')
self.client.setEnabled(True)
self.server_begin.setEnabled(True)
self.server_stop.setEnabled(False)
self.server_url.setText(' ')
self.shared_data['start_websocket'] = False
self.shared_data['clients']=[]
def websocket_server_start(self):
url='ws://'+self.server_IP_select.currentText().split('//')[-1]+':'+self.server_port.text()
self.server_url.setText(url)
self.shared_data['url']=url
self.client.setEnabled(False)
        self.set_status_display(self.server_status,'white','orange','Starting')
try:
self.shared_data['start_websocket']=True
            self.proc_websocket=Process(target=websocket_server_process,args=(self.shared_data,),daemon=True)
            self.proc_websocket.start()
            self.set_status_display(self.server_status, 'white', 'green', 'Started')
self.server_begin.setEnabled(False)
self.server_stop.setEnabled(True)
except:
self.shared_data['start_websocket'] = False
            self.set_status_display(self.server_status, 'white', 'red', 'Not started')
self.client.setEnabled(True)
def websocket_client_stop(self):
if int(self.shared_data['websocket_pid']) in psutil.pids():
os.kill(self.shared_data['websocket_pid'],signal.SIGUSR1)
time.sleep(0.5)
self.proc_websocket.terminate()
self.shared_data['client_connected'] = False
        self.set_status_display(self.client_status, 'white', 'red', 'Not connected')
self.server.setEnabled(True)
self.client_begin.setEnabled(True)
self.client_stop.setEnabled(False)
self.client_url.setText(' ')
self.shared_data['start_websocket'] = False
if self.client_start_time.isActive():
self.client_start_time.stop()
self.client_start_connect=False
def websocket_client_connecting(self):
        self.set_status_display(self.client_status, 'white', 'orange', 'Connecting')
if self.client_start_connect :
if not self.shared_data['client_connected']:
                self.proc_websocket = Process(target=websocket_client_process, args=(self.shared_data,), daemon=True)
self.proc_websocket.start()
else:
                self.set_status_display(self.client_status, 'white', 'green', 'Connected')
self.client_start_connect = False
else :
self.client_start_time.stop()
def websocket_client_start(self):
if not (len(self.client_IP.text()) and len(self.client_port.text())) :
            self.message.setStatusMessage('Please set the IP and port','red','white')
elif not verify_IP_and_port(self.client_IP.text(),self.client_port.text()):
            self.message.setStatusMessage('Please set a valid IP and port', 'red', 'white')
else:
self.client_start_connect=True
self.connected_server_url=self.client_IP.text().split('//')[-1] + ':' + self.client_port.text()
url = 'ws://' + self.connected_server_url
self.client_url.setText(url)
self.shared_data['url'] = url
self.shared_data['IP'] = self.client_IP.text().split('//')[-1]
self.shared_data['port'] = self.client_port.text()
self.server.setEnabled(False)
self.client_begin.setEnabled(False)
self.client_stop.setEnabled(True)
# self.client_start_time.start(500)
            self.proc_websocket = Process(target=websocket_client_process, args=(self.shared_data,), daemon=True)
self.proc_websocket.start()
def websocket_server_process(data):
rospy.init_node("server_bridge")
websocket = Websocket(data)
data['websocket_pid']=os.getpid()
signal.signal(signal.SIGUSR1, websocket.websocket_stop_signal)
websocket.run()
os.kill(os.getpid(), signal.SIGTERM)
def websocket_client_process(data):
rospy.init_node("client_bridge")
websocket = Websocket(data,False)
data['websocket_pid']=os.getpid()
signal.signal(signal.SIGUSR1, websocket.websocket_stop_signal)
websocket.run()
os.kill(os.getpid(), signal.SIGTERM)
if __name__ == "__main__":
app = QApplication(sys.argv)
manager=Manager()
data=manager.dict()
data['url']=None
data['IP']=None
data['port']=None
data['websocket_pid']=None
data['start_websocket']=None
data['clients']=[]
data['client_connected']=False
data['topic_list']={}
my_win = MyWindows(data)
my_win.show()
app.exec_()
try:
my_win.proc_websocket.terminate()
except:
pass
sys.exit(0)
|
main.py
|
# MIT License
#
# Copyright (c) 2022 Luke Ingram
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# main.py
import os,sys
from scripts.image import Img
import cv2
import threading
# TODO: investigate pixel loss in PDF output
def convert(img,imgpath,dest,save):
try:
if dest[-1] != '/':
dest += '/'
basename = os.path.splitext(os.path.basename(imgpath))[0]
image = Img(basename,img)
if save:
savepath = "img/dewarp/" + basename
if not os.path.isdir(savepath):
os.mkdir(savepath)
worker = threading.Thread(target=image.saveAll,args=(savepath,))
worker.start()
worker.join()
f = open(dest+basename+'.pdf',"wb+")
f.write(image.getPdf())
f.close()
return 0
    except IOError:
        return -1
def main(imgpath,dest,save):
    (status,msg) = (-1,"unknown error")
if os.path.splitext(imgpath)[1] not in {".jpeg",".png",".jpg",".tiff",".tif",".JPG"}:
(status,msg) = (-1,"unsupported file format")
elif not os.path.isdir(dest):
(status,msg) = (-1,"destination directory not found")
elif not os.path.isfile(imgpath):
(status,msg) = (-1,"specified image not found")
else:
img = cv2.imread(imgpath)
        if img is None or img.size == 0:  # cv2.imread returns None on failure
(status,msg) = (1,"unable to open specified file")
else:
if convert(img,imgpath,dest,save) < 0:
(status,msg) = (1,"unable to create pdf")
else:
(status,msg) = (0,"conversion successful")
return (status,msg)
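# Minimal CLI sketch (an assumption; the original file ships no entry point):
#   python main.py page.jpg out/ --save
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Convert a document photo to a PDF")
    parser.add_argument("imgpath", help="path to the input image")
    parser.add_argument("dest", help="destination directory for the PDF")
    parser.add_argument("--save", action="store_true",
                        help="also save the intermediate dewarp images")
    args = parser.parse_args()
    status, msg = main(args.imgpath, args.dest, args.save)
    print(msg)
    sys.exit(0 if status == 0 else 1)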
|
telegram-client.py
|
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import threading
import websocket
import sys
class TelegramClient:
def __init__(self, _url, _token):
self.url = _url
self.token = _token
self.socket = None
self.bot = None
self.chat_id = None
self.connect_to_websocket()
self.init_telegram_bot()
def init_telegram_bot(self):
        updater = Updater(self.token)
dp = updater.dispatcher
dp.add_handler(CommandHandler("start", self.on_telegram_start_command))
dp.add_handler(CommandHandler("stop", self.on_telegram_stop_command))
dp.add_handler(CommandHandler("help", self.on_telegram_help_command))
dp.add_handler(MessageHandler(Filters.text, self.on_telegram_message))
dp.add_error_handler(self.on_telegram_error)
updater.start_polling()
updater.idle()
def connect_to_websocket(self):
self.socket = websocket.WebSocketApp(self.url, on_open=self.on_websocket_open,
on_message=self.on_websocket_message,
on_error=self.on_websocket_error, on_close=self.on_websocket_close)
threading.Thread(target=self.socket.run_forever).start()
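    # Note (added commentary): run_forever() blocks, so it gets its own
    # thread; frames arriving there are relayed to the Telegram chat by
    # on_websocket_message once /start has captured bot and chat_id.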
# ------ WebSocket methods ------
def on_websocket_message(self, ws, message):
if self.bot is None:
print('Telegram bot not yet available to pass on message from WebSocket')
return
self.bot.send_message(chat_id=self.chat_id, text=message)
@staticmethod
def on_websocket_open(ws):
print('Connected to ' + ws.url)
@staticmethod
def on_websocket_error(ws, error):
print(error)
@staticmethod
def on_websocket_close(ws):
print("### closed ###")
# ------ Telegram methods ------
def on_telegram_start_command(self, bot, update):
self.bot = bot
self.chat_id = update.message.chat_id
update.message.reply_text('Connection to WebSocket established.')
def on_telegram_stop_command(self, bot, update):
self.socket.close()
self.socket = None
self.bot = None
self.chat_id = None
update.message.reply_text('Connection to WebSocket is closed. Use /start to reconnect.')
def on_telegram_message(self, bot, update):
if self.socket is None:
update.message.reply_text('No connection to WebSocket. Use /start first.')
return
self.socket.send(update.message.text)
@staticmethod
def on_telegram_help_command(bot, update):
update.message.reply_text('TODO write help text')
@staticmethod
def on_telegram_error(bot, update, error):
print(update, error)
if __name__ == "__main__":
if len(sys.argv) > 1:
url = sys.argv[1]
else:
print('Enter the WebSocket URL:')
url = input()
if not url.startswith('ws://'):
url = 'ws://' + url
token = '' # intentionally not committed to the repository
TelegramClient(url, token)
|
common.py
|
"""Test the helper method for writing tests."""
import asyncio
from collections import OrderedDict
from datetime import timedelta
import functools as ft
import json
import os
import sys
from unittest.mock import patch, MagicMock, Mock
from io import StringIO
import logging
import threading
from contextlib import contextmanager
from homeassistant import auth, core as ha, config_entries
from homeassistant.auth import (
models as auth_models, auth_store, providers as auth_providers)
from homeassistant.setup import setup_component, async_setup_component
from homeassistant.config import async_process_component_config
from homeassistant.helpers import (
intent, entity, restore_state, entity_registry,
entity_platform, storage, device_registry)
from homeassistant.util.unit_system import METRIC_SYSTEM
import homeassistant.util.dt as date_util
import homeassistant.util.yaml as yaml
from homeassistant.const import (
STATE_ON, STATE_OFF, DEVICE_DEFAULT_NAME, EVENT_TIME_CHANGED,
EVENT_STATE_CHANGED, EVENT_PLATFORM_DISCOVERED, ATTR_SERVICE,
ATTR_DISCOVERED, SERVER_PORT, EVENT_HOMEASSISTANT_CLOSE)
from homeassistant.components import mqtt, recorder
from homeassistant.util.async_ import (
run_callback_threadsafe, run_coroutine_threadsafe)
_TEST_INSTANCE_PORT = SERVER_PORT
_LOGGER = logging.getLogger(__name__)
INSTANCES = []
CLIENT_ID = 'https://example.com/app'
CLIENT_REDIRECT_URI = 'https://example.com/app/callback'
def threadsafe_callback_factory(func):
"""Create threadsafe functions out of callbacks.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return run_callback_threadsafe(
hass.loop, ft.partial(func, *args, **kwargs)).result()
return threadsafe
def threadsafe_coroutine_factory(func):
"""Create threadsafe functions out of coroutine.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return run_coroutine_threadsafe(
func(*args, **kwargs), hass.loop).result()
return threadsafe
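# Illustrative example (added commentary): the two factories above are what
# produce the blocking helpers further down in this module, e.g.
#   mock_service = threadsafe_callback_factory(async_mock_service)
# lets a synchronous test thread call the async helper on the hass loop.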
def get_test_config_dir(*add_path):
"""Return a path to a test config dir."""
return os.path.join(os.path.dirname(__file__), 'testing_config', *add_path)
def get_test_home_assistant():
"""Return a Home Assistant object pointing at test config directory."""
if sys.platform == "win32":
loop = asyncio.ProactorEventLoop()
else:
loop = asyncio.new_event_loop()
hass = loop.run_until_complete(async_test_home_assistant(loop))
stop_event = threading.Event()
def run_loop():
"""Run event loop."""
# pylint: disable=protected-access
loop._thread_ident = threading.get_ident()
loop.run_forever()
stop_event.set()
orig_stop = hass.stop
def start_hass(*mocks):
"""Start hass."""
run_coroutine_threadsafe(hass.async_start(), loop=hass.loop).result()
def stop_hass():
"""Stop hass."""
orig_stop()
stop_event.wait()
loop.close()
hass.start = start_hass
hass.stop = stop_hass
threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()
return hass
# pylint: disable=protected-access
@asyncio.coroutine
def async_test_home_assistant(loop):
"""Return a Home Assistant object pointing at test config dir."""
hass = ha.HomeAssistant(loop)
hass.config.async_load = Mock()
store = auth_store.AuthStore(hass)
hass.auth = auth.AuthManager(hass, store, {}, {})
ensure_auth_manager_loaded(hass.auth)
INSTANCES.append(hass)
orig_async_add_job = hass.async_add_job
orig_async_add_executor_job = hass.async_add_executor_job
orig_async_create_task = hass.async_create_task
def async_add_job(target, *args):
"""Add job."""
if isinstance(target, Mock):
return mock_coro(target(*args))
return orig_async_add_job(target, *args)
def async_add_executor_job(target, *args):
"""Add executor job."""
if isinstance(target, Mock):
return mock_coro(target(*args))
return orig_async_add_executor_job(target, *args)
def async_create_task(coroutine):
"""Create task."""
if isinstance(coroutine, Mock):
return mock_coro()
return orig_async_create_task(coroutine)
hass.async_add_job = async_add_job
hass.async_add_executor_job = async_add_executor_job
hass.async_create_task = async_create_task
hass.config.location_name = 'test home'
hass.config.config_dir = get_test_config_dir()
hass.config.latitude = 32.87336
hass.config.longitude = -117.22743
hass.config.elevation = 0
hass.config.time_zone = date_util.get_time_zone('US/Pacific')
hass.config.units = METRIC_SYSTEM
hass.config.skip_pip = True
hass.config_entries = config_entries.ConfigEntries(hass, {})
hass.config_entries._entries = []
hass.config_entries._store._async_ensure_stop_listener = lambda: None
hass.state = ha.CoreState.running
# Mock async_start
orig_start = hass.async_start
@asyncio.coroutine
def mock_async_start():
"""Start the mocking."""
# We only mock time during tests and we want to track tasks
with patch('homeassistant.core._async_create_timer'), \
patch.object(hass, 'async_stop_track_tasks'):
yield from orig_start()
hass.async_start = mock_async_start
@ha.callback
def clear_instance(event):
"""Clear global instance."""
INSTANCES.remove(hass)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)
return hass
def get_test_instance_port():
"""Return unused port for running test instance.
The socket that holds the default port does not get released when we stop
HA in a different test case. Until I have figured out what is going on,
let's run each test on a different port.
"""
global _TEST_INSTANCE_PORT
_TEST_INSTANCE_PORT += 1
return _TEST_INSTANCE_PORT
@ha.callback
def async_mock_service(hass, domain, service, schema=None):
"""Set up a fake service & return a calls log list to this service."""
calls = []
@ha.callback
def mock_service_log(call): # pylint: disable=unnecessary-lambda
"""Mock service call."""
calls.append(call)
hass.services.async_register(
domain, service, mock_service_log, schema=schema)
return calls
mock_service = threadsafe_callback_factory(async_mock_service)
@ha.callback
def async_mock_intent(hass, intent_typ):
"""Set up a fake intent handler."""
intents = []
class MockIntentHandler(intent.IntentHandler):
intent_type = intent_typ
@asyncio.coroutine
def async_handle(self, intent):
"""Handle the intent."""
intents.append(intent)
return intent.create_response()
intent.async_register(hass, MockIntentHandler())
return intents
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
"""Fire the MQTT message."""
if isinstance(payload, str):
payload = payload.encode('utf-8')
msg = mqtt.Message(topic, payload, qos, retain)
hass.async_run_job(hass.data['mqtt']._mqtt_on_message, None, None, msg)
fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)
@ha.callback
def async_fire_time_changed(hass, time):
"""Fire a time changes event."""
hass.bus.async_fire(EVENT_TIME_CHANGED, {'now': time})
fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)
def fire_service_discovered(hass, service, info):
"""Fire the MQTT message."""
hass.bus.fire(EVENT_PLATFORM_DISCOVERED, {
ATTR_SERVICE: service,
ATTR_DISCOVERED: info
})
def load_fixture(filename):
"""Load a fixture."""
path = os.path.join(os.path.dirname(__file__), 'fixtures', filename)
with open(path, encoding='utf-8') as fptr:
return fptr.read()
def mock_state_change_event(hass, new_state, old_state=None):
"""Mock state change envent."""
event_data = {
'entity_id': new_state.entity_id,
'new_state': new_state,
}
if old_state:
event_data['old_state'] = old_state
hass.bus.fire(EVENT_STATE_CHANGED, event_data, context=new_state.context)
@asyncio.coroutine
def async_mock_mqtt_component(hass, config=None):
"""Mock the MQTT component."""
if config is None:
config = {mqtt.CONF_BROKER: 'mock-broker'}
with patch('paho.mqtt.client.Client') as mock_client:
mock_client().connect.return_value = 0
mock_client().subscribe.return_value = (0, 0)
mock_client().publish.return_value = (0, 0)
result = yield from async_setup_component(hass, mqtt.DOMAIN, {
mqtt.DOMAIN: config
})
assert result
hass.data['mqtt'] = MagicMock(spec_set=hass.data['mqtt'],
wraps=hass.data['mqtt'])
return hass.data['mqtt']
mock_mqtt_component = threadsafe_coroutine_factory(async_mock_mqtt_component)
@ha.callback
def mock_component(hass, component):
"""Mock a component is setup."""
if component in hass.config.components:
AssertionError("Component {} is already setup".format(component))
hass.config.components.add(component)
def mock_registry(hass, mock_entries=None):
"""Mock the Entity Registry."""
registry = entity_registry.EntityRegistry(hass)
registry.entities = mock_entries or OrderedDict()
async def _get_reg():
return registry
hass.data[entity_registry.DATA_REGISTRY] = \
hass.loop.create_task(_get_reg())
return registry
def mock_device_registry(hass, mock_entries=None):
"""Mock the Device Registry."""
registry = device_registry.DeviceRegistry(hass)
registry.devices = mock_entries or OrderedDict()
async def _get_reg():
return registry
hass.data[device_registry.DATA_REGISTRY] = \
hass.loop.create_task(_get_reg())
return registry
class MockUser(auth_models.User):
"""Mock a user in Home Assistant."""
def __init__(self, id=None, is_owner=False, is_active=True,
name='Mock User', system_generated=False):
"""Initialize mock user."""
kwargs = {
'is_owner': is_owner,
'is_active': is_active,
'name': name,
'system_generated': system_generated,
}
if id is not None:
kwargs['id'] = id
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
return self.add_to_auth_manager(hass.auth)
def add_to_auth_manager(self, auth_mgr):
"""Test helper to add entry to hass."""
ensure_auth_manager_loaded(auth_mgr)
auth_mgr._store._users[self.id] = self
return self
async def register_auth_provider(hass, config):
"""Register an auth provider."""
provider = await auth_providers.auth_provider_from_config(
hass, hass.auth._store, config)
assert provider is not None, 'Invalid config specified'
key = (provider.type, provider.id)
providers = hass.auth._providers
if key in providers:
raise ValueError('Provider already registered')
providers[key] = provider
return provider
@ha.callback
def ensure_auth_manager_loaded(auth_mgr):
"""Ensure an auth manager is considered loaded."""
store = auth_mgr._store
if store._users is None:
store._set_defaults()
class MockModule:
"""Representation of a fake module."""
# pylint: disable=invalid-name
def __init__(self, domain=None, dependencies=None, setup=None,
requirements=None, config_schema=None, platform_schema=None,
async_setup=None, async_setup_entry=None,
async_unload_entry=None):
"""Initialize the mock module."""
self.DOMAIN = domain
self.DEPENDENCIES = dependencies or []
self.REQUIREMENTS = requirements or []
if config_schema is not None:
self.CONFIG_SCHEMA = config_schema
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if setup is not None:
# We run this in executor, wrap it in function
self.setup = lambda *args: setup(*args)
if async_setup is not None:
self.async_setup = async_setup
if setup is None and async_setup is None:
self.async_setup = mock_coro_func(True)
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if async_unload_entry is not None:
self.async_unload_entry = async_unload_entry
class MockPlatform:
"""Provide a fake platform."""
# pylint: disable=invalid-name
def __init__(self, setup_platform=None, dependencies=None,
platform_schema=None, async_setup_platform=None,
async_setup_entry=None, scan_interval=None):
"""Initialize the platform."""
self.DEPENDENCIES = dependencies or []
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if scan_interval is not None:
self.SCAN_INTERVAL = scan_interval
if setup_platform is not None:
# We run this in executor, wrap it in function
self.setup_platform = lambda *args: setup_platform(*args)
if async_setup_platform is not None:
self.async_setup_platform = async_setup_platform
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if setup_platform is None and async_setup_platform is None:
self.async_setup_platform = mock_coro_func()
class MockEntityPlatform(entity_platform.EntityPlatform):
"""Mock class with some mock defaults."""
def __init__(
self, hass,
logger=None,
domain='test_domain',
platform_name='test_platform',
platform=None,
scan_interval=timedelta(seconds=15),
entity_namespace=None,
async_entities_added_callback=lambda: None
):
"""Initialize a mock entity platform."""
if logger is None:
logger = logging.getLogger('homeassistant.helpers.entity_platform')
# Otherwise the constructor will blow up.
if (isinstance(platform, Mock) and
isinstance(platform.PARALLEL_UPDATES, Mock)):
platform.PARALLEL_UPDATES = 0
super().__init__(
hass=hass,
logger=logger,
domain=domain,
platform_name=platform_name,
platform=platform,
scan_interval=scan_interval,
entity_namespace=entity_namespace,
async_entities_added_callback=async_entities_added_callback,
)
class MockToggleDevice(entity.ToggleEntity):
"""Provide a mock toggle device."""
def __init__(self, name, state):
"""Initialize the mock device."""
self._name = name or DEVICE_DEFAULT_NAME
self._state = state
self.calls = []
@property
def name(self):
"""Return the name of the device if any."""
self.calls.append(('name', {}))
return self._name
@property
def state(self):
"""Return the name of the device if any."""
self.calls.append(('state', {}))
return self._state
@property
def is_on(self):
"""Return true if device is on."""
self.calls.append(('is_on', {}))
return self._state == STATE_ON
def turn_on(self, **kwargs):
"""Turn the device on."""
self.calls.append(('turn_on', kwargs))
self._state = STATE_ON
def turn_off(self, **kwargs):
"""Turn the device off."""
self.calls.append(('turn_off', kwargs))
self._state = STATE_OFF
def last_call(self, method=None):
"""Return the last call."""
if not self.calls:
return None
if method is None:
return self.calls[-1]
try:
return next(call for call in reversed(self.calls)
if call[0] == method)
except StopIteration:
return None
class MockConfigEntry(config_entries.ConfigEntry):
"""Helper for creating config entries that adds some defaults."""
def __init__(self, *, domain='test', data=None, version=0, entry_id=None,
source=config_entries.SOURCE_USER, title='Mock Title',
state=None,
connection_class=config_entries.CONN_CLASS_UNKNOWN):
"""Initialize a mock config entry."""
kwargs = {
'entry_id': entry_id or 'mock-id',
'domain': domain,
'data': data or {},
'version': version,
'title': title,
'connection_class': connection_class,
}
if source is not None:
kwargs['source'] = source
if state is not None:
kwargs['state'] = state
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
hass.config_entries._entries.append(self)
def add_to_manager(self, manager):
"""Test helper to add entry to entry manager."""
manager._entries.append(self)
def patch_yaml_files(files_dict, endswith=True):
"""Patch load_yaml with a dictionary of yaml files."""
# match using endswith, start search with longest string
matchlist = sorted(list(files_dict.keys()), key=len) if endswith else []
def mock_open_f(fname, **_):
"""Mock open() in the yaml module, used by load_yaml."""
# Return the mocked file on full match
if fname in files_dict:
_LOGGER.debug("patch_yaml_files match %s", fname)
res = StringIO(files_dict[fname])
setattr(res, 'name', fname)
return res
# Match using endswith
for ends in matchlist:
if fname.endswith(ends):
_LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname)
res = StringIO(files_dict[ends])
setattr(res, 'name', fname)
return res
# Fallback for hass.components (i.e. services.yaml)
if 'homeassistant/components' in fname:
_LOGGER.debug("patch_yaml_files using real file: %s", fname)
return open(fname, encoding='utf-8')
# Not found
raise FileNotFoundError("File not found: {}".format(fname))
return patch.object(yaml, 'open', mock_open_f, create=True)
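# Illustrative usage (added commentary; the file name here is made up):
#   files = {'configuration.yaml': 'homeassistant:\n  name: Test'}
#   with patch_yaml_files(files):
#       conf = yaml.load_yaml('configuration.yaml')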
def mock_coro(return_value=None, exception=None):
"""Return a coro that returns a value or raise an exception."""
return mock_coro_func(return_value, exception)()
def mock_coro_func(return_value=None, exception=None):
"""Return a method to create a coro function that returns a value."""
@asyncio.coroutine
def coro(*args, **kwargs):
"""Fake coroutine."""
if exception:
raise exception
return return_value
return coro
@contextmanager
def assert_setup_component(count, domain=None):
"""Collect valid configuration from setup_component.
    - count: The number of valid platforms that should be set up.
    - domain: The domain to count; optional, as it can usually be
      determined automatically.
Use as a context manager around setup.setup_component
with assert_setup_component(0) as result_config:
setup_component(hass, domain, start_config)
# using result_config is optional
"""
config = {}
@ha.callback
def mock_psc(hass, config_input, domain):
"""Mock the prepare_setup_component to capture config."""
res = async_process_component_config(
hass, config_input, domain)
config[domain] = None if res is None else res.get(domain)
_LOGGER.debug("Configuration for %s, Validated: %s, Original %s",
domain, config[domain], config_input.get(domain))
return res
assert isinstance(config, dict)
with patch('homeassistant.config.async_process_component_config',
mock_psc):
yield config
if domain is None:
assert len(config) == 1, ('assert_setup_component requires DOMAIN: {}'
.format(list(config.keys())))
domain = list(config.keys())[0]
res = config.get(domain)
res_len = 0 if res is None else len(res)
assert res_len == count, 'setup_component failed, expected {} got {}: {}' \
.format(count, res_len, res)
def init_recorder_component(hass, add_config=None):
"""Initialize the recorder."""
config = dict(add_config) if add_config else {}
config[recorder.CONF_DB_URL] = 'sqlite://' # In memory DB
with patch('homeassistant.components.recorder.migration.migrate_schema'):
assert setup_component(hass, recorder.DOMAIN,
{recorder.DOMAIN: config})
assert recorder.DOMAIN in hass.config.components
_LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
"""Mock the DATA_RESTORE_CACHE."""
key = restore_state.DATA_RESTORE_CACHE
hass.data[key] = {
state.entity_id: state for state in states}
_LOGGER.debug('Restore cache: %s', hass.data[key])
assert len(hass.data[key]) == len(states), \
"Duplicate entity_id? {}".format(states)
hass.state = ha.CoreState.starting
mock_component(hass, recorder.DOMAIN)
class MockDependency:
"""Decorator to mock install a dependency."""
def __init__(self, root, *args):
"""Initialize decorator."""
self.root = root
self.submodules = args
def __enter__(self):
"""Start mocking."""
def resolve(mock, path):
"""Resolve a mock."""
if not path:
return mock
return resolve(getattr(mock, path[0]), path[1:])
base = MagicMock()
to_mock = {
"{}.{}".format(self.root, tom): resolve(base, tom.split('.'))
for tom in self.submodules
}
to_mock[self.root] = base
self.patcher = patch.dict('sys.modules', to_mock)
self.patcher.start()
return base
def __exit__(self, *exc):
"""Stop mocking."""
self.patcher.stop()
return False
def __call__(self, func):
"""Apply decorator."""
def run_mocked(*args, **kwargs):
"""Run with mocked dependencies."""
with self as base:
args = list(args) + [base]
func(*args, **kwargs)
return run_mocked
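# Usage sketch (illustrative; `fancy_lib` and its `submodule` are hypothetical
# names): the decorator patches sys.modules for the duration of the call and
# passes the root MagicMock as the last positional argument.
@MockDependency('fancy_lib', 'submodule')
def _example_mocked_dependency_test(mock_fancy_lib):
    """Configure the mocked third-party module inside a test."""
    mock_fancy_lib.submodule.do_thing.return_value = 42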
class MockEntity(entity.Entity):
"""Mock Entity class."""
def __init__(self, **values):
"""Initialize an entity."""
self._values = values
if 'entity_id' in values:
self.entity_id = values['entity_id']
@property
def name(self):
"""Return the name of the entity."""
return self._handle('name')
@property
def should_poll(self):
"""Return the ste of the polling."""
return self._handle('should_poll')
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return self._handle('unique_id')
@property
def available(self):
"""Return True if entity is available."""
return self._handle('available')
@property
def device_info(self):
"""Info how it links to a device."""
return self._handle('device_info')
def _handle(self, attr):
"""Return attribute value."""
if attr in self._values:
return self._values[attr]
return getattr(super(), attr)
@contextmanager
def mock_storage(data=None):
"""Mock storage.
Data is a dict {'key': {'version': version, 'data': data}}
Written data will be converted to JSON to ensure JSON parsing works.
"""
if data is None:
data = {}
orig_load = storage.Store._async_load
async def mock_async_load(store):
"""Mock version of load."""
if store._data is None:
# No data to load
if store.key not in data:
return None
mock_data = data.get(store.key)
if 'data' not in mock_data or 'version' not in mock_data:
_LOGGER.error('Mock data needs "version" and "data"')
raise ValueError('Mock data needs "version" and "data"')
store._data = mock_data
# Route through original load so that we trigger migration
loaded = await orig_load(store)
_LOGGER.info('Loading data for %s: %s', store.key, loaded)
return loaded
def mock_write_data(store, path, data_to_write):
"""Mock version of write data."""
# To ensure that the data can be serialized
_LOGGER.info('Writing data to %s: %s', store.key, data_to_write)
data[store.key] = json.loads(json.dumps(data_to_write))
with patch('homeassistant.helpers.storage.Store._async_load',
side_effect=mock_async_load, autospec=True), \
patch('homeassistant.helpers.storage.Store._write_data',
side_effect=mock_write_data, autospec=True):
yield data
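# Illustrative sketch (the storage key 'test.config' is hypothetical): seed the
# mocked store, load it through a real Store instance, then inspect the data
# captured by the mocked writer.
async def _example_mock_storage(hass):
    """Round-trip data through a mocked Store."""
    with mock_storage({'test.config': {'version': 1, 'data': {'a': 1}}}) as data:
        store = storage.Store(hass, 1, 'test.config')
        assert await store.async_load() == {'a': 1}
        await store.async_save({'a': 2})
        await flush_store(store)
        assert data['test.config']['data'] == {'a': 2}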
async def flush_store(store):
"""Make sure all delayed writes of a store are written."""
if store._data is None:
return
await store._async_handle_write_data()
|
bluemo_ble.py
|
import logging
import sys
from abc import ABCMeta, abstractmethod
import time
import threading
from threading import Semaphore
from bgapi.module import BlueGigaClient, GATTService, GATTCharacteristic
from bgapi.module import BlueGigaServer, ProcedureManager, BLEScanResponse
from bgapi.cmd_def import gap_discover_mode, connection_status_mask
from pybluemo.bluemo_msg import *
term = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
term.setFormatter(formatter)
logger = logging.getLogger("YASP")
logger.addHandler(term)
logger.setLevel(logging.INFO)
#error_handler = logging.FileHandler("error_log.txt")
#error_logger = logging.getLogger("YASP")
#error_logger.addHandler(error_handler)
#error_logger.setLevel(logging.ERROR)
YASP_BLE_MTU = 20
YASP_SRV_UUID = b"\x3F\x41\xB4\xD5\xB4\xE1\x4E\x43\x81\xC9\x32\x9C\x01\x00\x52\x15"
YASP_CHR_UUID = b"\x3F\x41\xB4\xD5\xB4\xE1\x4E\x43\x81\xC9\x32\x9C\x02\x00\x52\x15"
class AbstractBaseYasp(ProcedureManager):
__metaclass__ = ABCMeta
def __init__(self):
super(AbstractBaseYasp, self).__init__()
self.buffer = b""
self.cached_response = None
self.cmd_callbacks = {}
self.def_callbacks = {}
self.buffer_lock = threading.Semaphore(1)
@abstractmethod
def serial_tx(self, data): raise NotImplementedError()
def serial_rx(self, data):
logger.log(logging.DEBUG, "<=" + "".join(["%02X" % j for j in data]))
self.buffer_lock.acquire(True)
self.buffer += data
self.buffer_lock.release()
self.rx_processing()
def rx_processing(self):
if len(self.buffer) > 1:
hdr_len = 2
length = self.buffer[0]
if length > 127:
if len(self.buffer) > hdr_len:
length = (self.buffer[0] & 0x7F) + ((self.buffer[1] & 0xF) << 7)
if self.buffer[1] > 0xF:
logger.error("RX Overrun, packets dropped.")
hdr_len += 1
else:
return
if len(self.buffer) >= hdr_len + length:
cmd = self.buffer[hdr_len-1]
message = self.buffer[hdr_len:hdr_len+length]
threading.Thread(target=self.cmd_handler, args=(cmd, message)).start()
self.buffer_lock.acquire(True)
self.buffer = self.buffer[hdr_len+length:]
self.buffer_lock.release()
self.rx_processing()
def send_command(self, callback, msg_defn=MessageDefinition()):
payload = msg_defn.cmd_msg()
if len(payload) > 0x7F:
msg = struct.pack("<BBB%ds" % len(payload), (len(payload) & 0x7F) | 0x80, len(payload) >> 7, msg_defn.get_command_code(), payload)
elif len(payload) > 0:
msg = struct.pack("<BB%ds" % len(payload), len(payload), msg_defn.get_command_code(), payload)
else:
msg = struct.pack("<BB", len(payload), msg_defn.get_command_code())
if callback is not None:
if msg_defn.get_response_code() not in self.cmd_callbacks:
self.cmd_callbacks[msg_defn.get_response_code()] = []
self.cmd_callbacks[msg_defn.get_response_code()].append(callback)
self.serial_tx(msg)
elif msg_defn.get_response_code() in self.def_callbacks:
self.serial_tx(msg)
else:
with self.procedure_call(msg_defn.get_response_code(), timeout=1):
self.serial_tx(msg)
return self.cached_response
def send_response(self, msg_defn=MessageDefinition()):
payload = msg_defn.rsp_msg()
msg = struct.pack("<HB%ds" % len(payload), len(payload), msg_defn.get_response_code(), payload)
self.serial_tx(msg)
def set_default_msg_callback(self, code, callback):
self.def_callbacks[code] = callback
@abstractmethod
def cmd_handler(self, cmd, payload):
pass
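# Framing sketch (added commentary): send_command above packs frames as
# [length][optional high-length byte][command code][payload]. Lengths above
# 0x7F set the continuation bit of the first byte and spill the high bits into
# a second byte. This standalone helper mirrors that logic for reference
# (struct is available via the star import from pybluemo.bluemo_msg).
def _example_frame(code, payload):
    """Encode one command frame the same way send_command does."""
    if len(payload) > 0x7F:
        return struct.pack("<BBB%ds" % len(payload), (len(payload) & 0x7F) | 0x80,
                           len(payload) >> 7, code, payload)
    elif len(payload) > 0:
        return struct.pack("<BB%ds" % len(payload), len(payload), code, payload)
    return struct.pack("<BB", 0, code)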
class YaspClient(AbstractBaseYasp):
def __init__(self):
super(YaspClient, self).__init__()
for key, value in MSG_CLASS_BY_RSP_CODE.items():
self.cmd_callbacks[key] = []
self.serial_char_handle = None
self.write_wo_response = None
def set_interface(self, handle, wr_wo_resp):
self.serial_char_handle = handle
self.write_wo_response = wr_wo_resp
def serial_tx(self, data):
        if not self.serial_char_handle:
            raise RuntimeError("Serial characteristic handle has not been set.")
        if not self.write_wo_response:
            raise RuntimeError("Write-without-response function has not been set.")
full_packets = int(len(data) / YASP_BLE_MTU)
for i in range(full_packets):
logger.log(logging.DEBUG, "=>"+"".join(["%02X" % j for j in data[YASP_BLE_MTU*i:YASP_BLE_MTU*(i+1)]]))
self.write_wo_response(self.serial_char_handle, data[YASP_BLE_MTU*i:YASP_BLE_MTU*(i+1)], attempts=3)
if (len(data) % YASP_BLE_MTU) > 0:
logger.log(logging.DEBUG, "=>" + "".join(["%02X" % j for j in data[YASP_BLE_MTU*full_packets:]]))
self.write_wo_response(self.serial_char_handle, data[YASP_BLE_MTU*full_packets:], attempts=3)
def cmd_handler(self, cmd, payload):
        if cmd not in MSG_CLASS_BY_RSP_CODE:
            raise RuntimeError("Unknown response code: 0x%02X" % cmd)
response = MSG_CLASS_BY_RSP_CODE[cmd].rsp_recv(payload)
if len(self.cmd_callbacks[cmd]) == 0: # Callback not specified
if cmd in self.def_callbacks: # Default callback specified
self.def_callbacks[cmd](response)
else:
self.cached_response = response
self.procedure_complete(cmd)
else:
callback = self.cmd_callbacks[cmd].pop()
callback(response)
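# Usage sketch (illustrative; `ExampleMsg` stands in for a MessageDefinition
# subclass from pybluemo.bluemo_msg). With no callback and no default callback
# registered, send_command blocks until the response arrives:
#   client = YaspClient()
#   response = client.send_command(None, ExampleMsg())
# Alternatively, register a default callback for unsolicited responses:
#   client.set_default_msg_callback(ExampleMsg().get_response_code(), print)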
class YaspServer(AbstractBaseYasp):
def __init__(self):
super(YaspServer, self).__init__()
self.serial_char_handle = None
self.write_attribute = None
def set_interface(self, handle, write_attr):
self.serial_char_handle = handle
self.write_attribute = write_attr
def serial_tx(self, data):
        if not self.serial_char_handle:
            raise RuntimeError("Serial characteristic handle has not been set.")
        if not self.write_attribute:
            raise RuntimeError("Write-attribute function has not been set.")
full_packets = int(len(data) / YASP_BLE_MTU)
for i in range(full_packets):
chunk = data[YASP_BLE_MTU*i:YASP_BLE_MTU*(i+1)]
logger.log(logging.DEBUG, "=>" + "".join(["%02X" % j for j in chunk]))
self.write_attribute(self.serial_char_handle, offset=0, value=chunk, timeout=1)
        if (len(data) % YASP_BLE_MTU) > 0:
            chunk = data[YASP_BLE_MTU*full_packets:]
            logger.log(logging.DEBUG, "=>" + "".join(["%02X" % j for j in chunk]))
            # Send the trailing partial chunk as well.
            self.write_attribute(self.serial_char_handle, offset=0, value=chunk, timeout=1)
class YaspBlueGigaClient(BlueGigaClient):
def __init__(self, port, baud=115200, timeout=1):
super(YaspBlueGigaClient, self).__init__(port, baud, timeout)
self.scan_filter_name = None
self.scan_filter_service = None
self.scan_filter_result = None
def connect_by_name(self, name, yasp_client, conn_interval_min=0x20,
conn_interval_max=0x30, connection_timeout=100, latency=0, scan_timeout=1):
self.scan_filter_name = name
now = start = time.time()
self._api.ble_cmd_gap_discover(mode=gap_discover_mode['gap_discover_observation'])
        while now < start + scan_timeout and self.scan_filter_result is None:
            time.sleep(0.05)  # Poll briefly so a matching scan response can end the wait early.
            now = time.time()
self._api.ble_cmd_gap_end_procedure()
if self.scan_filter_result is not None:
logger.log(logging.INFO, "Intiating connection to %s" % ":".join(["%02X" % i for i in self.scan_filter_result.sender[::-1]]))
connection = self.connect(self.scan_filter_result, scan_timeout, conn_interval_min,
conn_interval_max, connection_timeout, latency)
logger.log(logging.INFO, "Connected.")
connection.read_by_group_type(GATTService.PRIMARY_SERVICE_UUID)
for service in connection.get_services():
if service.uuid == YASP_SRV_UUID:
connection.find_information(service=service)
connection.read_by_type(service=service, type=GATTCharacteristic.CHARACTERISTIC_UUID)
connection.read_by_type(service=service, type=GATTCharacteristic.CLIENT_CHARACTERISTIC_CONFIG)
break
else:
raise Exception("Could not find YASP_SRV_UUID in device services!")
logger.log(logging.INFO, "YASP service discovery complete.")
for characteristic in connection.get_characteristics():
if characteristic.has_notify():
connection.characteristic_subscription(characteristic=characteristic, indicate=False, notify=True)
logger.log(logging.INFO, "YASP characteristic notifications enabled.")
serial_chr_handle = connection.get_handles_by_uuid(YASP_CHR_UUID)[0]
yasp_client.set_interface(serial_chr_handle, connection.wr_noresp_by_handle)
connection.assign_attrclient_value_callback(serial_chr_handle, yasp_client.serial_rx)
return connection.handle
else:
return None
def ble_evt_gap_scan_response(self, rssi, packet_type, sender, address_type, bond, data):
super(YaspBlueGigaClient, self).ble_evt_gap_scan_response(rssi, packet_type, sender, address_type, bond, data)
result = BLEScanResponse(rssi, packet_type, sender, address_type, bond, data)
print("Advertisement data: " + "".join(["%c" % i for i in data]))
if self.scan_filter_name is not None and self.scan_filter_service is not None:
if self.scan_filter_name in data and self.scan_filter_service in data:
self.scan_filter_result = result
elif self.scan_filter_name is not None:
if self.scan_filter_name in data:
self.scan_filter_result = result
elif self.scan_filter_service is not None:
if self.scan_filter_service in data:
self.scan_filter_result = result
class YaspBlueGigaServer(BlueGigaServer):
def __init__(self, port):
super(YaspBlueGigaServer, self).__init__(port)
self.pipe_logs_to_terminal(level=logging.WARNING)
self.yasp_server = None
self.connected = False
self.yasp_chr_handle = None
self.keep_running = True
for handle in range(0x00FF):
handle_type = self.read_type(handle)
if handle_type == YASP_CHR_UUID:
print("Found YASP_CHR_UUID @ %d." % handle)
self.yasp_chr_handle = handle
break
else:
raise RuntimeError("Could not find required YASP characteristic.")
def start_yasp_server(self, yasp_server):
self.yasp_server = yasp_server
self.yasp_server.set_interface(handle=self.yasp_chr_handle, wr_wo_resp=self.write_attribute)
self.keep_running = True
self.advertise_general()
def stop_yasp_server(self):
self.keep_running = False
self.reset_ble_state()
def ble_evt_attributes_value(self, connection, reason, handle, offset, value):
super(YaspBlueGigaServer, self).ble_evt_attributes_value(connection, reason, handle, offset, value)
if handle == self.yasp_chr_handle:
self.yasp_server.serial_rx(value)
def ble_evt_connection_disconnected(self, connection, reason):
super(YaspBlueGigaServer, self).ble_evt_connection_disconnected(connection, reason)
self.connected = False
if self.keep_running:
self.start_yasp_server(self.yasp_server)
def ble_evt_connection_status(self, connection, flags, address, address_type, conn_interval, timeout, latency, bonding):
super(YaspBlueGigaServer, self).ble_evt_connection_status(connection, flags, address, address_type, conn_interval, timeout, latency, bonding)
if flags & connection_status_mask["connection_connected"]:
self.connected = True
def main():
pass
if __name__ == "__main__":
main()
|
main.py
|
from datetime import datetime
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
from threading import Event
from threading import Thread
from time import sleep
from analysis import Analysis
from logs import Logs
from trading import Trading
from twitter import Twitter
# Whether to send all logs to the cloud instead of a local file.
LOGS_TO_CLOUD = True
# The duration of the smallest backoff step in seconds.
BACKOFF_STEP_S = 0.1
# The maximum number of retry steps, equivalent to 0.1 * (2^12 - 1) = 409.5
# seconds of total delay. This is the largest interval that one backoff
# sequence may take.
MAX_TRIES = 12
# The time in seconds after which to reset a backoff sequence. This is the
# smallest interval at which backoff sequences may repeat normally.
BACKOFF_RESET_S = 30 * 60
# The host for the monitor Web server.
MONITOR_HOST = "0.0.0.0"
# The port for the monitor Web server.
MONITOR_PORT = 80
# The message returned by the monitor Web server.
MONITOR_MESSAGE = "OK"
class Monitor:
"""A monitor exposing a Web server while the main loop is running."""
def __init__(self):
"""Creates a Web server on a background thread."""
self.server = HTTPServer((MONITOR_HOST, MONITOR_PORT),
self.MonitorHandler)
self.thread = Thread(target=self.server.serve_forever)
self.thread.daemon = True
def start(self):
"""Starts the Web server background thread."""
self.thread.start()
def stop(self):
"""Stops the Web server and background thread."""
self.server.shutdown()
self.server.server_close()
class MonitorHandler(BaseHTTPRequestHandler):
"""An HTTP request handler that responds with "OK" while running."""
def _set_headers(self):
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
def do_GET(self):
self._set_headers()
self.wfile.write(MONITOR_MESSAGE.encode("utf-8"))
def do_HEAD(self):
self._set_headers()
class Main:
"""A wrapper for the main application logic and retry loop."""
def __init__(self):
self.logs = Logs(name="main", to_cloud=LOGS_TO_CLOUD)
self.twitter = Twitter(logs_to_cloud=LOGS_TO_CLOUD)
def twitter_callback(self, tweet):
"""Analyzes Trump tweets, trades stocks, and tweets about it."""
# Initialize the Analysis, Logs, Trading, and Twitter instances inside
# the callback to create separate httplib2 instances per thread.
analysis = Analysis(logs_to_cloud=LOGS_TO_CLOUD)
logs = Logs(name="main-callback", to_cloud=LOGS_TO_CLOUD)
# Analyze the tweet.
companies = analysis.find_companies(tweet)
logs.info("Using companies: %s" % companies)
if not companies:
return
# Trade stocks.
trading = Trading(logs_to_cloud=LOGS_TO_CLOUD)
trading.make_trades(companies)
# Tweet about it.
twitter = Twitter(logs_to_cloud=LOGS_TO_CLOUD)
twitter.tweet(companies, tweet)
def run_session(self):
"""Runs a single streaming session. Logs and cleans up after
exceptions.
"""
self.logs.info("Starting new session.")
try:
self.twitter.start_streaming(self.twitter_callback)
        except Exception:
self.logs.catch()
finally:
self.twitter.stop_streaming()
self.logs.info("Ending session.")
def backoff(self, tries):
"""Sleeps an exponential number of seconds based on the number of
tries.
"""
delay = BACKOFF_STEP_S * pow(2, tries)
self.logs.warn("Waiting for %.1f seconds." % delay)
sleep(delay)
def run(self):
"""Runs the main retry loop with exponential backoff."""
tries = 0
while True:
# The session blocks until an error occurs.
self.run_session()
# Remember the first time a backoff sequence starts.
now = datetime.now()
if tries == 0:
self.logs.debug("Starting first backoff sequence.")
backoff_start = now
# Reset the backoff sequence if the last error was long ago.
if (now - backoff_start).total_seconds() > BACKOFF_RESET_S:
self.logs.debug("Starting new backoff sequence.")
tries = 0
backoff_start = now
# Give up after the maximum number of tries.
if tries >= MAX_TRIES:
self.logs.warn("Exceeded maximum retry count.")
break
# Wait according to the progression of the backoff sequence.
self.backoff(tries)
# Increment the number of tries for the next error.
tries += 1
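# Worked example (added for illustration): the per-try delays are
# 0.1, 0.2, 0.4, ..., 204.8 seconds for tries 0..11, so one full backoff
# sequence waits BACKOFF_STEP_S * (2 ** MAX_TRIES - 1) = 409.5 seconds in
# total, matching the MAX_TRIES comment above.
def _example_total_backoff_delay():
    """Return the total delay of one complete backoff sequence in seconds."""
    return sum(BACKOFF_STEP_S * 2 ** tries for tries in range(MAX_TRIES))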
if __name__ == "__main__":
monitor = Monitor()
monitor.start()
try:
Main().run()
finally:
monitor.stop()
|
__init__.py
|
"""
The ``python_function`` model flavor serves as a default model interface for MLflow Python models.
Any MLflow Python model is expected to be loadable as a ``python_function`` model.
In addition, the ``mlflow.pyfunc`` module defines a generic :ref:`filesystem format
<pyfunc-filesystem-format>` for Python models and provides utilities for saving to and loading from
this format. The format is self-contained in the sense that it includes all necessary information
for anyone to load it and use it. Dependencies are either stored directly with the model or
referenced via a Conda environment.
The ``mlflow.pyfunc`` module also defines utilities for creating custom ``pyfunc`` models
using frameworks and inference logic that may not be natively included in MLflow. See
:ref:`pyfunc-create-custom`.
.. _pyfunc-inference-api:
*************
Inference API
*************
Python function models are loaded as an instance of :py:class:`PyFuncModel
<mlflow.pyfunc.PyFuncModel>`, which is an MLflow wrapper around the model implementation and model
metadata (MLmodel file). You can score the model by calling the :py:func:`predict()
<mlflow.pyfunc.PyFuncModel.predict>` method, which has the following signature::
predict(
model_input: [pandas.DataFrame, numpy.ndarray, scipy.sparse.(csc.csc_matrix | csr.csr_matrix),
List[Any], Dict[str, Any]]
) -> [numpy.ndarray | pandas.(Series | DataFrame) | List]
All PyFunc models will support `pandas.DataFrame` as input and DL PyFunc models will also support
tensor inputs in the form of Dict[str, numpy.ndarray] (named tensors) and `numpy.ndarrays`
(unnamed tensors).
.. _pyfunc-filesystem-format:
*****************
Filesystem format
*****************
The Pyfunc format is defined as a directory structure containing all required data, code, and
configuration::
./dst-path/
./MLmodel: configuration
<code>: code packaged with the model (specified in the MLmodel file)
<data>: data packaged with the model (specified in the MLmodel file)
<env>: Conda environment definition (specified in the MLmodel file)
The directory structure may contain additional contents that can be referenced by the ``MLmodel``
configuration.
.. _pyfunc-model-config:
MLModel configuration
#####################
A Python model contains an ``MLmodel`` file in **python_function** format in its root with the
following parameters:
- loader_module [required]:
Python module that can load the model. Expected as module identifier
e.g. ``mlflow.sklearn``, it will be imported using ``importlib.import_module``.
The imported module must contain a function with the following signature::
_load_pyfunc(path: string) -> <pyfunc model implementation>
The path argument is specified by the ``data`` parameter and may refer to a file or
directory. The model implementation is expected to be an object with a
``predict`` method with the following signature::
predict(
model_input: [pandas.DataFrame, numpy.ndarray,
scipy.sparse.(csc.csc_matrix | csr.csr_matrix), List[Any], Dict[str, Any]]
) -> [numpy.ndarray | pandas.(Series | DataFrame) | List]
- code [optional]:
Relative path to a directory containing the code packaged with this model.
All files and directories inside this directory are added to the Python path
prior to importing the model loader.
- data [optional]:
Relative path to a file or directory containing model data.
The path is passed to the model loader.
- env [optional]:
Relative path to an exported Conda environment. If present this environment
should be activated prior to running the model.
- Optionally, any additional parameters necessary for interpreting the serialized model in
``pyfunc`` format.
.. rubric:: Example
::
tree example/sklearn_iris/mlruns/run1/outputs/linear-lr
::
├── MLmodel
├── code
  │   └── sklearn_iris.py
├── data
│ └── model.pkl
└── mlflow_env.yml
::
cat example/sklearn_iris/mlruns/run1/outputs/linear-lr/MLmodel
::
python_function:
code: code
data: data/model.pkl
loader_module: mlflow.sklearn
env: mlflow_env.yml
main: sklearn_iris
.. _pyfunc-create-custom:
******************************
Creating custom Pyfunc models
******************************
MLflow's persistence modules provide convenience functions for creating models with the
``pyfunc`` flavor in a variety of machine learning frameworks (scikit-learn, Keras, Pytorch, and
more); however, they do not cover every use case. For example, you may want to create an MLflow
model with the ``pyfunc`` flavor using a framework that MLflow does not natively support.
Alternatively, you may want to build an MLflow model that executes custom logic when evaluating
queries, such as preprocessing and postprocessing routines. Therefore, ``mlflow.pyfunc``
provides utilities for creating ``pyfunc`` models from arbitrary code and model data.
The :meth:`save_model()` and :meth:`log_model()` methods are designed to support multiple workflows
for creating custom ``pyfunc`` models that incorporate custom inference logic and artifacts
that the logic may require.
An `artifact` is a file or directory, such as a serialized model or a CSV. For example, a
serialized TensorFlow graph is an artifact. An MLflow model directory is also an artifact.
.. _pyfunc-create-custom-workflows:
Workflows
#########
:meth:`save_model()` and :meth:`log_model()` support the following workflows:
1. Programmatically defining a new MLflow model, including its attributes and artifacts.
Given a set of artifact URIs, :meth:`save_model()` and :meth:`log_model()` can
automatically download artifacts from their URIs and create an MLflow model directory.
In this case, you must define a Python class which inherits from :class:`~PythonModel`,
defining ``predict()`` and, optionally, ``load_context()``. An instance of this class is
specified via the ``python_model`` parameter; it is automatically serialized and deserialized
as a Python class, including all of its attributes.
2. Interpreting pre-existing data as an MLflow model.
If you already have a directory containing model data, :meth:`save_model()` and
:meth:`log_model()` can import the data as an MLflow model. The ``data_path`` parameter
specifies the local filesystem path to the directory containing model data.
In this case, you must provide a Python module, called a `loader module`. The
loader module defines a ``_load_pyfunc()`` method that performs the following tasks:
- Load data from the specified ``data_path``. For example, this process may include
deserializing pickled Python objects or models or parsing CSV files.
- Construct and return a pyfunc-compatible model wrapper. As in the first
use case, this wrapper must define a ``predict()`` method that is used to evaluate
queries. ``predict()`` must adhere to the :ref:`pyfunc-inference-api`.
The ``loader_module`` parameter specifies the name of your loader module.
For an example loader module implementation, refer to the `loader module
implementation in mlflow.keras <https://github.com/mlflow/mlflow/blob/
74d75109aaf2975f5026104d6125bb30f4e3f744/mlflow/keras.py#L157-L187>`_.
.. _pyfunc-create-custom-selecting-workflow:
Which workflow is right for my use case?
########################################
We consider the first workflow to be more user-friendly and generally recommend it for the
following reasons:
- It automatically resolves and collects specified model artifacts.
- It automatically serializes and deserializes the ``python_model`` instance and all of
  its attributes, reducing the amount of user logic that is required to load the model.
- You can create Models using logic that is defined in the ``__main__`` scope. This allows
custom models to be constructed in interactive environments, such as notebooks and the Python
REPL.
You may prefer the second, lower-level workflow for the following reasons:
- Inference logic is always persisted as code, rather than a Python object. This makes logic
easier to inspect and modify later.
- If you have already collected all of your model data in a single location, the second
workflow allows it to be saved in MLflow format directly, without enumerating constituent
artifacts.
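.. rubric:: Example
A minimal sketch of the first workflow, using only the ``PythonModel`` class and
:meth:`save_model()` described above::
    import mlflow.pyfunc
    class AddN(mlflow.pyfunc.PythonModel):
        # A toy model that adds ``n`` to every value of its input.
        def __init__(self, n):
            self.n = n
        def predict(self, context, model_input):
            return model_input.apply(lambda column: column + self.n)
    # Save the model; "add_n_model" is an illustrative output path.
    mlflow.pyfunc.save_model(path="add_n_model", python_model=AddN(n=5))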
"""
import importlib
import tempfile
import signal
import sys
import numpy as np
import os
import pandas
import yaml
from copy import deepcopy
import logging
import threading
import collections
import subprocess
from typing import Any, Union, List, Dict, Iterator, Tuple
import mlflow
import mlflow.pyfunc.model
from mlflow.models import Model, ModelSignature, ModelInputExample
from mlflow.models.model import MLMODEL_FILE_NAME
from mlflow.models.utils import _save_example
from mlflow.pyfunc.model import ( # pylint: disable=unused-import
PythonModel,
PythonModelContext,
get_default_conda_env,
)
from mlflow.pyfunc.model import get_default_pip_requirements
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.types import DataType, Schema, TensorSpec
from mlflow.types.utils import clean_tensor_type
from mlflow.utils import PYTHON_VERSION, get_major_minor_py_version
from mlflow.utils.annotations import deprecated
from mlflow.utils.file_utils import _copy_file_or_tree, write_to
from mlflow.utils.model_utils import (
_get_flavor_configuration,
_validate_and_copy_code_paths,
_add_code_from_conf_to_system_path,
_get_flavor_configuration_from_uri,
_validate_and_prepare_target_save_path,
)
from mlflow.utils.uri import append_to_uri_path
from mlflow.utils.environment import (
_validate_env_arguments,
_process_pip_requirements,
_process_conda_env,
_CONDA_ENV_FILE_NAME,
_REQUIREMENTS_FILE_NAME,
_CONSTRAINTS_FILE_NAME,
_PYTHON_ENV_FILE_NAME,
_PythonEnv,
)
from mlflow.utils import env_manager as _EnvManager
from mlflow.utils.docstring_utils import format_docstring, LOG_MODEL_PARAM_DOCS
from mlflow.utils.databricks_utils import is_in_databricks_runtime
from mlflow.utils.file_utils import get_or_create_tmp_dir, get_or_create_nfs_tmp_dir
from mlflow.utils.process import cache_return_value_per_process
from mlflow.exceptions import MlflowException
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
from mlflow.protos.databricks_pb2 import (
INVALID_PARAMETER_VALUE,
RESOURCE_DOES_NOT_EXIST,
)
from scipy.sparse import csc_matrix, csr_matrix
from mlflow.utils.requirements_utils import (
_check_requirement_satisfied,
_parse_requirements,
)
from mlflow.utils import find_free_port
from mlflow.utils.nfs_on_spark import get_nfs_cache_root_dir
FLAVOR_NAME = "python_function"
MAIN = "loader_module"
CODE = "code"
DATA = "data"
ENV = "env"
PY_VERSION = "python_version"
_logger = logging.getLogger(__name__)
PyFuncInput = Union[pandas.DataFrame, np.ndarray, csc_matrix, csr_matrix, List[Any], Dict[str, Any]]
PyFuncOutput = Union[pandas.DataFrame, pandas.Series, np.ndarray, list]
def add_to_model(model, loader_module, data=None, code=None, env=None, **kwargs):
"""
Add a ``pyfunc`` spec to the model configuration.
Defines ``pyfunc`` configuration schema. Caller can use this to create a valid ``pyfunc`` model
flavor out of an existing directory structure. For example, other model flavors can use this to
specify how to use their output as a ``pyfunc``.
NOTE:
All paths are relative to the exported model root directory.
:param model: Existing model.
:param loader_module: The module to be used to load the model.
:param data: Path to the model data.
:param code: Path to the code dependencies.
:param env: Conda environment.
:param req: pip requirements file.
:param kwargs: Additional key-value pairs to include in the ``pyfunc`` flavor specification.
Values must be YAML-serializable.
:return: Updated model configuration.
"""
params = deepcopy(kwargs)
params[MAIN] = loader_module
params[PY_VERSION] = PYTHON_VERSION
if code:
params[CODE] = code
if data:
params[DATA] = data
if env:
params[ENV] = env
return model.add_flavor(FLAVOR_NAME, **params)
def _load_model_env(path):
"""
Get ENV file string from a model configuration stored in Python Function format.
Returned value is a model-relative path to a Conda Environment file,
or None if none was specified at model save time
"""
return _get_flavor_configuration(model_path=path, flavor_name=FLAVOR_NAME).get(ENV, None)
def _enforce_mlflow_datatype(name, values: pandas.Series, t: DataType):
"""
Enforce the input column type matches the declared in model input schema.
The following type conversions are allowed:
1. object -> string
2. int -> long (upcast)
3. float -> double (upcast)
4. int -> double (safe conversion)
5. np.datetime64[x] -> datetime (any precision)
6. object -> datetime
Any other type mismatch will raise error.
"""
if values.dtype == object and t not in (DataType.binary, DataType.string):
values = values.infer_objects()
if t == DataType.string and values.dtype == object:
# NB: the object can contain any type and we currently cannot cast to pandas Strings
# due to how None is cast
return values
# NB: Comparison of pandas and numpy data type fails when numpy data type is on the left hand
# side of the comparison operator. It works, however, if pandas type is on the left hand side.
# That is because pandas is aware of numpy.
if t.to_pandas() == values.dtype or t.to_numpy() == values.dtype:
# The types are already compatible => conversion is not necessary.
return values
if t == DataType.binary and values.dtype.kind == t.binary.to_numpy().kind:
# NB: bytes in numpy have variable itemsize depending on the length of the longest
# element in the array (column). Since MLflow binary type is length agnostic, we ignore
# itemsize when matching binary columns.
return values
if t == DataType.datetime and values.dtype.kind == t.to_numpy().kind:
# NB: datetime values have variable precision denoted by brackets, e.g. datetime64[ns]
# denotes nanosecond precision. Since MLflow datetime type is precision agnostic, we
# ignore precision when matching datetime columns.
return values
if t == DataType.datetime and values.dtype == object:
# NB: Pyspark date columns get converted to object when converted to a pandas
# DataFrame. To respect the original typing, we convert the column to datetime.
try:
return values.astype(np.datetime64, errors="raise")
except ValueError:
raise MlflowException(
"Failed to convert column {0} from type {1} to {2}.".format(name, values.dtype, t)
)
numpy_type = t.to_numpy()
if values.dtype.kind == numpy_type.kind:
is_upcast = values.dtype.itemsize <= numpy_type.itemsize
elif values.dtype.kind == "u" and numpy_type.kind == "i":
is_upcast = values.dtype.itemsize < numpy_type.itemsize
elif values.dtype.kind in ("i", "u") and numpy_type == np.float64:
# allow (u)int => double conversion
is_upcast = values.dtype.itemsize <= 6
else:
is_upcast = False
if is_upcast:
return values.astype(numpy_type, errors="raise")
else:
# NB: conversion between incompatible types (e.g. floats -> ints or
# double -> float) are not allowed. While supported by pandas and numpy,
# these conversions alter the values significantly.
def all_ints(xs):
return all(pandas.isnull(x) or int(x) == x for x in xs)
hint = ""
if (
values.dtype == np.float64
and numpy_type.kind in ("i", "u")
and values.hasnans
and all_ints(values)
):
hint = (
" Hint: the type mismatch is likely caused by missing values. "
"Integer columns in python can not represent missing values and are therefore "
"encoded as floats. The best way to avoid this problem is to infer the model "
"schema based on a realistic data sample (training dataset) that includes missing "
"values. Alternatively, you can declare integer columns as doubles (float64) "
"whenever these columns may have missing values. See `Handling Integers With "
"Missing Values <https://www.mlflow.org/docs/latest/models.html#"
"handling-integers-with-missing-values>`_ for more details."
)
raise MlflowException(
"Incompatible input types for column {0}. "
"Can not safely convert {1} to {2}.{3}".format(name, values.dtype, numpy_type, hint)
)
def _enforce_tensor_spec(
values: Union[np.ndarray, csc_matrix, csr_matrix], tensor_spec: TensorSpec
):
"""
Enforce the input tensor shape and type matches the provided tensor spec.
"""
expected_shape = tensor_spec.shape
actual_shape = values.shape
actual_type = values.dtype if isinstance(values, np.ndarray) else values.data.dtype
if len(expected_shape) != len(actual_shape):
raise MlflowException(
"Shape of input {0} does not match expected shape {1}.".format(
actual_shape, expected_shape
)
)
for expected, actual in zip(expected_shape, actual_shape):
if expected == -1:
continue
if expected != actual:
raise MlflowException(
"Shape of input {0} does not match expected shape {1}.".format(
actual_shape, expected_shape
)
)
if clean_tensor_type(actual_type) != tensor_spec.type:
raise MlflowException(
"dtype of input {0} does not match expected dtype {1}".format(
values.dtype, tensor_spec.type
)
)
return values
def _enforce_col_schema(pfInput: PyFuncInput, input_schema: Schema):
"""Enforce the input columns conform to the model's column-based signature."""
if input_schema.has_input_names():
input_names = input_schema.input_names()
else:
input_names = pfInput.columns[: len(input_schema.inputs)]
input_types = input_schema.input_types()
new_pfInput = pandas.DataFrame()
for i, x in enumerate(input_names):
new_pfInput[x] = _enforce_mlflow_datatype(x, pfInput[x], input_types[i])
return new_pfInput
def _enforce_tensor_schema(pfInput: PyFuncInput, input_schema: Schema):
"""Enforce the input tensor(s) conforms to the model's tensor-based signature."""
if input_schema.has_input_names():
if isinstance(pfInput, dict):
new_pfInput = dict()
for col_name, tensor_spec in zip(input_schema.input_names(), input_schema.inputs):
if not isinstance(pfInput[col_name], np.ndarray):
raise MlflowException(
"This model contains a tensor-based model signature with input names,"
" which suggests a dictionary input mapping input name to a numpy"
" array, but a dict with value type {0} was found.".format(
type(pfInput[col_name])
)
)
new_pfInput[col_name] = _enforce_tensor_spec(pfInput[col_name], tensor_spec)
elif isinstance(pfInput, pandas.DataFrame):
new_pfInput = dict()
for col_name, tensor_spec in zip(input_schema.input_names(), input_schema.inputs):
new_pfInput[col_name] = _enforce_tensor_spec(
np.array(pfInput[col_name], dtype=tensor_spec.type), tensor_spec
)
else:
raise MlflowException(
"This model contains a tensor-based model signature with input names, which"
" suggests a dictionary input mapping input name to tensor, but an input of"
" type {0} was found.".format(type(pfInput))
)
else:
if isinstance(pfInput, pandas.DataFrame):
new_pfInput = _enforce_tensor_spec(pfInput.to_numpy(), input_schema.inputs[0])
elif isinstance(pfInput, (np.ndarray, csc_matrix, csr_matrix)):
new_pfInput = _enforce_tensor_spec(pfInput, input_schema.inputs[0])
else:
raise MlflowException(
"This model contains a tensor-based model signature with no input names,"
" which suggests a numpy array input, but an input of type {0} was"
" found.".format(type(pfInput))
)
return new_pfInput
def _enforce_schema(pfInput: PyFuncInput, input_schema: Schema):
"""
    Enforces that the provided input matches the model's input schema.
    For signatures with input names, we check there are no missing inputs and reorder the inputs to
match the ordering declared in schema if necessary. Any extra columns are ignored.
For column-based signatures, we make sure the types of the input match the type specified in
the schema or if it can be safely converted to match the input schema.
For tensor-based signatures, we make sure the shape and type of the input matches the shape
and type specified in model's input schema.
"""
if not input_schema.is_tensor_spec():
if isinstance(pfInput, (list, np.ndarray, dict)):
try:
pfInput = pandas.DataFrame(pfInput)
except Exception as e:
raise MlflowException(
"This model contains a column-based signature, which suggests a DataFrame"
" input. There was an error casting the input data to a DataFrame:"
" {0}".format(str(e))
)
if not isinstance(pfInput, pandas.DataFrame):
raise MlflowException(
"Expected input to be DataFrame or list. Found: %s" % type(pfInput).__name__
)
if input_schema.has_input_names():
# make sure there are no missing columns
input_names = input_schema.input_names()
expected_cols = set(input_names)
actual_cols = set()
if len(expected_cols) == 1 and isinstance(pfInput, np.ndarray):
# for schemas with a single column, match input with column
pfInput = {input_names[0]: pfInput}
actual_cols = expected_cols
elif isinstance(pfInput, pandas.DataFrame):
actual_cols = set(pfInput.columns)
elif isinstance(pfInput, dict):
actual_cols = set(pfInput.keys())
missing_cols = expected_cols - actual_cols
extra_cols = actual_cols - expected_cols
# Preserve order from the original columns, since missing/extra columns are likely to
# be in same order.
missing_cols = [c for c in input_names if c in missing_cols]
extra_cols = [c for c in actual_cols if c in extra_cols]
if missing_cols:
raise MlflowException(
"Model is missing inputs {0}."
" Note that there were extra inputs: {1}".format(missing_cols, extra_cols)
)
elif not input_schema.is_tensor_spec():
# The model signature does not specify column names => we can only verify column count.
num_actual_columns = len(pfInput.columns)
if num_actual_columns < len(input_schema.inputs):
raise MlflowException(
"Model inference is missing inputs. The model signature declares "
"{0} inputs but the provided value only has "
"{1} inputs. Note: the inputs were not named in the signature so we can "
"only verify their count.".format(len(input_schema.inputs), num_actual_columns)
)
return (
_enforce_tensor_schema(pfInput, input_schema)
if input_schema.is_tensor_spec()
else _enforce_col_schema(pfInput, input_schema)
)
class PyFuncModel:
"""
MLflow 'python function' model.
Wrapper around model implementation and metadata. This class is not meant to be constructed
directly. Instead, instances of this class are constructed and returned from
:py:func:`load_model() <mlflow.pyfunc.load_model>`.
``model_impl`` can be any Python object that implements the `Pyfunc interface
<https://mlflow.org/docs/latest/python_api/mlflow.pyfunc.html#pyfunc-inference-api>`_, and is
returned by invoking the model's ``loader_module``.
``model_meta`` contains model metadata loaded from the MLmodel file.
"""
def __init__(self, model_meta: Model, model_impl: Any):
if not hasattr(model_impl, "predict"):
raise MlflowException("Model implementation is missing required predict method.")
if not model_meta:
raise MlflowException("Model is missing metadata.")
self._model_meta = model_meta
self._model_impl = model_impl
def predict(self, data: PyFuncInput) -> PyFuncOutput:
"""
Generate model predictions.
        If the model contains a signature, the input schema is enforced before calling the model
        implementation with the sanitized input. If the pyfunc model does not include a model
        schema, the input is passed to the model implementation as is. See `Model Signature
        Enforcement <https://www.mlflow.org/docs/latest/models.html#signature-enforcement>`_
        for more details.
:param data: Model input as one of pandas.DataFrame, numpy.ndarray,
scipy.sparse.(csc.csc_matrix | csr.csr_matrix), List[Any], or
Dict[str, numpy.ndarray]
:return: Model predictions as one of pandas.DataFrame, pandas.Series, numpy.ndarray or list.
"""
input_schema = self.metadata.get_input_schema()
if input_schema is not None:
data = _enforce_schema(data, input_schema)
return self._model_impl.predict(data)
@property
def metadata(self):
"""Model metadata."""
if self._model_meta is None:
raise MlflowException("Model is missing metadata.")
return self._model_meta
def __repr__(self):
info = {}
if self._model_meta is not None:
if hasattr(self._model_meta, "run_id") and self._model_meta.run_id is not None:
info["run_id"] = self._model_meta.run_id
if (
hasattr(self._model_meta, "artifact_path")
and self._model_meta.artifact_path is not None
):
info["artifact_path"] = self._model_meta.artifact_path
info["flavor"] = self._model_meta.flavors[FLAVOR_NAME]["loader_module"]
return yaml.safe_dump({"mlflow.pyfunc.loaded_model": info}, default_flow_style=False)
def _warn_dependency_requirement_mismatches(model_path):
"""
Inspects the model's dependencies and prints a warning if the current Python environment
doesn't satisfy them.
"""
req_file_path = os.path.join(model_path, _REQUIREMENTS_FILE_NAME)
if not os.path.exists(req_file_path):
return
try:
mismatch_infos = []
for req in _parse_requirements(req_file_path, is_constraint=False):
req_line = req.req_str
mismatch_info = _check_requirement_satisfied(req_line)
if mismatch_info is not None:
mismatch_infos.append(str(mismatch_info))
if len(mismatch_infos) > 0:
mismatch_str = " - " + "\n - ".join(mismatch_infos)
warning_msg = (
"Detected one or more mismatches between the model's dependencies and the current "
f"Python environment:\n{mismatch_str}\n"
"To fix the mismatches, call `mlflow.pyfunc.get_model_dependencies(model_uri)` "
"to fetch the model's environment and install dependencies using the resulting "
"environment file."
)
_logger.warning(warning_msg)
except Exception as e:
_logger.warning(
f"Encountered an unexpected error ({repr(e)}) while detecting model dependency "
"mismatches. Set logging level to DEBUG to see the full traceback."
)
_logger.debug("", exc_info=True)
def load_model(
model_uri: str, suppress_warnings: bool = False, dst_path: str = None
) -> PyFuncModel:
"""
Load a model stored in Python function format.
:param model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
- ``mlflow-artifacts:/path/to/model``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
:param suppress_warnings: If ``True``, non-fatal warning messages associated with the model
loading process will be suppressed. If ``False``, these warning
messages will be emitted.
:param dst_path: The local filesystem path to which to download the model artifact.
This directory must already exist. If unspecified, a local output
path will be created.
"""
local_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path)
if not suppress_warnings:
_warn_dependency_requirement_mismatches(local_path)
model_meta = Model.load(os.path.join(local_path, MLMODEL_FILE_NAME))
conf = model_meta.flavors.get(FLAVOR_NAME)
if conf is None:
raise MlflowException(
'Model does not have the "{flavor_name}" flavor'.format(flavor_name=FLAVOR_NAME),
RESOURCE_DOES_NOT_EXIST,
)
model_py_version = conf.get(PY_VERSION)
if not suppress_warnings:
_warn_potentially_incompatible_py_version_if_necessary(model_py_version=model_py_version)
_add_code_from_conf_to_system_path(local_path, conf, code_key=CODE)
data_path = os.path.join(local_path, conf[DATA]) if (DATA in conf) else local_path
model_impl = importlib.import_module(conf[MAIN])._load_pyfunc(data_path)
return PyFuncModel(model_meta=model_meta, model_impl=model_impl)
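# Illustrative usage (the URI below is a placeholder):
#   model = mlflow.pyfunc.load_model("models:/<model_name>/<model_version>")
#   predictions = model.predict(pandas.DataFrame({"x": [1.0, 2.0]}))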
def _download_model_conda_env(model_uri):
conda_yml_file_name = _get_flavor_configuration_from_uri(model_uri, FLAVOR_NAME)[ENV]
return _download_artifact_from_uri(append_to_uri_path(model_uri, conda_yml_file_name))
def _get_model_dependencies(model_uri, format="pip"): # pylint: disable=redefined-builtin
if format == "pip":
req_file_uri = append_to_uri_path(model_uri, _REQUIREMENTS_FILE_NAME)
try:
return _download_artifact_from_uri(req_file_uri)
except Exception as e:
# fallback to download conda.yaml file and parse the "pip" section from it.
_logger.info(
f"Downloading model '{_REQUIREMENTS_FILE_NAME}' file failed, error is {repr(e)}. "
"Falling back to fetching pip requirements from the model's 'conda.yaml' file. "
"Other conda dependencies will be ignored."
)
conda_yml_path = _download_model_conda_env(model_uri)
with open(conda_yml_path, "r") as yf:
conda_yml = yaml.safe_load(yf)
conda_deps = conda_yml.get("dependencies", [])
for index, dep in enumerate(conda_deps):
if isinstance(dep, dict) and "pip" in dep:
pip_deps_index = index
break
else:
raise MlflowException(
"No pip section found in conda.yaml file in the model directory.",
error_code=RESOURCE_DOES_NOT_EXIST,
)
pip_deps = conda_deps.pop(pip_deps_index)["pip"]
tmp_dir = tempfile.mkdtemp()
pip_file_path = os.path.join(tmp_dir, _REQUIREMENTS_FILE_NAME)
with open(pip_file_path, "w") as f:
f.write("\n".join(pip_deps) + "\n")
if len(conda_deps) > 0:
_logger.warning(
"The following conda dependencies have been excluded from the environment file:"
f" {', '.join(conda_deps)}."
)
return pip_file_path
elif format == "conda":
conda_yml_path = _download_model_conda_env(model_uri)
return conda_yml_path
else:
raise MlflowException(
f"Illegal format argument '{format}'.", error_code=INVALID_PARAMETER_VALUE
)
def get_model_dependencies(model_uri, format="pip"): # pylint: disable=redefined-builtin
"""
:param model_uri: The uri of the model to get dependencies from.
:param format: The format of the returned dependency file. If the ``"pip"`` format is
specified, the path to a pip ``requirements.txt`` file is returned.
If the ``"conda"`` format is specified, the path to a ``"conda.yaml"``
                   file is returned. If the ``"pip"`` format is specified but the model
was not saved with a ``requirements.txt`` file, the ``pip`` section
of the model's ``conda.yaml`` file is extracted instead, and any
additional conda dependencies are ignored. Default value is ``"pip"``.
:return: The local filesystem path to either a pip ``requirements.txt`` file
(if ``format="pip"``) or a ``conda.yaml`` file (if ``format="conda"``)
specifying the model's dependencies.
"""
dep_file = _get_model_dependencies(model_uri, format)
if format == "pip":
prefix = "%" if is_in_databricks_runtime() else ""
_logger.info(
"To install these model dependencies, run the "
f"following command: '{prefix}pip install -r {dep_file}'."
)
return dep_file
@deprecated("mlflow.pyfunc.load_model", 1.0)
def load_pyfunc(model_uri, suppress_warnings=False):
"""
Load a model stored in Python function format.
:param model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
- ``mlflow-artifacts:/path/to/model``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
:param suppress_warnings: If ``True``, non-fatal warning messages associated with the model
loading process will be suppressed. If ``False``, these warning
messages will be emitted.
"""
return load_model(model_uri, suppress_warnings)
def _warn_potentially_incompatible_py_version_if_necessary(model_py_version=None):
"""
Compares the version of Python that was used to save a given model with the version
of Python that is currently running. If a major or minor version difference is detected,
logs an appropriate warning.
"""
if model_py_version is None:
_logger.warning(
"The specified model does not have a specified Python version. It may be"
" incompatible with the version of Python that is currently running: Python %s",
PYTHON_VERSION,
)
elif get_major_minor_py_version(model_py_version) != get_major_minor_py_version(PYTHON_VERSION):
_logger.warning(
"The version of Python that the model was saved in, `Python %s`, differs"
" from the version of Python that is currently running, `Python %s`,"
" and may be incompatible",
model_py_version,
PYTHON_VERSION,
)
def _create_model_downloading_tmp_dir(should_use_nfs):
if should_use_nfs:
root_tmp_dir = get_or_create_nfs_tmp_dir()
else:
root_tmp_dir = get_or_create_tmp_dir()
root_model_cache_dir = os.path.join(root_tmp_dir, "models")
os.makedirs(root_model_cache_dir, exist_ok=True)
tmp_model_dir = tempfile.mkdtemp(dir=root_model_cache_dir)
# mkdtemp creates a directory with permission 0o700
# change it to be 0o777 to ensure it can be seen in spark UDF
os.chmod(tmp_model_dir, 0o777)
return tmp_model_dir
@cache_return_value_per_process
def _get_or_create_env_root_dir(should_use_nfs):
if should_use_nfs:
root_tmp_dir = get_or_create_nfs_tmp_dir()
else:
root_tmp_dir = get_or_create_tmp_dir()
env_root_dir = os.path.join(root_tmp_dir, "envs")
os.makedirs(env_root_dir, exist_ok=True)
return env_root_dir
_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP = 200
def spark_udf(spark, model_uri, result_type="double", env_manager="local"):
"""
A Spark UDF that can be used to invoke the Python function formatted model.
Parameters passed to the UDF are forwarded to the model as a DataFrame where the column names
are ordinals (0, 1, ...). On some versions of Spark (3.0 and above), it is also possible to
wrap the input in a struct. In that case, the data will be passed as a DataFrame with column
names given by the struct definition (e.g. when invoked as my_udf(struct('x', 'y')), the model
will get the data as a pandas DataFrame with 2 columns 'x' and 'y').
    If a model contains a signature, the UDF can be called without specifying column name
    arguments. In this case, the UDF will be called with column names from the signature, so the
    evaluation dataframe's column names must match the model signature's column names.
The predictions are filtered to contain only the columns that can be represented as the
``result_type``. If the ``result_type`` is string or array of strings, all predictions are
converted to string. If the result type is not an array type, the left most column with
matching type is returned.
NOTE: Inputs of type ``pyspark.sql.types.DateType`` are not supported on earlier versions of
Spark (2.4 and below).
.. code-block:: python
:caption: Example
from pyspark.sql.functions import struct
predict = mlflow.pyfunc.spark_udf(spark, "/my/local/model")
df.withColumn("prediction", predict(struct("name", "age"))).show()
:param spark: A SparkSession object.
:param model_uri: The location, in URI format, of the MLflow model with the
:py:mod:`mlflow.pyfunc` flavor. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
- ``mlflow-artifacts:/path/to/model``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
:param result_type: the return type of the user-defined function. The value can be either a
``pyspark.sql.types.DataType`` object or a DDL-formatted type string. Only a primitive
type or an array ``pyspark.sql.types.ArrayType`` of primitive type are allowed.
The following classes of result type are supported:
- "int" or ``pyspark.sql.types.IntegerType``: The leftmost integer that can fit in an
``int32`` or an exception if there is none.
- "long" or ``pyspark.sql.types.LongType``: The leftmost long integer that can fit in an
``int64`` or an exception if there is none.
- ``ArrayType(IntegerType|LongType)``: All integer columns that can fit into the requested
size.
- "float" or ``pyspark.sql.types.FloatType``: The leftmost numeric result cast to
``float32`` or an exception if there is none.
- "double" or ``pyspark.sql.types.DoubleType``: The leftmost numeric result cast to
``double`` or an exception if there is none.
- ``ArrayType(FloatType|DoubleType)``: All numeric columns cast to the requested type or
an exception if there are no numeric columns.
- "string" or ``pyspark.sql.types.StringType``: The leftmost column converted to ``string``.
- ``ArrayType(StringType)``: All columns converted to ``string``.
:param env_manager: The environment manager to use in order to create the python environment
for model inference. Note that environment is only restored in the context
of the PySpark UDF; the software environment outside of the UDF is
unaffected. Default value is ``local``, and the following values are
supported:
- ``conda``: (Recommended) Use Conda to restore the software environment
that was used to train the model.
- ``virtualenv``: Use virtualenv to restore the python environment that
was used to train the model.
- ``local``: Use the current Python environment for model inference, which
may differ from the environment used to train the model and may lead to
errors or invalid predictions.
:return: Spark UDF that applies the model's ``predict`` method to the data and returns a
type specified by ``result_type``, which by default is a double.
"""
# Scope Spark import to this method so users don't need pyspark to use non-Spark-related
# functionality.
import functools
from mlflow.pyfunc.spark_model_cache import SparkModelCache
from mlflow.utils._spark_utils import _SparkDirectoryDistributor
from pyspark.sql.functions import pandas_udf
from pyspark.sql.types import _parse_datatype_string
from pyspark.sql.types import (
ArrayType,
DataType as SparkDataType,
StructType as SparkStructType,
)
from pyspark.sql.types import DoubleType, IntegerType, FloatType, LongType, StringType
from mlflow.models.cli import _get_flavor_backend
_EnvManager.validate(env_manager)
    # Check whether spark is in local or local-cluster mode;
    # in this case all executors and the driver share the same filesystem.
is_spark_in_local_mode = spark.conf.get("spark.master").startswith("local")
nfs_root_dir = get_nfs_cache_root_dir()
should_use_nfs = nfs_root_dir is not None
should_use_spark_to_broadcast_file = not (is_spark_in_local_mode or should_use_nfs)
env_root_dir = _get_or_create_env_root_dir(should_use_nfs)
if not isinstance(result_type, SparkDataType):
result_type = _parse_datatype_string(result_type)
elem_type = result_type
if isinstance(elem_type, ArrayType):
elem_type = elem_type.elementType
supported_types = [IntegerType, LongType, FloatType, DoubleType, StringType]
if not any(isinstance(elem_type, x) for x in supported_types):
raise MlflowException(
message="Invalid result_type '{}'. Result type can only be one of or an array of one "
"of the following types: {}".format(str(elem_type), str(supported_types)),
error_code=INVALID_PARAMETER_VALUE,
)
local_model_path = _download_artifact_from_uri(
artifact_uri=model_uri, output_path=_create_model_downloading_tmp_dir(should_use_nfs)
)
if env_manager == _EnvManager.LOCAL:
# Assume the Spark executor Python environment is the same as on the Spark driver side.
_warn_dependency_requirement_mismatches(local_model_path)
_logger.warning(
'Calling `spark_udf()` with `env_manager="local"` does not recreate the same '
"environment that was used during training, which may lead to errors or inaccurate "
'predictions. We recommend specifying `env_manager="conda"`, which automatically '
"recreates the environment that was used to train the model and performs inference "
"in the recreated environment."
)
else:
_logger.info(
"This UDF will use Conda to recreate the model's software environment for inference. "
"This may take extra time during execution."
)
if not sys.platform.startswith("linux"):
# TODO: support killing mlflow server launched in UDF task when spark job canceled
# for non-linux system.
# https://stackoverflow.com/questions/53208/how-do-i-automatically-destroy-child-processes-in-windows
_logger.warning(
"In order to run inference code in restored python environment, PySpark UDF "
"processes spawn MLflow Model servers as child processes. Due to system "
"limitations with handling SIGKILL signals, these MLflow Model server child "
"processes cannot be cleaned up if the Spark Job is canceled."
)
if not should_use_spark_to_broadcast_file:
# Prepare the restored environment on the driver side if possible.
# Note: In the Databricks runtime, notebook cell output cannot capture child
# process output, so set capture_output=True so that if the `conda prepare env`
# command fails, the exception message includes the command's stdout/stderr;
# otherwise the user would have to dig through the cluster driver log to find it.
# In non-Databricks runtimes, set capture_output=False: the benefit is that
# output is printed immediately, rather than waiting for the conda command to
# fail and suddenly getting all of the output at once in the error message.
if env_manager != _EnvManager.LOCAL:
_get_flavor_backend(
local_model_path,
env_manager=env_manager,
install_mlflow=False,
env_root_dir=env_root_dir,
).prepare_env(model_uri=local_model_path, capture_output=is_in_databricks_runtime())
# Broadcast local model directory to remote worker if needed.
if should_use_spark_to_broadcast_file:
archive_path = SparkModelCache.add_local_model(spark, local_model_path)
model_metadata = Model.load(os.path.join(local_model_path, MLMODEL_FILE_NAME))
def _predict_row_batch(predict_fn, args):
input_schema = model_metadata.get_input_schema()
pdf = None
for x in args:
if type(x) == pandas.DataFrame:
if len(args) != 1:
raise Exception(
"If passing a StructType column, there should be only one "
"input column, but got %d" % len(args)
)
pdf = x
if pdf is None:
args = list(args)
if input_schema is None:
names = [str(i) for i in range(len(args))]
else:
names = input_schema.input_names()
if len(args) > len(names):
args = args[: len(names)]
if len(args) < len(names):
raise MlflowException(
"Model input is missing columns. Expected {0} input columns {1},"
" but the model received only {2} unnamed input columns"
" (Since the columns were passed unnamed they are expected to be in"
" the order specified by the schema).".format(len(names), names, len(args))
)
pdf = pandas.DataFrame(data={names[i]: x for i, x in enumerate(args)}, columns=names)
result = predict_fn(pdf)
if not isinstance(result, pandas.DataFrame):
result = pandas.DataFrame(data=result)
elem_type = result_type.elementType if isinstance(result_type, ArrayType) else result_type
if type(elem_type) == IntegerType:
result = result.select_dtypes(
[np.byte, np.ubyte, np.short, np.ushort, np.int32]
).astype(np.int32)
elif type(elem_type) == LongType:
result = result.select_dtypes([np.byte, np.ubyte, np.short, np.ushort, int])
elif type(elem_type) == FloatType:
result = result.select_dtypes(include=(np.number,)).astype(np.float32)
elif type(elem_type) == DoubleType:
result = result.select_dtypes(include=(np.number,)).astype(np.float64)
if len(result.columns) == 0:
raise MlflowException(
message="The the model did not produce any values compatible with the requested "
"type '{}'. Consider requesting udf with StringType or "
"Arraytype(StringType).".format(str(elem_type)),
error_code=INVALID_PARAMETER_VALUE,
)
if type(elem_type) == StringType:
result = result.applymap(str)
if type(result_type) == ArrayType:
return pandas.Series(result.to_numpy().tolist())
else:
return result[result.columns[0]]
result_type_hint = (
pandas.DataFrame if isinstance(result_type, SparkStructType) else pandas.Series
)
@pandas_udf(result_type)
def udf(
iterator: Iterator[Tuple[Union[pandas.Series, pandas.DataFrame], ...]]
) -> Iterator[result_type_hint]:
# importing here to prevent circular import
from mlflow.pyfunc.scoring_server.client import ScoringServerClient
# Note: this is a pandas UDF in iterator style, which takes an iterator of
# tuples of pandas.Series and outputs an iterator of pandas.Series.
scoring_server_proc = None
if env_manager != _EnvManager.LOCAL:
if should_use_spark_to_broadcast_file:
local_model_path_on_executor = _SparkDirectoryDistributor.get_or_extract(
archive_path
)
# Create individual conda_env_root_dir for each spark UDF task process.
env_root_dir_on_executor = _get_or_create_env_root_dir(should_use_nfs)
else:
local_model_path_on_executor = local_model_path
env_root_dir_on_executor = env_root_dir
pyfunc_backend = _get_flavor_backend(
local_model_path_on_executor,
workers=1,
install_mlflow=False,
env_manager=env_manager,
env_root_dir=env_root_dir_on_executor,
)
if should_use_spark_to_broadcast_file:
# Call "prepare_env" in advance in order to reduce scoring server launch time.
# So that we can use a shorter timeout when call `client.wait_server_ready`,
# otherwise we have to set a long timeout for `client.wait_server_ready` time,
# this prevents spark UDF task failing fast if other exception raised when scoring
# server launching.
# Set "capture_output" so that if "conda env create" command failed, the command
# stdout/stderr output will be attached to the exception message and included in
# driver side exception.
pyfunc_backend.prepare_env(
model_uri=local_model_path_on_executor, capture_output=True
)
# launch scoring server
# TODO: adjust timeout for server requests handler.
server_port = find_free_port()
scoring_server_proc = pyfunc_backend.serve(
model_uri=local_model_path_on_executor,
port=server_port,
host="127.0.0.1",
enable_mlserver=False,
synchronous=False,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
server_tail_logs = collections.deque(maxlen=_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP)
def server_redirect_log_thread_func(child_stdout):
for line in child_stdout:
if isinstance(line, bytes):
decoded = line.decode()
else:
decoded = line
server_tail_logs.append(decoded)
sys.stdout.write("[model server] " + decoded)
server_redirect_log_thread = threading.Thread(
target=server_redirect_log_thread_func, args=(scoring_server_proc.stdout,)
)
server_redirect_log_thread.daemon = True
server_redirect_log_thread.start()
client = ScoringServerClient("127.0.0.1", server_port)
try:
client.wait_server_ready(timeout=90, scoring_server_proc=scoring_server_proc)
except Exception:
err_msg = "During spark UDF task execution, mlflow model server failed to launch. "
if len(server_tail_logs) == _MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP:
err_msg += (
f"Last {_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP} "
"lines of MLflow model server output:\n"
)
else:
err_msg += "MLflow model server output:\n"
err_msg += "".join(server_tail_logs)
raise MlflowException(err_msg)
def batch_predict_fn(pdf):
return client.invoke(pdf)
elif env_manager == _EnvManager.LOCAL:
if should_use_spark_to_broadcast_file:
loaded_model, _ = SparkModelCache.get_or_load(archive_path)
else:
loaded_model = mlflow.pyfunc.load_model(local_model_path)
def batch_predict_fn(pdf):
return loaded_model.predict(pdf)
try:
for input_batch in iterator:
# If the UDF is called with multiple arguments, `input_batch` is a tuple
# composed of several pd.Series/pd.DataFrame objects.
# If the UDF is called with only one argument, `input_batch` is a single
# pd.Series/pd.DataFrame instance.
if isinstance(input_batch, (pandas.Series, pandas.DataFrame)):
# UDF is called with only one argument
row_batch_args = (input_batch,)
else:
row_batch_args = input_batch
yield _predict_row_batch(batch_predict_fn, row_batch_args)
finally:
if scoring_server_proc is not None:
os.kill(scoring_server_proc.pid, signal.SIGTERM)
udf.metadata = model_metadata
@functools.wraps(udf)
def udf_with_default_cols(*args):
if len(args) == 0:
input_schema = model_metadata.get_input_schema()
if input_schema and len(input_schema.inputs) > 0:
if input_schema.has_input_names():
input_names = input_schema.input_names()
return udf(*input_names)
else:
raise MlflowException(
message="Cannot apply udf because no column names specified. The udf "
"expects {} columns with types: {}. Input column names could not be "
"inferred from the model signature (column names not found).".format(
len(input_schema.inputs),
input_schema.inputs,
),
error_code=INVALID_PARAMETER_VALUE,
)
else:
raise MlflowException(
"Attempting to apply udf on zero columns because no column names were "
"specified as arguments or inferred from the model signature.",
error_code=INVALID_PARAMETER_VALUE,
)
else:
return udf(*args)
return udf_with_default_cols
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name="scikit-learn"))
def save_model(
path,
loader_module=None,
data_path=None,
code_path=None,
conda_env=None,
mlflow_model=None,
python_model=None,
artifacts=None,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
pip_requirements=None,
extra_pip_requirements=None,
**kwargs,
):
"""
save_model(path, loader_module=None, data_path=None, code_path=None, conda_env=None,\
mlflow_model=Model(), python_model=None, artifacts=None)
Save a Pyfunc model with custom inference logic and optional data dependencies to a path on the
local filesystem.
For information about the workflows that this method supports, please see :ref:`"workflows for
creating custom pyfunc models" <pyfunc-create-custom-workflows>` and
:ref:`"which workflow is right for my use case?" <pyfunc-create-custom-selecting-workflow>`.
Note that the parameters for the second workflow: ``loader_module``, ``data_path`` and the
parameters for the first workflow: ``python_model``, ``artifacts``, cannot be
specified together.
:param path: The path to which to save the Python model.
:param loader_module: The name of the Python module that is used to load the model
from ``data_path``. This module must define a method with the prototype
``_load_pyfunc(data_path)``. If not ``None``, this module and its
dependencies must be included in one of the following locations:
- The MLflow library.
- Package(s) listed in the model's Conda environment, specified by
the ``conda_env`` parameter.
- One or more of the files specified by the ``code_path`` parameter.
:param data_path: Path to a file or directory containing model data.
:param code_path: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path before the model is loaded.
:param conda_env: {{ conda_env }}
:param mlflow_model: :py:mod:`mlflow.models.Model` configuration to which to add the
**python_function** flavor.
:param python_model: An instance of a subclass of :class:`~PythonModel`. This class is
serialized using the CloudPickle library. Any dependencies of the class
should be included in one of the following locations:
- The MLflow library.
- Package(s) listed in the model's Conda environment, specified by
the ``conda_env`` parameter.
- One or more of the files specified by the ``code_path`` parameter.
Note: If the class is imported from another module, as opposed to being
defined in the ``__main__`` scope, the defining module should also be
included in one of the listed locations.
:param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs
are resolved to absolute filesystem paths, producing a dictionary of
``<name, absolute_path>`` entries. ``python_model`` can reference these
resolved entries as the ``artifacts`` property of the ``context`` parameter
in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`
and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.
For example, consider the following ``artifacts`` dictionary::
{
"my_file": "s3://my-bucket/path/to/my/file"
}
In this case, the ``"my_file"`` artifact is downloaded from S3. The
``python_model`` can then refer to ``"my_file"`` as an absolute filesystem
path via ``context.artifacts["my_file"]``.
If ``None``, no artifacts are added to the model.
:param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example can be a Pandas DataFrame where the given
example will be serialized to json using the Pandas split-oriented
format, or a numpy array where the example will be serialized to json
by converting it to a list. Bytes are base64-encoded.
:param pip_requirements: {{ pip_requirements }}
:param extra_pip_requirements: {{ extra_pip_requirements }}
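A minimal sketch of the first workflow (saving a custom :class:`~PythonModel`
subclass; the class name and output path below are illustrative assumptions):

.. code-block:: python

    import mlflow.pyfunc

    class EchoModel(mlflow.pyfunc.PythonModel):
        def predict(self, context, model_input):
            return model_input

    mlflow.pyfunc.save_model(path="echo_model", python_model=EchoModel())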
"""
_validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)
mlflow_model = kwargs.pop("model", mlflow_model)
if len(kwargs) > 0:
raise TypeError("save_model() got unexpected keyword arguments: {}".format(kwargs))
if code_path is not None:
if not isinstance(code_path, list):
raise TypeError("Argument code_path should be a list, not {}".format(type(code_path)))
first_argument_set = {
"loader_module": loader_module,
"data_path": data_path,
}
second_argument_set = {
"artifacts": artifacts,
"python_model": python_model,
}
first_argument_set_specified = any(item is not None for item in first_argument_set.values())
second_argument_set_specified = any(item is not None for item in second_argument_set.values())
if first_argument_set_specified and second_argument_set_specified:
raise MlflowException(
message=(
"The following sets of parameters cannot be specified together: {first_set_keys}"
" and {second_set_keys}. All parameters in one set must be `None`. Instead, found"
" the following values: {first_set_entries} and {second_set_entries}".format(
first_set_keys=first_argument_set.keys(),
second_set_keys=second_argument_set.keys(),
first_set_entries=first_argument_set,
second_set_entries=second_argument_set,
)
),
error_code=INVALID_PARAMETER_VALUE,
)
elif (loader_module is None) and (python_model is None):
msg = (
"Either `loader_module` or `python_model` must be specified. A `loader_module` "
"should be a python module. A `python_model` should be a subclass of PythonModel"
)
raise MlflowException(message=msg, error_code=INVALID_PARAMETER_VALUE)
_validate_and_prepare_target_save_path(path)
if mlflow_model is None:
mlflow_model = Model()
if signature is not None:
mlflow_model.signature = signature
if input_example is not None:
_save_example(mlflow_model, input_example, path)
if first_argument_set_specified:
return _save_model_with_loader_module_and_data_path(
path=path,
loader_module=loader_module,
data_path=data_path,
code_paths=code_path,
conda_env=conda_env,
mlflow_model=mlflow_model,
pip_requirements=pip_requirements,
extra_pip_requirements=extra_pip_requirements,
)
elif second_argument_set_specified:
return mlflow.pyfunc.model._save_model_with_class_artifacts_params(
path=path,
python_model=python_model,
artifacts=artifacts,
conda_env=conda_env,
code_paths=code_path,
mlflow_model=mlflow_model,
pip_requirements=pip_requirements,
extra_pip_requirements=extra_pip_requirements,
)
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name="scikit-learn"))
def log_model(
artifact_path,
loader_module=None,
data_path=None,
code_path=None,
conda_env=None,
python_model=None,
artifacts=None,
registered_model_name=None,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,
pip_requirements=None,
extra_pip_requirements=None,
):
"""
Log a Pyfunc model with custom inference logic and optional data dependencies as an MLflow
artifact for the current run.
For information about the workflows that this method supports, see :ref:`Workflows for
creating custom pyfunc models <pyfunc-create-custom-workflows>` and
:ref:`Which workflow is right for my use case? <pyfunc-create-custom-selecting-workflow>`.
You cannot specify the parameters for the second workflow: ``loader_module``, ``data_path``
and the parameters for the first workflow: ``python_model``, ``artifacts`` together.
:param artifact_path: The run-relative artifact path to which to log the Python model.
:param loader_module: The name of the Python module that is used to load the model
from ``data_path``. This module must define a method with the prototype
``_load_pyfunc(data_path)``. If not ``None``, this module and its
dependencies must be included in one of the following locations:
- The MLflow library.
- Package(s) listed in the model's Conda environment, specified by
the ``conda_env`` parameter.
- One or more of the files specified by the ``code_path`` parameter.
:param data_path: Path to a file or directory containing model data.
:param code_path: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path before the model is loaded.
:param conda_env: {{ conda_env }}
:param python_model: An instance of a subclass of :class:`~PythonModel`. This class is
serialized using the CloudPickle library. Any dependencies of the class
should be included in one of the following locations:
- The MLflow library.
- Package(s) listed in the model's Conda environment, specified by
the ``conda_env`` parameter.
- One or more of the files specified by the ``code_path`` parameter.
Note: If the class is imported from another module, as opposed to being
defined in the ``__main__`` scope, the defining module should also be
included in one of the listed locations.
:param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs
are resolved to absolute filesystem paths, producing a dictionary of
``<name, absolute_path>`` entries. ``python_model`` can reference these
resolved entries as the ``artifacts`` property of the ``context`` parameter
in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`
and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.
For example, consider the following ``artifacts`` dictionary::
{
"my_file": "s3://my-bucket/path/to/my/file"
}
In this case, the ``"my_file"`` artifact is downloaded from S3. The
``python_model`` can then refer to ``"my_file"`` as an absolute filesystem
path via ``context.artifacts["my_file"]``.
If ``None``, no artifacts are added to the model.
:param registered_model_name: This argument may change or be removed in a
future release without warning. If given, create a model
version under ``registered_model_name``, also creating a
registered model if one with the given name does not exist.
:param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example can be a Pandas DataFrame where the given
example will be serialized to json using the Pandas split-oriented
format, or a numpy array where the example will be serialized to json
by converting it to a list. Bytes are base64-encoded.
:param await_registration_for: Number of seconds to wait for the model version to finish
being created and be in ``READY`` status. By default, the function
waits for five minutes. Specify 0 or None to skip waiting.
:param pip_requirements: {{ pip_requirements }}
:param extra_pip_requirements: {{ extra_pip_requirements }}
:return: A :py:class:`ModelInfo <mlflow.models.model.ModelInfo>` instance that contains the
metadata of the logged model.
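A minimal sketch (the model class and its parameter are illustrative
assumptions):

.. code-block:: python

    class AddN(mlflow.pyfunc.PythonModel):
        def __init__(self, n):
            self.n = n

        def predict(self, context, model_input):
            return model_input.apply(lambda column: column + self.n)

    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model("add_n_model", python_model=AddN(n=5))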
"""
return Model.log(
artifact_path=artifact_path,
flavor=mlflow.pyfunc,
loader_module=loader_module,
data_path=data_path,
code_path=code_path,
python_model=python_model,
artifacts=artifacts,
conda_env=conda_env,
registered_model_name=registered_model_name,
signature=signature,
input_example=input_example,
await_registration_for=await_registration_for,
pip_requirements=pip_requirements,
extra_pip_requirements=extra_pip_requirements,
)
def _save_model_with_loader_module_and_data_path(
path,
loader_module,
data_path=None,
code_paths=None,
conda_env=None,
mlflow_model=None,
pip_requirements=None,
extra_pip_requirements=None,
):
"""
Export model as a generic Python function model.
:param path: The path to which to save the Python model.
:param loader_module: The name of the Python module that is used to load the model
from ``data_path``. This module must define a method with the prototype
``_load_pyfunc(data_path)``.
:param data_path: Path to a file or directory containing model data.
:param code_paths: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path before the model is loaded.
:param conda_env: Either a dictionary representation of a Conda environment or the path to a
Conda environment yaml file. If provided, this describes the environment
this model should be run in.
:return: Model configuration containing model info.
"""
data = None
if data_path is not None:
model_file = _copy_file_or_tree(src=data_path, dst=path, dst_dir="data")
data = model_file
code_dir_subpath = _validate_and_copy_code_paths(code_paths, path)
if mlflow_model is None:
mlflow_model = Model()
mlflow.pyfunc.add_to_model(
mlflow_model,
loader_module=loader_module,
code=code_dir_subpath,
data=data,
env=_CONDA_ENV_FILE_NAME,
)
mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))
if conda_env is None:
if pip_requirements is None:
default_reqs = get_default_pip_requirements()
# To ensure `_load_pyfunc` can successfully load the model during the dependency
# inference, `mlflow_model.save` must be called beforehand to save an MLmodel file.
inferred_reqs = mlflow.models.infer_pip_requirements(
path,
FLAVOR_NAME,
fallback=default_reqs,
)
default_reqs = sorted(set(inferred_reqs).union(default_reqs))
else:
default_reqs = None
conda_env, pip_requirements, pip_constraints = _process_pip_requirements(
default_reqs,
pip_requirements,
extra_pip_requirements,
)
else:
conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)
with open(os.path.join(path, _CONDA_ENV_FILE_NAME), "w") as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
# Save `constraints.txt` if necessary
if pip_constraints:
write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), "\n".join(pip_constraints))
# Save `requirements.txt`
write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), "\n".join(pip_requirements))
_PythonEnv.current().to_yaml(os.path.join(path, _PYTHON_ENV_FILE_NAME))
return mlflow_model
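# The template below is rendered into a standalone loader shim: `{main}` is the
# loader module name, `{data_path}` is the saved data path, and `{update_path}`
# is presumably replaced with optional sys.path-manipulation statements (it is
# substituted in front of the return statement).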
loader_template = """
import importlib
import os
import sys
def load_pyfunc():
{update_path}return importlib.import_module('{main}')._load_pyfunc('{data_path}')
"""
|
ttest.py
|
import threading
import time

# Shared flag checked by the worker thread; cleared by the main thread to stop it.
flag = True

def thread_target():
    i = 1
    while flag:
        print(i)
        i += 1
        time.sleep(1)

thread_obj = threading.Thread(target=thread_target)
thread_obj.start()
thread_obj.join(timeout=5)
# Clearing the flag lets the worker loop exit on its next iteration.
flag = False
|
test_integration.py
|
import collections
import threading
import subprocess
import os
import json
import mock
import pwd
import socket
import pytest
try:
from BaseHTTPServer import HTTPServer
from BaseHTTPServer import BaseHTTPRequestHandler
except ImportError:
from http.server import HTTPServer
from http.server import BaseHTTPRequestHandler
Request = collections.namedtuple('Request', ['command', 'path', 'headers', 'body'])
class SentryHTTPServer(HTTPServer):
timeout = 0.1
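# A short timeout makes each handle_request() call return within ~0.1 s, so the
# serving loop in UUTHTTPServer.run() can re-check its `running` flag;
# handle_timeout() is a no-op for the same reason.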
def __init__(self, *args, **kwargs):
requests = kwargs.pop('requests')
HTTPServer.__init__(self, *args, **kwargs)
self.requests = requests
def handle_timeout(self):
pass
class SentryHTTPRequestHandler(BaseHTTPRequestHandler):
def do_POST(self):
body_len = int(self.headers.get('Content-Length', '0'))
body = self.rfile.read(body_len)
request = Request(command=self.command, path=self.path, headers=dict(self.headers.items()), body=body)
self.server.requests.append(request)
self.send_response(200)
self.send_header('Content-Type', 'application/json')
body = json.dumps({'status': 'ok'}).encode('utf-8')
self.send_header('Content-Length', str(len(body)))
self.end_headers()
self.wfile.write(body)
class UUTHTTPServer(object):
def __init__(self):
self.running = False
self.address = None
self._thread = None
self.requests = []
# An Event avoids the lost-wakeup race of a bare Condition: if the server
# thread signalled before start() began waiting, start() would block forever.
self._started = threading.Event()
@property
def uri(self):
return 'http://sentry:password@{0}/'.format(':'.join(map(str, self.address)))
def run(self):
self.running = True
httpd = SentryHTTPServer(('127.0.0.1', 0), SentryHTTPRequestHandler, requests=self.requests)
self.address = httpd.server_address
self._started.set()
while self.running:
httpd.handle_request()
def start(self):
t = threading.Thread(target=self.run)
self._thread = t
t.start()
self._started.wait()
def stop(self):
if self.running:
self.running = False
self._thread.join()
@pytest.fixture
def http_server():
t_s = UUTHTTPServer()
t_s.start()
yield t_s
t_s.stop()
FAIL_NO_OUTPUT = '''#!/bin/bash
exit 1
'''
FAIL_LONG_OUTPUT = '''#!/bin/bash
head -c 2000 /usr/share/dict/words
tail -c 2000 /usr/share/dict/words >&2
exit 1
'''
@pytest.fixture
def scripts(tmpdir):
paths = {}
for script in ('FAIL_NO_OUTPUT', 'FAIL_LONG_OUTPUT'):
with open(os.path.join(str(tmpdir), script), 'w') as f:
f.write(globals()[script])
os.fchmod(f.fileno(), 0o700)
paths[script] = os.path.join(str(tmpdir), script)
return paths
def test_no_output(http_server, scripts):
subprocess.check_call(
['shentry.py', scripts['FAIL_NO_OUTPUT']],
env={
'SHELL_SENTRY_DSN': http_server.uri,
'TZ': 'UTC',
}
)
# ensure that the http server has processed all requests
http_server.stop()
assert len(http_server.requests) == 1
req = http_server.requests[0]
assert req.command == 'POST'
body = json.loads(req.body.decode('utf-8'))
assert body == {
'device': mock.ANY,
'event_id': mock.ANY,
'extra': {
'PATH': mock.ANY,
'TZ': 'UTC',
'_sent_with': mock.ANY,
'command': scripts['FAIL_NO_OUTPUT'],
'duration': mock.ANY,
'load_average_at_exit': mock.ANY,
'returncode': 1,
'shell': '/bin/sh',
'start_time': mock.ANY,
'username': pwd.getpwuid(os.getuid()).pw_name,
'working_directory': mock.ANY,
},
'fingerprint': mock.ANY,
'message': 'Command `{0}` failed with code 1.\n'.format(scripts['FAIL_NO_OUTPUT']),
'platform': 'other',
'server_name': socket.gethostname(),
'level': 'error',
'sdk': {
'name': 'shentry',
'version': mock.ANY,
},
'timestamp': mock.ANY,
}
def test_multi_kb_output(http_server, scripts):
subprocess.check_call(
['shentry.py', scripts['FAIL_LONG_OUTPUT']],
env={
'SHELL_SENTRY_DSN': http_server.uri,
'TZ': 'UTC',
}
)
# ensure that the http server has processed all requests
http_server.stop()
assert len(http_server.requests) == 1
req = http_server.requests[0]
assert req.command == 'POST'
body = json.loads(req.body.decode('utf-8'))
assert body == {
'device': mock.ANY,
'event_id': mock.ANY,
'extra': {
'PATH': mock.ANY,
'TZ': 'UTC',
'_sent_with': mock.ANY,
'command': scripts['FAIL_LONG_OUTPUT'],
'duration': mock.ANY,
'load_average_at_exit': mock.ANY,
'returncode': 1,
'shell': '/bin/sh',
'start_time': mock.ANY,
'username': pwd.getpwuid(os.getuid()).pw_name,
'working_directory': mock.ANY,
},
'fingerprint': mock.ANY,
'message': mock.ANY,
'platform': 'other',
'server_name': socket.gethostname(),
'level': 'error',
'sdk': {
'name': 'shentry',
'version': mock.ANY,
},
'timestamp': mock.ANY,
}
assert body['message'].startswith(
'Command `{0}` failed with code 1.\n\nExcerpt of stderr:\n'.format(scripts['FAIL_LONG_OUTPUT'])
)
|
nng.py
|
"""
Provides a Pythonic interface to cffi nng bindings
"""
import logging
import weakref
import threading
from ._nng import ffi, lib
from .exceptions import check_err, ConnectionRefused
from . import options
from . import _aio
logger = logging.getLogger(__name__)
__all__ = '''
ffi
Bus0
Pair0
Pair1
Pull0 Push0
Pub0 Sub0
Req0 Rep0
Socket
Surveyor0 Respondent0
'''.split()

# A mapping of id(sock): sock for use in callbacks. When a socket is
# initialized, it adds itself to this dict. When a socket is closed, it
# removes itself from this dict. In order to allow sockets to be garbage
# collected, a weak reference to the socket is stored here instead of the
# actual socket.
_live_sockets = weakref.WeakValueDictionary()
def to_char(charlike):
"""Convert str or bytes to char*."""
# fast path for stuff that doesn't need to be changed.
if isinstance(charlike, ffi.CData):
return charlike
if isinstance(charlike, str):
charlike = charlike.encode()
charlike = ffi.new('char[]', charlike)
return charlike
class _NNGOption:
"""A descriptor for more easily getting/setting NNG option."""
# this class should not be instantiated directly! Instantiation will work,
# but getting/setting will fail.
# subclasses set _getter and _setter to the module-level getter and setter
# functions
_getter = None
_setter = None
def __init__(self, option_name):
self.option = to_char(option_name)
def __get__(self, instance, owner):
# have to look up the getter on the class
if self._getter is None:
raise TypeError("{} cannot be set".format(self.__class__))
return self.__class__._getter(instance, self.option)
def __set__(self, instance, value):
if self._setter is None:
raise TypeError("{} is readonly".format(self.__class__))
self.__class__._setter(instance, self.option, value)
class IntOption(_NNGOption):
"""Descriptor for getting/setting integer options"""
_getter = options._getopt_int
_setter = options._setopt_int
class MsOption(_NNGOption):
"""Descriptor for getting/setting durations (in milliseconds)"""
_getter = options._getopt_ms
_setter = options._setopt_ms
class SockAddrOption(_NNGOption):
"""Descriptor for getting/setting durations (in milliseconds)"""
_getter = options._getopt_sockaddr
class SizeOption(_NNGOption):
"""Descriptor for getting/setting size_t options"""
_getter = options._getopt_size
_setter = options._setopt_size
class StringOption(_NNGOption):
"""Descriptor for getting/setting string options"""
_getter = options._getopt_string
_setter = options._setopt_string
class BooleanOption(_NNGOption):
"""Descriptor for getting/setting boolean values"""
_getter = options._getopt_bool
_setter = options._setopt_bool
class NotImplementedOption(_NNGOption):
"""Represents a currently un-implemented option in Python."""
def __init__(self, option_name, errmsg):
super().__init__(option_name)
self.errmsg = errmsg
def __get__(self, instance, owner):
raise NotImplementedError(self.errmsg)
def __set__(self, instance, value):
raise NotImplementedError(self.errmsg)
class Socket:
"""
The base socket. It should generally not be instantiated directly.
See the documentation for __init__ for initialization options.
Sockets have the following attributes:
Attributes:
name (str): The socket name. Corresponds to ``NNG_OPT_SOCKNAME``.
This is for debugging purposes.
raw (bool): A boolean, indicating whether the socket is raw or cooked.
Returns True if the socket is raw, else False. This property is
read-only. Corresponds to library option ``NNG_OPT_RAW``. For
more information see
https://nanomsg.github.io/nng/man/v1.0.1/nng.7.html#raw_mode.
protocol (int): Read-only option which returns the 16-bit number of the
socket's protocol.
protocol_name (str): Read-only option which returns the name of the
socket's protocol.
peer (int): Returns the peer protocol id for the socket.
recv_timeout (int): Receive timeout, in ms. If a socket takes longer
than the specified time, raises a pynng.exceptions.Timeout.
Corresponds to library option ``NNG_OPT_RECVTIMEO``.
send_timeout (int): Send timeout, in ms. If the message cannot be
queued in the specified time, raises a pynng.exceptions.Timeout.
Corresponds to library option ``NNG_OPT_SENDTIMEO``.
local_address: Not implemented!!! A read-only property representing
the local address used for communication. The Python wrapper for
[nng_sockaddr](https://nanomsg.github.io/nng/man/v1.0.1/nng_sockaddr.5.html)
needs to be completed first. Corresponds to ``NNG_OPT_LOCADDR``.
reconnect_time_min (int): The minimum time to wait before attempting
reconnects, in ms. Corresponds to ``NNG_OPT_RECONNMINT``. This
can also be overridden on the dialers.
reconnect_time_max (int): The maximum time to wait before attempting
reconnects, in ms. Corresponds to ``NNG_OPT_RECONNMAXT``. If this
is non-zero, then the time between successive connection attempts
will start at the value of reconnect_time_min, and grow
exponentially, until it reaches this value. This option can be set
on the socket, or on the dialers associated with the socket.
recv_fd (int): The receive file descriptor associated with the socket.
This is suitable to be passed into poll functions like poll(),
or select().
send_fd (int): The sending file descriptor associated with the socket.
This is suitable to be passed into poll functions like poll(),
or select().
recv_max_size (int): The largest size of a message to receive.
Messages larger than this size will be silently dropped. A size of
-1 indicates unlimited size.
See also the nng man pages document for options:
https://nanomsg.github.io/nng/man/v1.0.1/nng_options.5.html
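A minimal round-trip sketch using a concrete subclass (the ``inproc``
address is an illustrative assumption):

.. code-block:: python

    import pynng

    with pynng.Pair0(listen='inproc://example') as s0, \
            pynng.Pair0(dial='inproc://example') as s1:
        s0.send(b'hello')
        assert s1.recv() == b'hello'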
"""
# the following options correspond to nng options documented at
# https://nanomsg.github.io/nng/man/v1.0.1/nng_options.5.html
name = StringOption('socket-name')
raw = BooleanOption('raw')
protocol = IntOption('protocol')
protocol_name = StringOption('protocol-name')
peer = IntOption('peer')
peer_name = StringOption('peer-name')
recv_buffer_size = IntOption('recv-buffer')
send_buffer_size = IntOption('send-buffer')
recv_timeout = MsOption('recv-timeout')
send_timeout = MsOption('send-timeout')
ttl_max = IntOption('ttl-max')
recv_max_size = SizeOption('recv-size-max')
reconnect_time_min = MsOption('reconnect-time-min')
reconnect_time_max = MsOption('reconnect-time-max')
recv_fd = IntOption('recv-fd')
send_fd = IntOption('send-fd')
tcp_nodelay = BooleanOption('tcp-nodelay')
tcp_keepalive = BooleanOption('tcp-keepalive')
def __init__(self, *,
dial=None,
listen=None,
recv_timeout=None,
send_timeout=None,
recv_buffer_size=None,
send_buffer_size=None,
recv_max_size=None,
reconnect_time_min=None,
reconnect_time_max=None,
opener=None,
block_on_dial=None,
name=None,
async_backend=None
):
"""Initialize socket. It takes no positional arguments.
Most socket options can be set through the initializer for convenience.
Note:
The following arguments are all optional.
Args:
dial: The address to dial. If not given, no address is dialed.
listen: The address to listen at. If not given, the socket does
not listen at any address.
recv_timeout: The receive timeout, in milliseconds. If not given,
there is no timeout.
send_timeout: The send timeout, in milliseconds. If not given,
there is no timeout.
recv_buffer_size: Sets the receive message buffer size.
send_buffer_size: Sets the send message buffer size.
recv_max_size: Maximum size of message to receive. Messages larger
than this size are silently dropped.
async_backend: The event loop backend for asynchronous socket
operations. The currently supported backends are "asyncio" and
"trio". If ``async_backend`` is not provided, pynng will use
sniffio to attempt to find the currently running event loop.
"""
# mapping of id: Python objects
self._dialers = {}
self._listeners = {}
self._pipes = {}
self._on_pre_pipe_add = []
self._on_post_pipe_add = []
self._on_post_pipe_remove = []
self._pipe_notify_lock = threading.Lock()
self._async_backend = async_backend
self._socket = ffi.new('nng_socket *')
if opener is not None:
self._opener = opener
if opener is None and not hasattr(self, '_opener'):
raise TypeError('Cannot directly instantiate a Socket. Try a subclass.')
check_err(self._opener(self._socket))
if recv_timeout is not None:
self.recv_timeout = recv_timeout
if send_timeout is not None:
self.send_timeout = send_timeout
if recv_max_size is not None:
self.recv_max_size = recv_max_size
if reconnect_time_min is not None:
self.reconnect_time_min = reconnect_time_min
if reconnect_time_max is not None:
self.reconnect_time_max = reconnect_time_max
if recv_buffer_size is not None:
self.recv_buffer_size = recv_buffer_size
if send_buffer_size is not None:
self.send_buffer_size = send_buffer_size
if listen is not None:
self.listen(listen)
if dial is not None:
self.dial(dial, block=block_on_dial)
_live_sockets[id(self)] = self
as_void = ffi.cast('void *', id(self))
# set up pipe callbacks
check_err(lib.nng_pipe_notify(self.socket, lib.NNG_PIPE_EV_ADD_PRE,
lib._nng_pipe_cb, as_void))
check_err(lib.nng_pipe_notify(self.socket, lib.NNG_PIPE_EV_ADD_POST,
lib._nng_pipe_cb, as_void))
check_err(lib.nng_pipe_notify(self.socket, lib.NNG_PIPE_EV_REM_POST,
lib._nng_pipe_cb, as_void))
def dial(self, address, *, block=None):
"""Dial the specified address.
Args:
address: The address to dial.
block: Whether to block or not. There are three possible values
this can take:
1. If truthy, a blocking dial is attempted. If it fails for
any reason, an exception is raised.
2. If Falsy, a non-blocking dial is started. The dial is
retried periodically in the background until it is
successful.
3. (**Default behavior**): If ``None``, a blocking dial is
first attempted. If it fails, an exception is logged
(using the Python logging module), then a non-blocking dial
is done.
"""
if block:
self._dial(address, flags=0)
elif block is None:
try:
self.dial(address, block=True)
except ConnectionRefused:
msg = 'Synchronous dial failed; attempting asynchronous now'
logger.exception(msg)
self.dial(address, block=False)
else:
self._dial(address, flags=lib.NNG_FLAG_NONBLOCK)
def _dial(self, address, flags=0):
"""Dial specified address
``flags`` usually does not need to be given.
"""
dialer = ffi.new('nng_dialer *')
ret = lib.nng_dial(self.socket, to_char(address), dialer, flags)
check_err(ret)
# we can only get here if check_err doesn't raise
d_id = lib.nng_dialer_id(dialer[0])
self._dialers[d_id] = Dialer(dialer, self)
def listen(self, address, flags=0):
"""Listen at specified address; similar to nanomsg.bind()
``flags`` usually does not need to be given.
"""
listener = ffi.new('nng_listener *')
ret = lib.nng_listen(self.socket, to_char(address), listener, flags)
check_err(ret)
# we can only get here if check_err doesn't raise
l_id = lib.nng_listener_id(listener[0])
self._listeners[l_id] = Listener(listener, self)
def close(self):
"""Close the socket, freeing all system resources."""
# if a TypeError occurs (e.g. a bad keyword to __init__) we don't have
# the attribute _socket yet. This prevents spewing extra exceptions
if hasattr(self, '_socket'):
lib.nng_close(self.socket)
# cleanup the list of listeners/dialers. A program would be likely to
# segfault if a user accessed the listeners or dialers after this
# point.
self._listeners = {}
self._dialers = {}
def __del__(self):
self.close()
@property
def socket(self):
return self._socket[0]
def recv(self, block=True):
"""Receive data on the socket.
Args:
block: If block is True (the default), the function will not return
until the operation is completed or times out. If block is False,
the function will return data immediately. If no data is ready on
the socket, the function will raise ``pynng.TryAgain``.
"""
# TODO: someday we should support some kind of recv_into() operation
# where the user provides the data buffer.
flags = lib.NNG_FLAG_ALLOC
if not block:
flags |= lib.NNG_FLAG_NONBLOCK
data = ffi.new('char **')
size_t = ffi.new('size_t *')
ret = lib.nng_recv(self.socket, data, size_t, flags)
check_err(ret)
recvd = ffi.unpack(data[0], size_t[0])
lib.nng_free(data[0], size_t[0])
return recvd
def send(self, data):
"""Sends ``data`` on socket."""
err = lib.nng_send(self.socket, data, len(data), 0)
check_err(err)
async def arecv(self):
"""Asynchronously receive a message."""
with _aio.AIOHelper(self, self._async_backend) as aio:
return await aio.arecv()
async def asend(self, data):
"""Asynchronously send a message."""
with _aio.AIOHelper(self, self._async_backend) as aio:
return await aio.asend(data)
def __enter__(self):
return self
def __exit__(self, *tb_info):
self.close()
@property
def dialers(self):
"""A list of the active dialers"""
return tuple(self._dialers.values())
@property
def listeners(self):
"""A list of the active listeners"""
return tuple(self._listeners.values())
@property
def pipes(self):
"""A list of the active pipes"""
return tuple(self._pipes.values())
def _add_pipe(self, lib_pipe):
# this is only called inside the pipe callback.
pipe_id = lib.nng_pipe_id(lib_pipe)
pipe = Pipe(lib_pipe, self)
self._pipes[pipe_id] = pipe
return pipe
def _remove_pipe(self, lib_pipe):
pipe_id = lib.nng_pipe_id(lib_pipe)
del self._pipes[pipe_id]
def new_context(self):
"""
Return a new Context for this socket.
"""
return Context(self)
def new_contexts(self, n):
"""
Return ``n`` new contexts for this socket
"""
return [self.new_context() for _ in range(n)]
def add_pre_pipe_connect_cb(self, callback):
"""
Add a callback which will be called before a Pipe is connected to a
Socket. You can add as many callbacks as you want, and they will be
called in the order they were added.
The callback provided must accept a single argument: a Pipe. The
socket associated with the pipe can be accessed through the pipe's
``socket`` attribute. If the pipe is closed, the callbacks for
post_pipe_connect and post_pipe_remove will not be called.
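
A minimal sketch (``sock`` is assumed to be an already-constructed Socket):

.. code-block:: python

    def log_pipe(pipe):
        print('pipe connecting:', pipe.id)

    sock.add_pre_pipe_connect_cb(log_pipe)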
"""
self._on_pre_pipe_add.append(callback)
def add_post_pipe_connect_cb(self, callback):
"""
Add a callback which will be called after a Pipe is connected to a
Socket. You can add as many callbacks as you want, and they will be
called in the order they were added.
The callback provided must accept a single argument: a Pipe.
"""
self._on_post_pipe_add.append(callback)
def add_post_pipe_remove_cb(self, callback):
"""
Add a callback which will be called after a Pipe is removed from a
Socket. You can add as many callbacks as you want, and they will be
called in the order they were added.
The callback provided must accept a single argument: a Pipe.
"""
self._on_post_pipe_remove.append(callback)
def remove_pre_pipe_connect_cb(self, callback):
"""
Remove ``callback`` from the list of callbacks for pre pipe connect
events
"""
self._on_pre_pipe_add.remove(callback)
def remove_post_pipe_connect_cb(self, callback):
"""
Remove ``callback`` from the list of callbacks for post pipe connect
events
"""
self._on_post_pipe_add.remove(callback)
def remove_post_pipe_remove_cb(self, callback):
"""
Remove ``callback`` from the list of callbacks for post pipe remove
events
"""
self._on_post_pipe_remove.remove(callback)
class Bus0(Socket):
"""A bus0 socket."""
_opener = lib.nng_bus0_open
class Pair0(Socket):
"""A pair0 socket."""
_opener = lib.nng_pair0_open
class Pair1(Socket):
"""A pair1 socket."""
_opener = lib.nng_pair1_open
class Pull0(Socket):
"""A pull0 socket."""
_opener = lib.nng_pull0_open
class Push0(Socket):
"""A push0 socket."""
_opener = lib.nng_push0_open
class Pub0(Socket):
"""A pub0 socket."""
_opener = lib.nng_pub0_open
class Sub0(Socket):
"""A sub0 socket."""
_opener = lib.nng_sub0_open
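# NB: a sub socket delivers no messages until subscribe() is called; subscribing
# to the empty topic (b'') receives every message.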
def subscribe(self, topic):
"""Subscribe to the specified topic."""
options._setopt_string(self, b'sub:subscribe', topic)
def unsubscribe(self, topic):
"""Unsubscribe to the specified topic."""
options._setopt_string(self, b'sub:unsubscribe', topic)
class Req0(Socket):
"""A req0 socket."""
_opener = lib.nng_req0_open
class Rep0(Socket):
"""A rep0 socket."""
_opener = lib.nng_rep0_open
class Surveyor0(Socket):
"""A surveyor0 socket."""
_opener = lib.nng_surveyor0_open
class Respondent0(Socket):
"""A respondent0 socket."""
_opener = lib.nng_respondent0_open
class Dialer:
"""Wrapper class for the nng_dialer struct.
You probably don't need to instantiate this directly.
"""
local_address = SockAddrOption('local-address')
remote_address = SockAddrOption('remote-address')
reconnect_time_min = MsOption('reconnect-time-min')
reconnect_time_max = MsOption('reconnect-time-max')
recv_max_size = SizeOption('recv-size-max')
url = StringOption('url')
peer = IntOption('peer')
peer_name = StringOption('peer-name')
tcp_nodelay = BooleanOption('tcp-nodelay')
tcp_keepalive = BooleanOption('tcp-keepalive')
def __init__(self, dialer, socket):
"""
Args:
dialer: the initialized `lib.nng_dialer`.
socket: The Socket associated with the dialer
"""
# I can't think of a reason you would need to directly instantiate this
# class
self._dialer = dialer
self.socket = socket
@property
def dialer(self):
return self._dialer[0]
def close(self):
"""
Close the dialer.
"""
lib.nng_dialer_close(self.dialer)
del self.socket._dialers[self.id]
@property
def id(self):
return lib.nng_dialer_id(self.dialer)
class Listener:
"""Wrapper class for the nng_dialer struct."""
local_address = SockAddrOption('local-address')
remote_address = SockAddrOption('remote-address')
reconnect_time_min = MsOption('reconnect-time-min')
reconnect_time_max = MsOption('reconnect-time-max')
recv_max_size = SizeOption('recv-size-max')
url = StringOption('url')
peer = IntOption('peer')
peer_name = StringOption('peer-name')
tcp_nodelay = BooleanOption('tcp-nodelay')
tcp_keepalive = BooleanOption('tcp-keepalive')
def __init__(self, listener, socket):
"""
Args:
listener: the initialized `lib.nng_listener`.
socket: The Socket associated with the listener
"""
# I can't think of a reason you would need to directly instantiate this
# class
self._listener = listener
self.socket = socket
@property
def listener(self):
return self._listener[0]
def close(self):
"""
Close the listener.
"""
lib.nng_listener_close(self.listener)
del self.socket._listeners[self.id]
@property
def id(self):
return lib.nng_listener_id(self.listener)
class Context:
"""
A "context" keeps track of a protocol's state for stateful protocols (like
REQ/REP). A context allows the same socket to be used for multiple
operations at the same time. For example, the following code, **which does
not use contexts**, does terrible things:
.. code-block:: python
# start a socket to service requests.
# HEY THIS IS EXAMPLE BAD CODE, SO DON'T TRY TO USE IT
import pynng
import threading
def service_reqs(s):
while True:
data = s.recv()
# do something with data, e.g.
s.send(b"here's your answer, pal!")
threads = []
with pynng.Rep0(listen='tcp://127.0.0.1:12345') as s:
for _ in range(10):
t = threading.Thread(target=service_reqs, args=[s], daemon=True)
t.start()
threads.append(t)
for thread in threads:
thread.join()
Contexts allow multiplexing a socket in a way that is safe. It removes one
of the biggest use cases for needing to use raw sockets.
Contexts should not be instantiated directly; instead, create a socket, and
call the new_context() method.
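
A sketch of the request-servicing loop above rewritten safely with contexts
(same hypothetical address; one context per thread):

.. code-block:: python

    import pynng
    import threading

    def service_reqs(ctx):
        while True:
            data = ctx.recv()
            # do something with data, then reply:
            ctx.send(b"here's your answer, pal!")

    with pynng.Rep0(listen='tcp://127.0.0.1:12345') as s:
        threads = []
        for _ in range(10):
            t = threading.Thread(target=service_reqs,
                                 args=[s.new_context()], daemon=True)
            t.start()
            threads.append(t)
        for thread in threads:
            thread.join()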
"""
def __init__(self, socket):
# need to set attributes first, so that if anything goes wrong,
# __del__() doesn't throw an AttributeError
self._context = None
assert isinstance(socket, Socket)
self._socket = socket
self._context = ffi.new('nng_ctx *')
check_err(lib.nng_ctx_open(self._context, socket.socket))
assert lib.nng_ctx_id(self.context) != -1
async def arecv(self):
"""
Asynchronously receive data using this context.
"""
with _aio.AIOHelper(self, self._socket._async_backend) as aio:
return await aio.arecv()
async def asend(self, data):
"""
Asynchronously send data using this context.
"""
with _aio.AIOHelper(self, self._socket._async_backend) as aio:
return await aio.asend(data)
def recv(self):
"""
Synchronously receive data on this context.
"""
aio_p = ffi.new('nng_aio **')
check_err(lib.nng_aio_alloc(aio_p, ffi.NULL, ffi.NULL))
aio = aio_p[0]
try:
check_err(lib.nng_ctx_recv(self.context, aio))
check_err(lib.nng_aio_wait(aio))
check_err(lib.nng_aio_result(aio))
msg = lib.nng_aio_get_msg(aio)
try:
size = lib.nng_msg_len(msg)
data = ffi.cast('char *', lib.nng_msg_body(msg))
py_obj = bytes(ffi.buffer(data[0:size]))
finally:
lib.nng_msg_free(msg)
finally:
lib.nng_aio_free(aio)
return py_obj
def send(self, data):
"""
Synchronously send data on the socket.
"""
aio_p = ffi.new('nng_aio **')
check_err(lib.nng_aio_alloc(aio_p, ffi.NULL, ffi.NULL))
aio = aio_p[0]
try:
msg_p = ffi.new('nng_msg **')
check_err(lib.nng_msg_alloc(msg_p, 0))
msg = msg_p[0]
lib.nng_msg_append(msg, data, len(data))
check_err(lib.nng_aio_set_msg(aio, msg))
check_err(lib.nng_ctx_send(self.context, aio))
check_err(lib.nng_aio_wait(aio))
check_err(lib.nng_aio_result(aio))
finally:
lib.nng_aio_free(aio)
def _free(self):
ctx_err = 0
if self._context is not None:
if lib.nng_ctx_id(self.context) != -1:
ctx_err = lib.nng_ctx_close(self.context)
self._context = None
check_err(ctx_err)
def __enter__(self):
return self
def __exit__(self, *exc_info):
self._free()
@property
def context(self):
"""Return the underlying nng object."""
return self._context[0]
def __del__(self):
self._free()
@ffi.def_extern()
def _nng_pipe_cb(lib_pipe, event, arg):
sock_id = int(ffi.cast('size_t', arg))
sock = _live_sockets[sock_id]
# exceptions don't propagate out of this function, so if any exception is
# raised in any of the callbacks, we just log it (using logger.exception).
with sock._pipe_notify_lock:
pipe_id = lib.nng_pipe_id(lib_pipe)
if event == lib.NNG_PIPE_EV_ADD_PRE:
# time to do our bookkeeping; actually create the pipe and attach it to
# the socket
pipe = sock._add_pipe(lib_pipe)
for cb in sock._on_pre_pipe_add:
try:
cb(pipe)
except:
msg = 'Exception raised in pre pipe connect callback'
logger.exception(msg)
if pipe.closed:
# NB: we need to remove the pipe from the socket now, before a remote
# tries connecting again and the same pipe ID gets reused. Removing it
# here results in the (handled) KeyError below.
sock._remove_pipe(lib_pipe)
elif event == lib.NNG_PIPE_EV_ADD_POST:
pipe = sock._pipes[pipe_id]
for cb in sock._on_post_pipe_add:
try:
cb(pipe)
except:
msg = 'Exception raised in post pipe connect callback'
logger.exception(msg)
elif event == lib.NNG_PIPE_EV_REM_POST:
try:
pipe = sock._pipes[pipe_id]
except KeyError:
# we get here if the pipe was closed in pre_connect earlier. This
# is not a big deal.
logger.debug('Could not find pipe for socket')
return
try:
for cb in sock._on_post_pipe_remove:
try:
cb(pipe)
except:
msg = 'Exception raised in post pipe remove callback'
logger.exception(msg)
finally:
sock._remove_pipe(lib_pipe)
class Pipe:
"""
A "pipe" is a single connection between two endpoints.
https://nanomsg.github.io/nng/man/v1.1.0/nng_pipe.5.
There is no public constructor for a Pipe; they are automatically added to
the underlying socket whenever the pipe is created.
"""
local_address = SockAddrOption('local-address')
remote_address = SockAddrOption('remote-address')
url = StringOption('url')
protocol = IntOption('protocol')
protocol_name = StringOption('protocol-name')
peer = IntOption('peer')
peer_name = StringOption('peer-name')
tcp_nodelay = BooleanOption('tcp-nodelay')
tcp_keepalive = BooleanOption('tcp-keepalive')
def __init__(self, lib_pipe, socket):
# NB: For reasons that are not fully understood (possibly an interaction
# between being invoked from a callback and refcounting), keeping a
# reference to lib_pipe directly leads to memory corruption, so we make a
# copy of the underlying nng_pipe struct instead.
self._pipe = ffi.new('nng_pipe *')
self._pipe[0] = lib_pipe
self.pipe = self._pipe[0]
self.socket = socket
self._closed = False
@property
def closed(self):
"""
Return whether the pipe has been closed directly.
This will not be valid if the pipe was closed indirectly, e.g. by
closing the associated listener/dialer/socket.
"""
return self._closed
@property
def id(self):
return lib.nng_pipe_id(self.pipe)
@property
def dialer(self):
"""
Return the dialer this pipe is associated with. If the pipe is not
associated with a dialer, raise an exception
"""
dialer = lib.nng_pipe_dialer(self.pipe)
d_id = lib.nng_dialer_id(dialer)
if d_id < 0:
# TODO: Different exception?
raise TypeError('This pipe has no associated dialers.')
return self.socket._dialers[d_id]
@property
def listener(self):
"""
Return the listener this pipe is associated with. If the pipe is not
associated with a listener, raise an exception
"""
listener = lib.nng_pipe_listener(self.pipe)
l_id = lib.nng_listener_id(listener)
if l_id < 0:
# TODO: Different exception?
raise TypeError('This pipe has no associated listeners.')
return self.socket._listeners[l_id]
def close(self):
"""
Close the pipe.
"""
check_err(lib.nng_pipe_close(self.pipe))
self._closed = True
|
consumer.py
|
# Copyright (c) 2014 Rackspace, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import print_function
import multiprocessing as mp
import random
import sys
import time
from gevent import monkey as curious_george
curious_george.patch_all(thread=False, select=False)
import gevent
import marktime
from zaqarclient.queues import client
from zaqarclient.transport import errors
from zaqar.bench import config
CONF = config.conf
def claim_delete(queues, stats, test_duration, ttl, grace, limit):
"""Consumer Worker
The Consumer Worker continuously claims and deletes messages
for the specified duration. The time taken for each claim and
delete is recorded for calculating throughput and latency.
"""
end = time.time() + test_duration
claim_total_elapsed = 0
delete_total_elapsed = 0
total_failed_requests = 0
claim_total_requests = 0
delete_total_requests = 0
while time.time() < end:
# NOTE(kgriffs): Distribute requests across all queues evenly.
queue = random.choice(queues)
try:
marktime.start('claim_message')
claim = queue.claim(ttl=ttl, grace=grace, limit=limit)
claim_total_elapsed += marktime.stop('claim_message').seconds
claim_total_requests += 1
except errors.TransportError as ex:
sys.stderr.write("Could not claim messages : {0}\n".format(ex))
total_failed_requests += 1
else:
for msg in claim:
try:
marktime.start('delete_message')
msg.delete()
elapsed = marktime.stop('delete_message').seconds
delete_total_elapsed += elapsed
delete_total_requests += 1
except errors.TransportError as ex:
msg = "Could not delete messages: {0}\n".format(ex)
sys.stderr.write(msg)
total_failed_requests += 1
total_requests = (claim_total_requests +
delete_total_requests +
total_failed_requests)
stats.put({
'total_requests': total_requests,
'claim_total_requests': claim_total_requests,
'delete_total_requests': delete_total_requests,
'claim_total_elapsed': claim_total_elapsed,
'delete_total_elapsed': delete_total_elapsed,
})
def load_generator(stats, num_workers, num_queues,
test_duration, url, ttl, grace, limit):
    cli = client.Client(url)
queues = [cli.queue(CONF.queue_prefix + '-' + str(i))
for i in range(num_queues)]
gevent.joinall([
gevent.spawn(claim_delete,
queues, stats, test_duration, ttl, grace, limit)
for _ in range(num_workers)
])
def crunch(stats):
total_requests = 0
claim_total_elapsed = 0.0
delete_total_elapsed = 0.0
claim_total_requests = 0
delete_total_requests = 0
while not stats.empty():
entry = stats.get_nowait()
total_requests += entry['total_requests']
claim_total_elapsed += entry['claim_total_elapsed']
delete_total_elapsed += entry['delete_total_elapsed']
claim_total_requests += entry['claim_total_requests']
delete_total_requests += entry['delete_total_requests']
return (total_requests, claim_total_elapsed, delete_total_elapsed,
claim_total_requests, delete_total_requests)
def run(upstream_queue):
num_procs = CONF.consumer_processes
num_workers = CONF.consumer_workers
num_queues = CONF.num_queues
# Stats that will be reported
duration = 0
total_requests = 0
successful_requests = 0
claim_total_requests = 0
delete_total_requests = 0
throughput = 0
claim_latency = 0
delete_latency = 0
# Performance test
if num_procs and num_workers:
stats = mp.Queue()
# TODO(TheSriram) : Make ttl and grace configurable
args = (stats, num_workers, num_queues, CONF.time, CONF.server_url,
300, 200, CONF.messages_per_claim)
procs = [mp.Process(target=load_generator, args=args)
for _ in range(num_procs)]
if CONF.verbose:
print('\nStarting consumers (cp={0}, cw={1})...'.format(
num_procs, num_workers))
start = time.time()
for each_proc in procs:
each_proc.start()
for each_proc in procs:
each_proc.join()
(total_requests, claim_total_elapsed, delete_total_elapsed,
claim_total_requests, delete_total_requests) = crunch(stats)
successful_requests = claim_total_requests + delete_total_requests
duration = time.time() - start
# NOTE(kgriffs): Duration should never be zero
throughput = successful_requests / duration
if claim_total_requests:
claim_latency = (1000 * claim_total_elapsed /
claim_total_requests)
if delete_total_requests:
delete_latency = (1000 * delete_total_elapsed /
delete_total_requests)
upstream_queue.put({
'consumer': {
'duration_sec': duration,
'total_reqs': total_requests,
'claim_total_requests': claim_total_requests,
'successful_reqs': successful_requests,
'messages_processed': delete_total_requests,
'reqs_per_sec': throughput,
'ms_per_claim': claim_latency,
'ms_per_delete': delete_latency,
}
})
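# Illustrative only: a parent process collects the report that run() publishes
# roughly like this (the queue wiring is assumed for the example):
#
#   upstream = mp.Queue()
#   run(upstream)
#   report = upstream.get()['consumer']
#   print('{0:.1f} req/sec, {1:.2f} ms/claim'.format(
#       report['reqs_per_sec'], report['ms_per_claim']))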
|
ads1x15.py
|
from ..devices import Sensor
import Adafruit_ADS1x15
from time import sleep
import threading
from collections import deque
import numpy
class ADS1X15(Sensor):
_MAX_VALUE = 32767
_CHANNELS = 4
GAINS = numpy.array([[2/3, 6.144],
[1, 4.096],
[2, 2.048],
[4, 1.024],
[8, 0.512],
[16, 0.128]])
def __init__(self,
bus=1,
address=0x48,
v_ref=3.3,
averages=10,
max_data_length=100,
name=""):
"""Constructor"""
        self._bus = bus
        self._address = address
self.v_ref = v_ref
self._gain = self._find_gain()
        self._averages = averages
self._measurements = []
self._results = [4000] * self._CHANNELS
# threading stuff
self._lock = threading.Lock()
self._thread = None
self._thread_alive = False
        for channel in range(self._CHANNELS):
self._measurements.append(deque(maxlen=self._averages))
super(ADS1X15, self).__init__(name, max_data_length)
self.start()
def _find_gain(self):
"""Find the correct gain according to the given vref"""
gain = 2/3
for i in range(1, self.GAINS.shape[0]):
if self.GAINS[-i][1] > self.v_ref:
gain = int(self.GAINS[-i][0])
self.v_ref = self.GAINS[-i][1]
break
return gain
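    # Worked example: with the default v_ref=3.3 the loop scans GAINS from the
    # bottom row upward and stops at [1, 4.096], the smallest full-scale range
    # still above 3.3 V, so the gain becomes 1 and v_ref widens to 4.096 V.
    # Only when v_ref exceeds 4.096 V does the default gain of 2/3 (6.144 V
    # full scale) remain in effect.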
def start(self):
"""Initialize hardware and os resources."""
        self.adc = Adafruit_ADS1x15.ADS1115(address=self._address, busnum=self._bus)
if not self._thread_alive:
self._thread_alive = True
self._thread = threading.Thread(target=self._update_channels, args=(), daemon=True)
self._thread.start()
def stop(self):
"""Free hardware and os resources."""
self._thread_alive = False
self._thread.join()
self.adc.stop_adc()
def _update_channels(self):
"""Periodically aquires the moving average of all adc channels"""
while self._thread_alive:
            for channel in range(self._CHANNELS):
self._measurements[channel].append(self._read_channel(channel))
                # publish the window average once it is full; guarded by the lock
if len(self._measurements[channel]) == self._averages:
with self._lock:
self._results[channel] = sum(self._measurements[channel]) / self._averages
sleep(0.05)
print("ADC thread terminating...")
def _read_channel(self, channel):
"""Read a sigle's channel value"""
        if 0 <= channel < self._CHANNELS:
return self.adc.read_adc(channel, gain=self._gain)
def read(self, channel, SAVE=False):
"""Read result and transform it to voltage"""
with self._lock:
value = float(self._results[channel]) / self._MAX_VALUE * self.v_ref
if SAVE:
self.update_data(value)
return value
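# Illustrative usage sketch (wiring and timing values are assumptions):
#
#   adc = ADS1X15(bus=1, address=0x48, v_ref=3.3, name="adc_0")
#   sleep(1)                      # let the background thread fill the window
#   volts = adc.read(channel=0)   # moving average scaled to volts
#   adc.stop()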
|
test_urlcall.py
|
import json
import socketserver
import unittest
import threading
from http.server import BaseHTTPRequestHandler
from prompy.networkio.call_factory import Caller, CallRoute
from prompy.networkio.urlcall import url_call, json_call
from prompy.promise import Promise
from prompy.threadio.tpromise import TPromise
from tests.test_promise import threaded_test, _catch_and_raise
class MockServer(BaseHTTPRequestHandler):
    def set_headers(self, response_code=200, content_type='text/html', headers=None):
        self.send_response(response_code)
        self.send_header('Content-Type', content_type)
        for k, v in (headers or {}).items():
            self.send_header(k, v)
        self.end_headers()
def do_GET(self):
self.set_headers()
if 'testurlparams' in self.path:
self.wfile.write(self.path.encode('utf-8'))
else:
self.wfile.write('hello'.encode('utf-8'))
def do_HEAD(self):
self.set_headers()
def do_POST(self):
data = self._get_data().decode('utf-8')
if self.path == '/testjson':
content_type = self.get_content_type()
j = json.loads(data)
msg = j.get('msg')
self.set_headers(content_type='application/json')
self.wfile.write(json.dumps({'said': msg}).encode('utf-8'))
else:
self.set_headers()
self.wfile.write(f'You said {data}'.encode('utf-8'))
def _get_data(self):
content_len = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_len)
return post_data
def get_content_type(self) -> str:
return self.headers.get('Content-Type')
def mock_server():
port = 8000
with socketserver.TCPServer(("", port), MockServer) as httpd:
httpd.serve_forever()
t = threading.Thread(target=mock_server)
t.daemon = True
t.start()
class TestUrlCall(unittest.TestCase):
@threaded_test
def test_urlcall(self):
def get_then(rep):
self.assertEqual(rep.content.decode('utf-8'), 'hello')
get_call = url_call("http://localhost:8000/", prom_type=TPromise)
get_call.then(get_then).catch(_catch_and_raise)
def post_then(rep):
self.assertEqual(rep.content.decode('utf-8'), 'You said hello')
post_call = url_call('http://localhost:8000/help',
method='POST', data='hello'.encode('utf-8'), prom_type=TPromise)
post_call.then(post_then).catch(_catch_and_raise)
def json_then(rep):
print(rep)
said = rep.content.get('said')
self.assertEqual(said, 'hello')
j = json_call('http://localhost:8000/testjson', method='POST', payload={'msg': 'hello'}, prom_type=TPromise)
j.then(json_then).catch(_catch_and_raise)
@threaded_test
def test_call_factory(self):
class TestCaller(Caller):
def call_home(self, **kwargs):
return CallRoute('/')
def call_post_json(self, **kwargs):
return CallRoute('/testjson', method='POST')
def call_url_params(self, p):
return CallRoute('/testurlparams/<p>')
caller = TestCaller(base_url='http://localhost:8000', prom_type=TPromise)
p: Promise = caller.call_home()
@p.then
def _p_then(rep):
self.assertEqual(rep.content, 'hello')
p.catch(_catch_and_raise)
p2: Promise = caller.call_post_json(data={'msg': 'you got a call'})
@p2.then
def _p2_then(rep):
self.assertTrue(isinstance(rep.content, dict))
self.assertEqual(rep.content.get('said'), 'you got a call')
param = 'option'
p3: Promise = caller.call_url_params(param)
@p3.then
def _p3_then(rep):
self.assertTrue(param in rep.content)
|
test_rpc.py
|
import asyncio
import threading
import logging
import multiprocessing as mp
import pytest
import msgpack
from msgpackio.client import Client
from msgpackio.rpc import RPCClient
from msgpackio.server import RPCServer
from msgpackio.exceptions import RemoteException
log = logging.getLogger(__name__)
def add(a, b):
return a + b
def server(**bindings):
import asyncio
async def main():
loop = asyncio.get_running_loop()
server = await loop.create_server(
lambda: RPCServer(**bindings), "127.0.0.1", 8888
)
async with server:
await server.serve_forever()
asyncio.run(main())
clients = [Client]
@pytest.mark.parametrize("cls", clients)
def test_rpc_client_async(cls):
s = mp.Process(target=server, kwargs=dict(add=add))
s.start()
try:
with RPCClient(cls("127.0.0.1", 8888)) as client:
future = client.call_async("add", 1, 2)
            assert not future.ready()
future.wait(1)
            assert future.ready()
assert future.get() == 3
finally:
s.terminate()
@pytest.mark.parametrize("cls", clients)
def test_rpc_client_async_missing_key(cls):
s = mp.Process(target=server)
s.start()
try:
with RPCClient(cls("127.0.0.1", 8888)) as client:
future = client.call_async("add", 1, 2)
            assert not future.ready()
future.wait(1)
            assert future.ready()
with pytest.raises(RemoteException):
print(future.get())
finally:
s.terminate()
|
lock_demo.py
|
"""
@author: magician
@file: lock_demo.py
@date: 2020/8/7
"""
from threading import Thread, Lock
class Counter(object):
"""
Counter
"""
def __init__(self):
self.count = 0
def increment(self, offset):
self.count += offset
def worker(sensor_index, how_many, counter):
"""
worker
@param sensor_index:
@param how_many:
@param counter:
@return:
"""
for _ in range(how_many):
counter.increment(1)
class LockingCounter(object):
"""
LockingCounter
"""
def __init__(self):
self.lock = Lock()
self.count = 0
def increment(self, offset):
with self.lock:
self.count += offset
def run_threads(func, how_many, counter):
"""
run_threads
@param func:
@param how_many:
@param counter:
@return:
"""
threads = []
for i in range(5):
args = (i, how_many, counter)
thread = Thread(target=func, args=args)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
if __name__ == '__main__':
how_many = 10 ** 5
counter = Counter()
run_threads(worker, how_many, counter)
print('Counter should be %d, found %d' % (5 * how_many, counter.count))
counter = LockingCounter()
run_threads(worker, how_many, counter)
print('Counter should be %d, found %d' % (5 * how_many, counter.count))
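    # Why the plain Counter loses updates: `count += offset` is a separate
    # load, add, and store, so two threads can interleave between the load and
    # the store and one increment is silently dropped. A quick way to see the
    # window (illustrative):
    #
    #   import dis
    #   dis.dis(Counter.increment)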
|
test_gc.py
|
import unittest
from test.test_support import verbose, run_unittest, start_threads
import sys
import time
import gc
import weakref
try:
import threading
except ImportError:
threading = None
### Support code
###############################################################################
# Bug 1055820 has several tests of longstanding bugs involving weakrefs and
# cyclic gc.
# An instance of C1055820 has a self-loop, so becomes cyclic trash when
# unreachable.
class C1055820(object):
def __init__(self, i):
self.i = i
self.loop = self
class GC_Detector(object):
# Create an instance I. Then gc hasn't happened again so long as
# I.gc_happened is false.
def __init__(self):
self.gc_happened = False
def it_happened(ignored):
self.gc_happened = True
# Create a piece of cyclic trash that triggers it_happened when
# gc collects it.
self.wr = weakref.ref(C1055820(666), it_happened)
### Tests
###############################################################################
class GCTests(unittest.TestCase):
def test_list(self):
l = []
l.append(l)
gc.collect()
del l
self.assertEqual(gc.collect(), 1)
def test_dict(self):
d = {}
d[1] = d
gc.collect()
del d
self.assertEqual(gc.collect(), 1)
def test_tuple(self):
# since tuples are immutable we close the loop with a list
l = []
t = (l,)
l.append(t)
gc.collect()
del t
del l
self.assertEqual(gc.collect(), 2)
def test_class(self):
class A:
pass
A.a = A
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_newstyleclass(self):
class A(object):
pass
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_instance(self):
class A:
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
def test_newinstance(self):
class A(object):
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
class B(list):
pass
class C(B, A):
pass
a = C()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
del B, C
self.assertNotEqual(gc.collect(), 0)
A.a = A()
del A
self.assertNotEqual(gc.collect(), 0)
self.assertEqual(gc.collect(), 0)
def test_method(self):
# Tricky: self.__init__ is a bound method, it references the instance.
class A:
def __init__(self):
self.init = self.__init__
a = A()
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
def test_finalizer(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
class A:
def __del__(self): pass
class B:
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
def test_finalizer_newclass(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
class A(object):
def __del__(self): pass
class B(object):
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
def test_function(self):
# Tricky: f -> d -> f, code should call d.clear() after the exec to
# break the cycle.
d = {}
exec("def f(): pass\n") in d
gc.collect()
del d
self.assertEqual(gc.collect(), 2)
def test_frame(self):
def f():
frame = sys._getframe()
gc.collect()
f()
self.assertEqual(gc.collect(), 1)
def test_saveall(self):
# Verify that cyclic garbage like lists show up in gc.garbage if the
# SAVEALL option is enabled.
# First make sure we don't save away other stuff that just happens to
# be waiting for collection.
gc.collect()
# if this fails, someone else created immortal trash
self.assertEqual(gc.garbage, [])
L = []
L.append(L)
id_L = id(L)
debug = gc.get_debug()
gc.set_debug(debug | gc.DEBUG_SAVEALL)
del L
gc.collect()
gc.set_debug(debug)
self.assertEqual(len(gc.garbage), 1)
obj = gc.garbage.pop()
self.assertEqual(id(obj), id_L)
def test_del(self):
        # __del__ methods can trigger collection, make this happen
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A:
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
def test_del_newclass(self):
        # __del__ methods can trigger collection, make this happen
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A(object):
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
# The following two tests are fragile:
# They precisely count the number of allocations,
# which is highly implementation-dependent.
# For example:
# - disposed tuples are not freed, but reused
# - the call to assertEqual somehow avoids building its args tuple
def test_get_count(self):
# Avoid future allocation of method object
assertEqual = self._baseAssertEqual
gc.collect()
assertEqual(gc.get_count(), (0, 0, 0))
a = dict()
# since gc.collect(), we created two objects:
# the dict, and the tuple returned by get_count()
assertEqual(gc.get_count(), (3, 0, 0))
def test_collect_generations(self):
# Avoid future allocation of method object
assertEqual = self.assertEqual
gc.collect()
a = dict()
gc.collect(0)
assertEqual(gc.get_count(), (0, 1, 0))
gc.collect(1)
assertEqual(gc.get_count(), (0, 0, 1))
gc.collect(2)
assertEqual(gc.get_count(), (0, 0, 0))
def test_trashcan(self):
class Ouch:
n = 0
def __del__(self):
Ouch.n = Ouch.n + 1
if Ouch.n % 17 == 0:
gc.collect()
# "trashcan" is a hack to prevent stack overflow when deallocating
# very deeply nested tuples etc. It works in part by abusing the
# type pointer and refcount fields, and that can yield horrible
# problems when gc tries to traverse the structures.
# If this test fails (as it does in 2.0, 2.1 and 2.2), it will
# most likely die via segfault.
# Note: In 2.3 the possibility for compiling without cyclic gc was
# removed, and that in turn allows the trashcan mechanism to work
# via much simpler means (e.g., it never abuses the type pointer or
# refcount fields anymore). Since it's much less likely to cause a
# problem now, the various constants in this expensive (we force a lot
# of full collections) test are cut back from the 2.2 version.
gc.enable()
N = 150
for count in range(2):
t = []
for i in range(N):
t = [t, Ouch()]
u = []
for i in range(N):
u = [u, Ouch()]
v = {}
for i in range(N):
v = {1: v, 2: Ouch()}
gc.disable()
@unittest.skipUnless(threading, "test meaningless on builds without threads")
def test_trashcan_threads(self):
# Issue #13992: trashcan mechanism should be thread-safe
NESTING = 60
N_THREADS = 2
def sleeper_gen():
"""A generator that releases the GIL when closed or dealloc'ed."""
try:
yield
finally:
time.sleep(0.000001)
class C(list):
# Appending to a list is atomic, which avoids the use of a lock.
inits = []
dels = []
def __init__(self, alist):
self[:] = alist
C.inits.append(None)
def __del__(self):
# This __del__ is called by subtype_dealloc().
C.dels.append(None)
# `g` will release the GIL when garbage-collected. This
# helps assert subtype_dealloc's behaviour when threads
# switch in the middle of it.
g = sleeper_gen()
next(g)
# Now that __del__ is finished, subtype_dealloc will proceed
# to call list_dealloc, which also uses the trashcan mechanism.
def make_nested():
"""Create a sufficiently nested container object so that the
trashcan mechanism is invoked when deallocating it."""
x = C([])
for i in range(NESTING):
x = [C([x])]
del x
def run_thread():
"""Exercise make_nested() in a loop."""
while not exit:
make_nested()
old_checkinterval = sys.getcheckinterval()
sys.setcheckinterval(3)
try:
exit = []
threads = []
for i in range(N_THREADS):
t = threading.Thread(target=run_thread)
threads.append(t)
with start_threads(threads, lambda: exit.append(1)):
time.sleep(1.0)
finally:
sys.setcheckinterval(old_checkinterval)
gc.collect()
self.assertEqual(len(C.inits), len(C.dels))
def test_boom(self):
class Boom:
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom()
b = Boom()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# a<->b are in a trash cycle now. Collection will invoke
# Boom.__getattr__ (to see whether a and b have __del__ methods), and
# __getattr__ deletes the internal "attr" attributes as a side effect.
# That causes the trash cycle to get reclaimed via refcounts falling to
# 0, thus mutating the trash graph as a side effect of merely asking
# whether __del__ exists. This used to (before 2.3b1) crash Python.
# Now __getattr__ isn't called.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2(self):
class Boom2:
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2()
b = Boom2()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# Much like test_boom(), except that __getattr__ doesn't break the
# cycle until the second time gc checks for __del__. As of 2.3b1,
# there isn't a second time, so this simply cleans up the trash cycle.
# We expect a, b, a.__dict__ and b.__dict__ (4 objects) to get
# reclaimed this way.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom_new(self):
# boom__new and boom2_new are exactly like boom and boom2, except use
# new-style classes.
class Boom_New(object):
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom_New()
b = Boom_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2_new(self):
class Boom2_New(object):
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2_New()
b = Boom2_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_get_referents(self):
alist = [1, 3, 5]
got = gc.get_referents(alist)
got.sort()
self.assertEqual(got, alist)
atuple = tuple(alist)
got = gc.get_referents(atuple)
got.sort()
self.assertEqual(got, alist)
adict = {1: 3, 5: 7}
expected = [1, 3, 5, 7]
got = gc.get_referents(adict)
got.sort()
self.assertEqual(got, expected)
got = gc.get_referents([1, 2], {3: 4}, (0, 0, 0))
got.sort()
self.assertEqual(got, [0, 0] + range(5))
self.assertEqual(gc.get_referents(1, 'a', 4j), [])
def test_is_tracked(self):
# Atomic built-in types are not tracked, user-defined objects and
# mutable containers are.
# NOTE: types with special optimizations (e.g. tuple) have tests
# in their own test files instead.
self.assertFalse(gc.is_tracked(None))
self.assertFalse(gc.is_tracked(1))
self.assertFalse(gc.is_tracked(1.0))
self.assertFalse(gc.is_tracked(1.0 + 5.0j))
self.assertFalse(gc.is_tracked(True))
self.assertFalse(gc.is_tracked(False))
self.assertFalse(gc.is_tracked("a"))
self.assertFalse(gc.is_tracked(u"a"))
self.assertFalse(gc.is_tracked(bytearray("a")))
self.assertFalse(gc.is_tracked(type))
self.assertFalse(gc.is_tracked(int))
self.assertFalse(gc.is_tracked(object))
self.assertFalse(gc.is_tracked(object()))
class OldStyle:
pass
class NewStyle(object):
pass
self.assertTrue(gc.is_tracked(gc))
self.assertTrue(gc.is_tracked(OldStyle))
self.assertTrue(gc.is_tracked(OldStyle()))
self.assertTrue(gc.is_tracked(NewStyle))
self.assertTrue(gc.is_tracked(NewStyle()))
self.assertTrue(gc.is_tracked([]))
self.assertTrue(gc.is_tracked(set()))
def test_bug1055820b(self):
# Corresponds to temp2b.py in the bug report.
ouch = []
def callback(ignored):
ouch[:] = [wr() for wr in WRs]
Cs = [C1055820(i) for i in range(2)]
WRs = [weakref.ref(c, callback) for c in Cs]
c = None
gc.collect()
self.assertEqual(len(ouch), 0)
# Make the two instances trash, and collect again. The bug was that
# the callback materialized a strong reference to an instance, but gc
# cleared the instance's dict anyway.
Cs = None
gc.collect()
self.assertEqual(len(ouch), 2) # else the callbacks didn't run
for x in ouch:
# If the callback resurrected one of these guys, the instance
# would be damaged, with an empty __dict__.
self.assertEqual(x, None)
class GCTogglingTests(unittest.TestCase):
def setUp(self):
gc.enable()
def tearDown(self):
gc.disable()
def test_bug1055820c(self):
# Corresponds to temp2c.py in the bug report. This is pretty
# elaborate.
c0 = C1055820(0)
# Move c0 into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_c0_alive = c0
del c0.loop # now only c1 keeps c0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
ouch = []
def callback(ignored):
ouch[:] = [c2wr()]
# The callback gets associated with a wr on an object in generation 2.
c0wr = weakref.ref(c0, callback)
c0 = c1 = c2 = None
# What we've set up: c0, c1, and c2 are all trash now. c0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's a
# global weakref to c2 (c2wr), but that weakref has no callback.
# There's also a global weakref to c0 (c0wr), and that does have a
# callback, and that callback references c2 via c2wr().
#
# c0 has a wr with callback, which references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see c0 at all, and c0 is
# the only object that has a weakref with a callback. gc clears c1
# and c2. Clearing c1 has the side effect of dropping the refcount on
# c0 to 0, so c0 goes away (despite that it's in an older generation)
# and c0's wr callback triggers. That in turn materializes a reference
# to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
junk = []
i = 0
detector = GC_Detector()
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else the callback wasn't invoked
for x in ouch:
# If the callback resurrected c2, the instance would be damaged,
# with an empty __dict__.
self.assertEqual(x, None)
def test_bug1055820d(self):
# Corresponds to temp2d.py in the bug report. This is very much like
# test_bug1055820c, but uses a __del__ method instead of a weakref
# callback to sneak in a resurrection of cyclic trash.
ouch = []
class D(C1055820):
def __del__(self):
ouch[:] = [c2wr()]
d0 = D(0)
# Move all the above into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_d0_alive = d0
del d0.loop # now only c1 keeps d0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
d0 = c1 = c2 = None
# What we've set up: d0, c1, and c2 are all trash now. d0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's
# a global weakref to c2 (c2wr), but that weakref has no callback.
# There are no other weakrefs.
#
# d0 has a __del__ method that references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see d0 at all. gc clears
# c1 and c2. Clearing c1 has the side effect of dropping the refcount
# on d0 to 0, so d0 goes away (despite that it's in an older
# generation) and d0's __del__ triggers. That in turn materializes
# a reference to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
detector = GC_Detector()
junk = []
i = 0
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else __del__ wasn't invoked
for x in ouch:
# If __del__ resurrected c2, the instance would be damaged, with an
# empty __dict__.
self.assertEqual(x, None)
def test_main():
enabled = gc.isenabled()
gc.disable()
assert not gc.isenabled()
debug = gc.get_debug()
gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak
try:
gc.collect() # Delete 2nd generation garbage
run_unittest(GCTests, GCTogglingTests)
finally:
gc.set_debug(debug)
# test gc.enable() even if GC is disabled by default
if verbose:
print "restoring automatic collection"
# make sure to always test gc.enable()
gc.enable()
assert gc.isenabled()
if not enabled:
gc.disable()
if __name__ == "__main__":
test_main()
|
scapy_isolated_test.py
|
#!/usr/bin/python
import sys, os
from multiprocessing import Process
import tempfile
def check_offsets(build, stdout, scapy_str):
import sys
sys.stdout = stdout
sys.stderr = stdout
# clean this env
for key in sys.modules.copy().keys():
if key.startswith('scapy.'):
del sys.modules[key]
globals().clear()
import outer_packages
from scapy.all import Ether, IP, UDP
pkt = eval(scapy_str)
if build:
pkt.build()
assert pkt
assert pkt.payload
lay = pkt
while lay:
print(' ### %s (offset %s)' % (lay.name, lay._offset))
lay.dump_fields_offsets()
if lay == pkt:
assert lay._offset == 0, 'Offset of first layer should be zero.'
else:
if build:
                assert lay._offset != 0, 'Offset of second and further layers should not be zero if the packet is built.'
else:
                assert lay._offset == 0, 'Offset of second and further layers should be zero if the packet is not built.'
for index, field in enumerate(lay.fields_desc):
if index == 0:
assert field._offset == 0, 'Offset of first field should be zero.'
else:
if build:
if field.get_size_bytes() == 0:
continue
                    assert field._offset != 0, 'Offset of second and further fields should not be zero if the packet is built.'
else:
                    assert field._offset == 0, 'Offset of second and further fields should be zero if the packet is not built.'
lay = lay.payload
def check_offsets_pcap(stdout, pcap):
import sys
sys.stdout = stdout
sys.stderr = stdout
# clean this env
for key in sys.modules.copy().keys():
if key.startswith('scapy.'):
del sys.modules[key]
globals().clear()
import outer_packages
from scapy.all import Ether, IP, UDP
from scapy.layers.dns import DNS
from scapy.utils import rdpcap
pkt = rdpcap(pcap)[0]
assert pkt
assert pkt.payload
not_built_offsets = {}
lay = pkt
while lay:
print(' ### %s (offset %s)' % (lay.name, lay._offset))
not_built_offsets[lay.name] = {}
not_built_offsets[lay.name]['_offset'] = lay._offset
lay.dump_fields_offsets()
if lay == pkt:
assert lay._offset == 0, 'Offset of first layer should be zero.'
else:
assert lay._offset != 0, 'Offset of second and further layers should not be zero.'
for index, field in enumerate(lay.fields_desc):
if index == 0:
assert field._offset == 0, 'Offset of first field should be zero.'
else:
if field.get_size_bytes() == 0:
continue
                    assert field._offset != 0, 'Offset of second and further fields should not be zero.'
not_built_offsets[lay.name][field.name] = field._offset
lay = lay.payload
print('')
pkt.build()
lay = pkt
while lay:
print(' ### %s (offset %s)' % (lay.name, lay._offset))
assert not_built_offsets[lay.name]['_offset'] == lay._offset, 'built and not built pcap offsets differ'
lay.dump_fields_offsets()
if lay == pkt:
assert lay._offset == 0, 'Offset of first layer should be zero.'
else:
assert lay._offset != 0, 'Offset of second and further layers should not be zero.'
for index, field in enumerate(lay.fields_desc):
if index == 0:
assert field._offset == 0, 'Offset of first field should be zero.'
else:
if field.get_size_bytes() == 0:
continue
                    assert field._offset != 0, 'Offset of second and further fields should not be zero.'
assert not_built_offsets[lay.name][field.name] == field._offset, 'built and not built pcap offsets differ'
lay = lay.payload
def isolate_env(f, *a, **k):
with tempfile.TemporaryFile(mode = 'w+') as tmpfile:
k['stdout'] = tmpfile
p = Process(target = f, args = a, kwargs = k)
p.start()
p.join()
print('')
tmpfile.seek(0)
print(tmpfile.read())
if p.exitcode:
raise Exception('Return status not zero, check the output')
class CScapyOffsets_Test():
def setUp(self):
self.dir = os.path.abspath(os.path.dirname(__file__)) + '/'
# verify that built packet gives non-zero offsets
def test_offsets_udp_build(self):
isolate_env(check_offsets, scapy_str = "Ether()/IP()/UDP()/('x'*9)", build = True)
# verify that non-built packet gives zero offsets
def test_offsets_udp_nobuild(self):
isolate_env(check_offsets, scapy_str = "Ether()/IP()/UDP()/('x'*9)", build = False)
# verify that pcap either built or not gives same non-zero offsets
def test_offsets_pcap(self):
isolate_env(check_offsets_pcap, pcap = self.dir + 'golden/bp_sim_dns_vlans.pcap')
|
csv_importer.py
|
from openerp import models, api, fields, SUPERUSER_ID
import pdb
import time
import os
import csv
import cStringIO
import threading
import logging
from openerp.sql_db import db_connect
_logger = logging.getLogger( "CSV_Importer" )
class CSVManager(models.Model):
"""
    This class is used as an interface for developers to interact with this library.
    It is an Odoo model, so developers can call its functions directly through the Odoo environment.
_name = "csv.import.manager"
"""
_name = "csv.import.manager"
def initialize(self, import_operation , import_data, validation_method,
caller_class, db_cols_count = 0, debug = None ):
"""
This function will be used to initialize the library with required parameters.
        import_operation  : [Required] A meaningful name for the import operation, such as 'import_contact'
import_data : [Required] Encoded csv data with 'cp850' followed by 'base64' (default in Odoo)
validation_method : [Required] Must be a function in caller_class
caller_class : [Required] Odoo model name i.e. _name
db_cols_count : [Optional] Possible number of columns in CSV File
debug : [Optional] True to print debug message , False to keep silent
Example :-
class Example(models.Model):
_name = "importer.example"
def method_one (self, decoded_data, task_id):
with api.Environment.manage():
self.env = api.Environment( db_connect( self.env.cr.dbname ).cursor(), self.env.uid, self.env.context )
# ... Import Code ...
index = 0
# ... Import Code ...
for row in decoded_data:
index +=1
# ... Import Code ...
return True
        Note:- We are using threading, so the validation function must be
        wrapped within api.Environment.manage() and should reinitialize its
        environment with api.Environment().
"""
self.import_operation = import_operation
self.import_data = import_data
self.db_cols_count = db_cols_count
self.validation_method = validation_method
self.caller_class = caller_class
self.debug = debug
def start_import(self):
"""
This function will start the import process if not running already
"""
if CSVHolder.debug: _logger.info("[ Main ] : CSVManager.start_import called")
caller_class_obj = self.env[self.caller_class]
if CSVHolder.debug: _logger.info("[ Main ] : Call validation method to verify its working")
method_exist = False
try:
getattr(caller_class_obj, self.validation_method)
method_exist = True
except:
if CSVHolder.debug: _logger.info("Either validation method does not exist or not accessible ")
if not method_exist:
return False
csv_holder_obj = self.env['csv.import.holder']
if CSVHolder.debug: _logger.info("[ Main ] : Calling CSVHolder.hold_csv with provided details ")
csv_holder_obj.hold_csv(
self.import_operation, self.import_data,
self.db_cols_count, self.caller_class,
self.validation_method, self.debug
)
return True
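# Illustrative caller sketch (model and method names follow the docstring
# example above; the CSV payload shape is an assumption):
#
#   manager = self.env['csv.import.manager']
#   manager.initialize('import_contact', encoded_csv, 'method_one',
#                      'importer.example', db_cols_count=3, debug=True)
#   manager.start_import()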
class CSVHolder(models.Model):
"""
    This class is responsible for performing the actual import operation.
_name = "csv.import.holder"
"""
_name = "csv.import.holder"
debug = False
val_table_name = "csv_importer_details"
import_running = False
def set_debug(self, debug_status):
"""
Set debug status to True or False
"""
CSVHolder.debug = debug_status
def create_validation_table(self):
"""
This function will create validation table which will store the initialized data
"""
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Got request to create csv_importer_details Table " )
with api.Environment.manage():
env = api.Environment(self.pool.cursor(), SUPERUSER_ID, self.env.context)
cur = env.cr
try:
if not self.is_table_exist(CSVHolder.val_table_name):
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Preparing query ")
query = """
CREATE TABLE %s
( id serial primary key,
import_req_id integer UNIQUE NOT NULL,
import_col_count integer NOT NULL,
import_class text NOT NULL,
import_func text NOT NULL
)
""" % (CSVHolder.val_table_name,)
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Prepared, executing ... ")
cur.execute(query)
cur.commit()
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Table created successfully ")
except Exception, e:
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Error in table creation : " + str(e))
finally:
if not cur.closed : cur.close()
def store_validation_details(self, task_id , val_odoo_class, val_odoo_func, db_cols_count):
"""
This function will be used to store the initialization data
"""
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Got request to insert new import task details in csv_importer_details table ")
with api.Environment.manage():
env = api.Environment(self.pool.cursor(), SUPERUSER_ID, self.env.context)
cur = env.cr
try:
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Preparing insert query ...")
ins_query = """ INSERT INTO %s
(import_req_id, import_col_count, import_class, import_func)
VALUES (%s,%s,'%s','%s')
""" % (CSVHolder.val_table_name,task_id, db_cols_count, val_odoo_class, val_odoo_func)
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Prepared , executing ")
cur.execute(ins_query)
cur.commit()
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Executed ")
except Exception, e:
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Error in task insertion : " + str(e))
finally:
if not cur.closed : cur.close()
def hold_csv(self, import_operation = "",
csv_data = "", db_cols_count = "",
val_class = "", val_func = "", debug = False):
"""
This function will store the provided CSV to disk for temporary storage
and then will do the import process
"""
if CSVHolder.debug: _logger.info("[ CSVHolder ] : CSVHolder.hold_csv started ")
self.import_operation = import_operation
self.csv_data = csv_data
self.db_cols_count = db_cols_count
self.val_class = val_class
self.val_func = val_func
CSVHolder.debug = debug
if (import_operation == "" or csv_data == "" ):
if debug is None:
return False
else:
return {
"status" : False,
msg : "Required parameters have empty values"
}
self.table_name = 'buffer_' + import_operation
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Got request for new task : " + import_operation)
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Check whether validation table exist or not")
if not self.is_table_exist(CSVHolder.val_table_name):
self.create_validation_table()
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Requesting to write data on disk ")
# if CSVHolder.debug: _logger.info(threading.current_thread().name , "Table is not empty "
threading.Thread( target=self.__write_csv_to_disk, name="[Thread : Write To Disk]" ).start()
if CSVHolder.debug: _logger.info("Importer Running : "+ str(CSVHolder.import_running))
if not CSVHolder.import_running:
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Calling importer simultaneously ")
threading.Thread( target=self.start_data_import, name="[Thread : Start Import]" ).start()
def is_buffer_table_match(self, task_details):
"""
        This function compares the current buffer table structure with the
        structure required by the task at hand for the import.
"""
table_name = "buffer_" + task_details['import_operation']
table_col_count = task_details['db_count']
with api.Environment.manage():
env = api.Environment(self.pool.cursor(), SUPERUSER_ID, self.env.context)
cur = env.cr
query = "SELECT count(*) FROM information_schema.columns WHERE table_name='%s'" %(table_name,)
cur.execute(query)
col_count = int(cur.fetchone()[0])
if not cur.closed: cur.close()
if CSVHolder.debug: _logger.info(" Table Have " + str(col_count - 2) + " columns vs " + str(table_col_count) + " count")
            return table_col_count == (col_count - 2)
def remove_buffer_table(self, table_name = None):
"""
This function will clear the buffer table
"""
if CSVHolder.debug: _logger.info("Remove BUffer Table ")
with api.Environment.manage():
env = api.Environment(self.pool.cursor(), SUPERUSER_ID, self.env.context)
cur = env.cr
table_name = self.table_name if table_name == None else table_name
query = "DROP TABLE IF EXISTS " + table_name
if CSVHolder.debug: _logger.info("Remove Query: " + query)
cur.execute("BEGIN")
cur.execute("LOCK TABLE " + table_name + " IN EXCLUSIVE MODE NOWAIT")
cur.execute(query)
cur.commit()
if not cur.closed: cur.close()
if CSVHolder.debug: _logger.info("Done")
@staticmethod
def remove_csv_file(import_file_name):
"""
This function will take the file name and remove it.
"""
module_name = 'csv_importer'
complete_path = __file__[:__file__.index(module_name)] + import_file_name
os.remove(complete_path)
def start_data_import(self, external_task = None):
"""
This function will start import process.
"""
if CSVHolder.debug: _logger.info("[ CSVHolder ] : start_data_import started ... ")
# Mark Import Thread as running
CSVHolder.import_running = True
val_db_count = 0
with api.Environment.manage():
env = api.Environment(self.pool.cursor(), SUPERUSER_ID, self.env.context)
cur = env.cr
join_time = time.time()
if CSVHolder.debug: _logger.info("Join Time : " + str(join_time))
while True:
pending_imports = self.get_pending_import_request(external_task)
if CSVHolder.debug:
print "\n\n[ CSVHolder ] : Here is pending task ", pending_imports
for task in pending_imports:
if external_task != None and external_task != task['id']:
continue
id = task['id']
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Picked task with Id : " + str(id))
task_buff_table = "buffer_" + task['import_operation']
if CSVHolder.debug: _logger.info("Buffer Table for this task is : " + task_buff_table)
is_buffer_table_exist = self.is_table_exist(task_buff_table)
if not is_buffer_table_exist :
if CSVHolder.debug: _logger.info("[ CSVHolder ] : But it does not exist, requesting to create it .... ")
self.create_buffer_table(task)
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Now Buffer Table Created " )
else:
if CSVHolder.debug: _logger.info("Buffer Table Exist ")
if CSVHolder.debug: _logger.info(" Before Task Value :" + str(task))
if not self.is_buffer_table_match(task):
if CSVHolder.debug: _logger.info(" But Structure did not match ")
self.remove_buffer_table(task_buff_table)
if CSVHolder.debug: _logger.info(" After Task Value :" + str(task))
self.create_buffer_table(task)
# Clear Buffer Table
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Cleared Buffer Table to process : "+ str(id))
self.remove_data_from_buffer_table(task_buff_table)
                    # Move this task's data to the buffer table
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Asked to move data to buffer table "+ str(id))
self.move_data_from_disk_to_buff(task)
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Moved CSV File Data TO Buffer Table "+ str(id))
# Do Import Operation
buffer_table_empty = self.is_buffer_table_empty(task_buff_table)
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Is Buffer Table Empty : "+ str(buffer_table_empty))
if not buffer_table_empty:
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Now started process: "+ str(id))
data_from_db = []
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Fetching data from Buffer Table"+ str(id))
cur.execute("SELECT * FROM " + task_buff_table)
recs = cur.fetchall()
col_to_ignore = ['id','is_imported']
for item in recs:
data_from_db.append(item[1:-1]) # Ignore 1st and last column value
if CSVHolder.debug: _logger.info("[ CSVHolder ] : We have now data "+ str(id))
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Calling Validation Function "+ str(id))
call_func = getattr(env[task['class']],task['func'])
is_completed = call_func (data_from_db, id)
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Got response "+ str(is_completed))
if is_completed is True:
if CSVHolder.debug: _logger.info("Finished with one import task "+ str(id))
self.update_task_progress(id, 100)
                            self.remove_data_from_buffer_table(task_buff_table)
self.remove_csv_file(task['import_file_name'])
else:
if CSVHolder.debug:
print "[ CSVHolder ] : Got -ve response from user , we will keep ",
print "the file to do import later ", id
else:
if CSVHolder.debug: _logger.info("Buffer Table is already empty, we can't do anything ")
pending_imports = self.get_pending_import_request(external_task)
complete_time = time.time()
if external_task != None :
break
if len(pending_imports) == 0 and int(round(complete_time - join_time)) > 2:
break
if not cur.closed: cur.close()
if CSVHolder.debug: _logger.info("Quite Time : "+ str(complete_time))
CSVHolder.import_running = False
def remove_data_from_buffer_table(self, buffer_table):
"""
It will remove data from buffer table
"""
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Got request to clear buffer table: "+ str(buffer_table))
with api.Environment.manage():
env = api.Environment(self.pool.cursor(), SUPERUSER_ID, self.env.context)
cur = env.cr
try:
if CSVHolder.debug: _logger.info("Obtaining Lock before Data Removal")
cur.execute("BEGIN")
cur.execute("LOCK TABLE " + buffer_table + " IN EXCLUSIVE MODE NOWAIT")
if CSVHolder.debug: _logger.info("Lock Obtained")
cur.execute("DELETE FROM "+ buffer_table)
cur.commit()
except Exception,e :
cur.rollback()
if CSVHolder.debug: _logger.info("Exception in clearing table " + str(e))
finally:
if not cur.closed: cur.close()
if CSVHolder.debug: _logger.info("[ CSVHolder ] : Cleared : "+ str(buffer_table))
def update_task_progress(self, task_id, progress_counter):
"""
It will update the import progress
Use:-
class Example(models.Model):
_name = "importer.example"
def method_one (self, decoded_data, task_id):
with api.Environment.manage():
self.env = api.Environment( db_connect( self.env.cr.dbname ).cursor(), self.env.uid, self.env.context )
# ... Import Code ...
index = 0
csv_holder = self.env['csv.import.holder']
# ... Import Code ...
for row in decoded_data:
index +=1
# ... Import Code ...
csv_holder.update_task_progress(task_id, index )
# ... Import Code ...
return True
"""
        progress_counter = 99 if progress_counter > 100 else progress_counter
with api.Environment.manage():
env = api.Environment(self.pool.cursor(), SUPERUSER_ID, self.env.context)
cur = env.cr
finished_date = fields.Datetime.now() if progress_counter == 100 else None
csv_import_logger_obj = env['csv.import.logger']
search_data = csv_import_logger_obj.search([('id','=', task_id)])
search_data.import_progress = progress_counter
if finished_date is not None:
search_data.import_fdate = finished_date
cur.commit()
if not cur.closed: cur.close()
def move_data_from_disk_to_buff(self, task_details):
"""
This function will load data from hard disk to buffer table
"""
func_success = False
task_id = task_details['id']
import_file_name = task_details['import_file_name']
import_operation = task_details['import_operation']
db_cols_count = task_details['db_count']
table_name = "buffer_" + import_operation
module_name = 'csv_importer'
if CSVHolder.debug: _logger.info("Got request to move file from disk to buffer : "+ str(task_id))
complete_path = __file__[:__file__.index(module_name)] + import_file_name
if CSVHolder.debug:
print "File should be on location " , complete_path
print "Fetched file location ", task_id
file = open(complete_path, "r")
decoded_data = self.read_encoded_csv_data(file.read())
# pdb.set_trace()
if CSVHolder.debug: _logger.info("Data Loaded from File to Memory"+ str(task_id))
if CSVHolder.debug: _logger.info(" Insert count " + str(db_cols_count))
insert_qry = "INSERT INTO %s "+ str(tuple(['col' + str(x) for x in range(1,db_cols_count+1)])).replace("'","")
insert_qry += " values "
insert_qry = insert_qry % (table_name,)
values = []
for row in decoded_data :
insert_qry += "(" + ("%s," * ( int(db_cols_count)) ).rstrip(",") + "), "
values.extend(row[:db_cols_count])
insert_qry = insert_qry.rstrip(", ")
if CSVHolder.debug: _logger.info("Insert Query "+ str((values)))
if CSVHolder.debug: _logger.info("Preparing to insert data "+ str(task_id))
with api.Environment.manage():
env = api.Environment(self.pool.cursor(), SUPERUSER_ID, self.env.context)
cur = env.cr
try:
if CSVHolder.debug: _logger.info("Trying to obtain lock on table " + table_name)
cur.execute("BEGIN")
cur.execute("LOCK TABLE " + table_name + " IN EXCLUSIVE MODE NOWAIT ")
if CSVHolder.debug: _logger.info("Success ")
cur.execute(insert_qry, tuple(values))
cur.commit()
if CSVHolder.debug: _logger.info("Data Inserted for task "+ str(task_id))
qry = "SELECT count(*) FROM " + table_name
cur.execute(qry)
if CSVHolder.debug: _logger.info(str(cur.fetchone()[0]) + " records inserted" )
func_success = True
except Exception, e:
func_success = False
if not cur.closed : cur.rollback()
if CSVHolder.debug: _logger.info("Failed " + str(e))
finally:
if not cur.closed : cur.close()
return func_success
def get_pending_import_request(self, external_task = None):
"""
We can get the pending import list using that function
"""
with api.Environment.manage():
env = api.Environment(self.pool.cursor(), SUPERUSER_ID, self.env.context)
cur = env.cr
try:
data = []
sel_col = 'details.id "details_id",details.import_col_count "db_count"'
sel_col += ', details.import_class "class", details.import_func "func"'
sel_col += ', logger.id, logger.import_operation'
sel_col += ', logger.import_progress'
sel_col += ', logger.import_sdate, logger.import_fdate'
sel_col += ', logger.import_file_name'
from_tab = ' csv_import_logger logger, csv_importer_details details'
if external_task is not None:
where_cond = ' logger.id = details.import_req_id AND logger.id = ' + str(external_task)
else:
where_cond = ' logger.id = details.import_req_id AND logger.import_progress = 0 '
query = 'SELECT ' + sel_col + " FROM " + from_tab + " WHERE "+ where_cond
cur.execute(query)
recs = cur.dictfetchall()
finally:
if not env.cr.closed: env.cr.close()
return recs
def is_buffer_table_empty(self, buffer_table = None):
"""
return
True - if buffer table is empty
False - if buffer table is not empty
"""
with api.Environment.manage():
env = api.Environment(self.pool.cursor(), SUPERUSER_ID, self.env.context)
            buffer_table = buffer_table if buffer_table is not None else self.table_name
env.cr.execute("SELECT count(*) FROM " + buffer_table)
res = int(env.cr.fetchone()[0])
if not env.cr.closed: env.cr.close()
if res == 0:
return True
else:
return False
def read_file_from_path(self, filename):
"""
This function will read the given file and return the content of the file
"""
complete_path = __file__[:__file__.index("csv_importer")] + filename
        file = open(complete_path, "r")
        try:
            content = file.read()
        finally:
            file.close()
return content
def create_buffer_table(self, task_details):
"""
It is responsible to create the buffer table for particular task
"""
buffer_table_name = "buffer_" + task_details['import_operation']
buffer_table_col_count = task_details['db_count']
if CSVHolder.debug: _logger.info("Got request to create buffer table for Table %s having %d columns " % (buffer_table_name, buffer_table_col_count))
if CSVHolder.debug: _logger.info(" Preparing create query ...")
create_query = """CREATE TABLE %s (
id serial primary key,
%s
is_imported integer default -1
);
"""
if buffer_table_col_count == 0:
if CSVHolder.debug: _logger.info(" Validation Table have 0 count")
csv_rows = self.read_encoded_csv_data(self.read_file_from_path(task_details['import_file_name']))
# pdb.set_trace()
for row in csv_rows:
new_db_cols_count = len(row)
print row
break
else:
new_db_cols_count = buffer_table_col_count
# pdb.set_trace()
task_details['db_count'] = new_db_cols_count
if CSVHolder.debug: _logger.info(" New Table Count is : " + str(new_db_cols_count))
cols = ""
for i in range(1,new_db_cols_count + 1):
cols += "col" + str(i) + " text,\n"
create_query = create_query % (buffer_table_name, cols)
_logger.info("Create Query : " + create_query)
if CSVHolder.debug: _logger.info(" Prepared ")
with api.Environment.manage():
# As this function is in a new thread, I need to open a new cursor, because the old one may be closed
env = api.Environment(self.pool.cursor(), SUPERUSER_ID, self.env.context)
cur = env.cr
try:
if CSVHolder.debug: _logger.info(" Executing prepared create query ...")
cur.execute(create_query)
cur.commit()
cur.execute("UPDATE csv_importer_details SET import_col_count = " + str(new_db_cols_count) + " WHERE import_req_id = " + str(task_details['id']))
cur.commit()
if CSVHolder.debug: _logger.info(" Executed ")
finally:
if not cur.closed: cur.close()
def is_table_exist(self, table_name = None):
"""
return
True - if table exist
False - if table does not exist
"""
with api.Environment.manage():
env = api.Environment(self.pool.cursor(), SUPERUSER_ID, self.env.context)
cur = env.cr
table_name = self.table_name if table_name is None else table_name
try:
cur.execute("select exists(select * from information_schema.tables where table_name=%s)", (table_name,))
result = cur.fetchone()[0]
# pdb.set_trace()
finally:
if not cur.closed: cur.close()
return result
def update_log(self, import_operation, file_name):
"""
It will write log for the import task
"""
if CSVHolder.debug: _logger.info("Function : update_log() start")
result = []
with api.Environment.manage():
env = api.Environment(self.pool.cursor(), SUPERUSER_ID, self.env.context)
cur = env.cr
try:
logger_obj = env['csv.import.logger']
result = logger_obj.write_log(import_operation, file_name)
except Exception, e:
if CSVHolder.debug: _logger.info("Error in log updation : " + str(e))
finally:
if not env.cr.closed : env.cr.close()
if CSVHolder.debug: _logger.info("Function : update_log() end")
return result
def __write_csv_to_disk(self):
"""
It will be used to write csv data in a file
"""
if CSVHolder.debug: _logger.info("Got request to write csv to disk")
module_name = 'csv_importer'
file_name = "file" + ('%.6f' % (time.time(), )).replace('.','_')
upload_dir = "/uploads/"
filename_for_db = module_name + upload_dir + file_name
complete_name = __file__[:__file__.index(module_name)] + filename_for_db
successfully_written = False
        try:
            file = open(complete_name, "w+")
            try:
                file.write(self.csv_data)
                successfully_written = True
            finally:
                file.close()
        except Exception:
            complete_name = False
        if isinstance(complete_name, str):
rec = self.update_log(self.import_operation, filename_for_db)
self.store_validation_details(rec.id, self.val_class, self.val_func, self.db_cols_count)
@staticmethod
def read_encoded_csv_data(encoded_csv_data):
"""
        It will be used to return the plain data from an encoded CSV
"""
csv_file = cStringIO.StringIO( encoded_csv_data.decode( 'base64' ).decode( 'cp850' ).encode( 'utf8' ) )
reader = csv.reader( csv_file, delimiter = '|', quotechar = '"' )
return reader
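# Illustrative only: producing import_data in the shape read_encoded_csv_data()
# expects -- pipe-delimited CSV text pushed through the inverse codec chain
# (Python 2 codecs, mirroring the decode above):
#
#   plain = "col1|col2\ncell1|cell2\n"
#   import_data = plain.decode('utf8').encode('cp850').encode('base64')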
class ImportLogger(models.Model):
"""
This class will be used to store the requested import task as a log..
_name = "csv.import.logger"
"""
_name = "csv.import.logger"
import_operation = fields.Char('Import Operation', required=True)
import_sdate = fields.Datetime('Requested Date', default = lambda self: fields.Datetime.now(), required = True)
import_fdate = fields.Datetime('Finished Date', default = None)
import_progress = fields.Integer('Progress ', default = 0)
import_file_name = fields.Char('Import File Name ', default = None)
def write_log(self, import_operation = '', import_file_name = ''):
"""
This function will write log for a task.
"""
if CSVHolder.debug: _logger.info("Got request to write new log ")
with api.Environment.manage():
self.env = api.Environment( db_connect( self.env.cr.dbname ).cursor(), self.env.uid, self.env.context )
if CSVHolder.debug: _logger.info(" Finally came to write log")
try:
data = {
'import_operation' : import_operation,
'import_file_name' : import_file_name
}
rec = self.create(data)
self.env.cr.commit()
if CSVHolder.debug: _logger.info(" Done write log" + str(rec))
except Exception,e:
if CSVHolder.debug: _logger.info(" Error in write_log() " + str(e))
finally:
if not self.env.cr.closed: self.env.cr.close()
return rec
@api.multi
def name_get(self):
"""
It will be used to get the name for each import task in form view
"""
rec = super(ImportLogger, self).name_get()
        rec[0] = (rec[0][0], "Task " + str(rec[0][0]))
return rec
@api.one
def start_import(self):
"""
User can manually start import process for a pending task using
that function
"""
task_id = self.id
csv_holder_obj = self.env['csv.import.holder']
_logger.info(" Going to start import for task : " + str(task_id))
csv_holder_obj.set_debug(True)
is_done = csv_holder_obj.start_data_import(task_id)
_logger.info(" Done import : " + str(task_id))
@api.multi
def unlink(self):
"""
It will be called when user will remove task(s) from UI.
"""
# pdb.set_trace()
ids = [row.id for row in self]
resp = super(ImportLogger, self).unlink()
if isinstance(ids, (int, long)):
ids = [ids]
_logger.info(" Got request to delete : " + str(ids))
self.remove_imported_records(ids)
_logger.info(" Response from super : " + str(resp))
return resp
def remove_imported_records(self, ids):
with api.Environment.manage():
env = api.Environment( db_connect( self.env.cr.dbname ).cursor(), SUPERUSER_ID, self.env.context )
try:
query = "DELETE FROM csv_importer_details WHERE import_req_id in (" + ','.join([str(id) for id in ids]) + ")"
env.cr.execute(query)
env.cr.commit()
except Exception as e:
if CSVHolder.debug: _logger.info("Error in delete query: " + str(e))
finally:
if not env.cr.closed: env.cr.close()
|
router.py
|
"""Router - handle message router (base class)
Router to manage responses.
"""
from abc import abstractmethod
import logging
import threading
from typing import Dict, Optional
from typing import TYPE_CHECKING
import uuid
from .message_future import MessageFuture
from ..lib import debug_log
if TYPE_CHECKING:
from queue import Queue
from wandb.proto import wandb_internal_pb2 as pb
logger = logging.getLogger("wandb")
class MessageRouterClosedError(Exception):
"""Router has been closed."""
pass
class MessageFutureObject(MessageFuture):
def __init__(self) -> None:
super(MessageFutureObject, self).__init__()
def get(self, timeout: Optional[int] = None) -> Optional["pb.Result"]:
is_set = self._object_ready.wait(timeout)
if is_set and self._object:
return self._object
return None
class MessageRouter(object):
_pending_reqs: Dict[str, MessageFutureObject]
_request_queue: "Queue[pb.Record]"
_response_queue: "Queue[pb.Result]"
def __init__(self) -> None:
self._pending_reqs = {}
self._lock = threading.Lock()
self._join_event = threading.Event()
self._thread = threading.Thread(target=self.message_loop)
self._thread.name = "MsgRouterThr"
self._thread.daemon = True
self._thread.start()
@abstractmethod
def _read_message(self) -> "Optional[pb.Result]":
raise NotImplementedError
@abstractmethod
def _send_message(self, record: "pb.Record") -> None:
raise NotImplementedError
def message_loop(self) -> None:
while not self._join_event.is_set():
try:
msg = self._read_message()
except EOFError:
# On abnormal shutdown the queue is destroyed underneath us,
# resulting in EOFError; message_loop needs to exit.
logger.warning("EOFError seen in message_loop")
break
except MessageRouterClosedError:
logger.warning("message_loop has been closed")
break
if not msg:
continue
self._handle_msg_rcv(msg)
def send_and_receive(
self, rec: "pb.Record", local: Optional[bool] = None
) -> MessageFuture:
rec.control.req_resp = True
if local:
rec.control.local = local
rec.uuid = uuid.uuid4().hex
future = MessageFutureObject()
with self._lock:
self._pending_reqs[rec.uuid] = future
self._send_message(rec)
return future
def join(self) -> None:
self._join_event.set()
self._thread.join()
def _handle_msg_rcv(self, msg: "pb.Result") -> None:
with self._lock:
future = self._pending_reqs.pop(msg.uuid, None)
if future is None:
# TODO (cvp): saw this in tests, seemed benign enough to ignore, but
# could point to other issues.
if msg.uuid != "":
debug_log.log_message_assert(msg)
logger.warning(
"No listener found for msg with uuid %s (%s)", msg.uuid, msg
)
return
future._set_object(msg)
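# Minimal sketch (not part of this module): a queue-backed subclass showing
# how _read_message/_send_message are expected to be wired. The names
# QueueMessageRouter, request_queue and response_queue are illustrative.
#
# import queue
#
# class QueueMessageRouter(MessageRouter):
#     def __init__(self, request_queue: "Queue[pb.Record]",
#                  response_queue: "Queue[pb.Result]") -> None:
#         self._request_queue = request_queue
#         self._response_queue = response_queue
#         super().__init__()
#
#     def _read_message(self) -> "Optional[pb.Result]":
#         try:
#             # poll so the join event is re-checked about once a second
#             return self._response_queue.get(timeout=1)
#         except queue.Empty:
#             return None
#
#     def _send_message(self, record: "pb.Record") -> None:
#         self._request_queue.put(record)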
|
test_threading_2.py
|
# testing gevent's Event, Lock, RLock, Semaphore, BoundedSemaphore with standard test_threading
from __future__ import print_function
from six import xrange
setup_ = '''from gevent import monkey; monkey.patch_all()
from gevent.event import Event
from gevent.lock import RLock, Semaphore, BoundedSemaphore
from gevent.thread import allocate_lock as Lock
import threading
threading.Event = Event
threading.Lock = Lock
threading.RLock = RLock
threading.Semaphore = Semaphore
threading.BoundedSemaphore = BoundedSemaphore
if not hasattr(threading, 'current_thread'):
threading.current_thread = threading.currentThread
if not hasattr(threading.Thread, 'name'):
threading.Thread.name = property(lambda self: self.getName())
if not hasattr(threading.Thread, 'is_alive'):
threading.Thread.is_alive = threading.Thread.isAlive
if not hasattr(threading.Thread, 'daemon'):
threading.Thread.daemon = property(threading.Thread.isDaemon, threading.Thread.setDaemon)
if not hasattr(threading._Condition, 'notify_all'):
threading._Condition.notify_all = threading._Condition.notifyAll
'''
exec(setup_)
setup_3 = '\n'.join(' %s' % line for line in setup_.split('\n'))
setup_4 = '\n'.join(' %s' % line for line in setup_.split('\n'))
setup_5 = '\n'.join(' %s' % line for line in setup_.split('\n'))
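# The indented copies above exist so that setup_ can be spliced into the
# triple-quoted "if 1:" scripts handed to subprocesses later in this file
# (setup_3 for _run_and_join, setup_4 and setup_5 for the finalize/shutdown
# tests); the leading spaces keep the embedded block's indentation valid.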
try:
from test import support
from test.support import verbose
except ImportError:
from test import test_support as support
from test.test_support import verbose
import random
import re
import sys
import threading
try:
import thread
except ImportError:
import _thread as thread
import time
import unittest
import weakref
import lock_tests
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' % (
self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assert_(self.nrunning.get() <= 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assert_(self.nrunning.get() >= 0)
if verbose:
print('%s is finished. %d tasks are running' % (
self.name, self.nrunning.get()))
class ThreadTests(unittest.TestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>" % i, self, sema, mutex, numrunning)
threads.append(t)
if hasattr(t, 'ident'):
self.failUnlessEqual(t.ident, None)
self.assert_(re.match('<TestThread\(.*, initial\)>', repr(t)))
t.start()
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join(NUMTASKS)
self.assert_(not t.is_alive())
if hasattr(t, 'ident'):
self.failIfEqual(t.ident, 0)
self.assertFalse(t.ident is None)
self.assert_(re.match('<TestThread\(.*, \w+ -?\d+\)>', repr(t)))
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
if sys.version_info[:2] > (2, 5):
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertFalse(threading.currentThread().ident is None)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256kB thread stack size...')
try:
threading.stack_size(262144)
except thread.error:
if verbose:
print('platform does not support changing thread stack size')
return
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1MB thread stack size...')
try:
threading.stack_size(0x100000)
except thread.error:
if verbose:
print('platform does not support changing thread stack size')
return
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assert_(tid in threading._active)
self.assert_(isinstance(threading._active[tid],
threading._DummyThread))
del threading._active[tid]
# gevent eventually cleans up threading._active, but that has not happened here yet
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def SKIP_test_PyThreadState_SetAsyncExc(self):
try:
import ctypes
except ImportError:
if verbose:
print("test_PyThreadState_SetAsyncExc can't import ctypes")
return # can't do anything
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = thread.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
worker_started.wait()
if verbose:
print(" verifying worker hasn't exited")
self.assert_(not t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=10)
self.assert_(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
if sys.version_info[:2] > (2, 5):
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise thread.error()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(thread.error, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
if sys.version_info[:2] > (2, 5):
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
try:
import ctypes
except ImportError:
if verbose:
print("test_finalize_with_runnning_thread can't import ctypes")
return # can't do anything
del ctypes # pyflakes fix
import subprocess
rc = subprocess.call([sys.executable, "-c", """if 1:
%s
import ctypes, sys, time, thread
# This lock is used as a simple event variable.
ready = thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""" % setup_4])
self.assertEqual(rc, 42)
if sys.version_info[:2] > (2, 5):
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
import subprocess
p = subprocess.Popen([sys.executable, "-c", """if 1:
%s
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is: %%r" %% sleep)
threading.Thread(target=child).start()
raise SystemExit
""" % setup_5],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
stdout = stdout.strip()
assert re.match('^Woke up, sleep function is: <.*?sleep.*?>$', stdout), repr(stdout)
stderr = re.sub(r"^\[\d+ refs\]", "", stderr, flags=re.MULTILINE).strip()
self.assertEqual(stderr, "")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getcheckinterval()
try:
for i in xrange(1, 100):
# Try a couple times at each thread-switching interval
# to get more interleavings.
sys.setcheckinterval(i // 5)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertFalse(t in l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setcheckinterval(old_interval)
if sys.version_info[:2] > (2, 5) and not hasattr(sys, 'pypy_version_info'):
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another': self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertEquals(None, weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertEquals(None, weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
class ThreadJoinOnShutdown(unittest.TestCase):
def _run_and_join(self, script):
script = """if 1:
%s
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
\n""" % setup_3 + script
import subprocess
p = subprocess.Popen([sys.executable, "-c", script], stdout=subprocess.PIPE)
rc = p.wait()
data = p.stdout.read().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
self.failIf(rc == 2, "interpreter was blocked")
self.failUnless(rc == 0, "Unexpected error")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
import os
if not hasattr(os, 'fork'):
return
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
import os
if not hasattr(os, 'fork'):
return
# Skip platforms with known problems forking from a worker thread.
# See http://bugs.python.org/issue3863.
# skip disable because I think the bug shouldn't apply to gevent -- denis
#if sys.platform in ('freebsd4', 'freebsd5', 'freebsd6', 'os2emx'):
# print(('Skipping test_3_join_in_forked_from_thread'
# ' due to known OS bugs on'), sys.platform, file=sys.stderr)
# return
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
class ThreadingExceptionTests(unittest.TestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join)
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class RLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading.RLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# An Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
def main():
support.run_unittest(LockTests, RLockTests, EventTests,
ConditionAsRLockTests, ConditionTests,
SemaphoreTests, BoundedSemaphoreTests,
ThreadTests,
ThreadJoinOnShutdown,
ThreadingExceptionTests,
)
if __name__ == "__main__":
main()
|
utils.py
|
'''
Author: Justin Chen
Date: 11/8/2020
'''
import os
import re
import json
import platform
import unicodedata
import urllib.request
from urllib.error import URLError, HTTPError
from threading import Thread
from multiprocessing import Process, cpu_count
from pathlib import Path
from tqdm import tqdm
from bs4 import BeautifulSoup
from colorama import Fore, Style
'''
Save json data
inputs:
filename (str) Name of output file
data (list) JSON object
'''
def save_json(save_dir, filename, data):
with open(os.path.join(save_dir, filename+'.json'), 'w', encoding='utf-8') as file:
json.dump(data, file, ensure_ascii=False, indent=4)
'''
Get the year generator
: For range of years e.g. 2010:2020
, List of years, not necessarily consecutive list e.g. 1987, 2018, 2020
# Single numbers for just that one year e.g. 2020
inputs:
year (str) Year designation
outputs:
years (list) List of years
'''
def get_years(year):
if len(year) == 0: return []
if re.match(r'\d+:\d+', year):
year = year.split(':')
return range(int(year[0]), int(year[1])+1)
return [y for y in year.split(',') if len(y) > 0]
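# Example (illustrative):
# >>> get_years('2010:2012')
# range(2010, 2013)
# >>> get_years('1987,2018,2020')
# ['1987', '2018', '2020']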
'''
Format filename
inputs:
filename (str) Format for filename. Any permutation of 'year-auth-title'. Does not
need to contain all three.
year (str) Year of paper
name (str) Author name
affiliation (str) Affiliation
title (str) Paper title
outputs:
filename (str) Formatted filename
'''
def format_filename(filename, year, auth, affiliation, title):
title = title.lower().replace(':', '').replace('/', ' ')
filename = filename.replace('author', auth.lower())
filename = filename.replace('year', str(year))
filename = filename.replace('affiliation', affiliation)
return filename.replace('title', title)+'.pdf'
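# Example (illustrative):
# >>> format_filename('year-author-title', 2020, 'Chen', 'MIT', 'A Paper: Results')
# '2020-chen-a paper results.pdf'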
'''
Get specific version of the pdf. If version cannot be found, the latest version is returned.
inputs:
pdf_ids (list) Collection of Arxiv PDF ids. List should contain same Arxiv ids.
version (int, optional) Version number starting at 1. -1 for the latest version. Default: -1.
outputs:
version (int) Requested version number if found, otherwise the latest version available
'''
def get_pdf_version(pdf_ids, version=-1):
version_ids, v = [], 1
for p in pdf_ids:
i = p['href'].rfind('v')+1
v = int(p['href'][i:])
if v == version: return v
version_ids.append(v)
return max(version_ids)
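# Example (illustrative): given hrefs ending in v1, v2 and v3,
# get_pdf_version(ids, version=2) returns 2, while get_pdf_version(ids)
# returns 3 (the latest version found).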
'''
Check that the pdf id matches the title of the arxiv page
inputs:
pdf_id (str) Arxiv pdf id
title (str) Given title to search for
version (int, optional) PDF version
outputs:
match (bool) True if the id matches the given title
'''
def verify_pdf(pdf_id, title, version=''):
url, resp = f'https://arxiv.org/abs/{pdf_id}', ''
if len(version) > 0: url += f'v{version}'
try:
resp = urllib.request.urlopen(url)
soup = BeautifulSoup(resp.read(), 'html.parser')
page_title = soup.find('h1', {'class': 'title mathjax'}).find_all(text=True)
return title.strip() in page_title
except (HTTPError, URLError) as e:
print(e)
return False
'''
inputs:
authors (list) List of strings of authors
affiliations (list) List of author affiliations
first_name (bool, optional) Extract first name of first author only. Default: False.
last_name (bool, optional) Extract last name of first author only. Default: False.
author_only (bool, optional) Extract first author's name only. Default: True.
affiliation_only (bool, optional) Extract first author's affiliation only. Default: False.
outputs:
name (str) First author
aff (str) First author's affiliation
'''
def get_first_author(authors, affiliations, last_name=False, first_name=False, author_only=True, affiliation_only=False):
name, aff = authors[0], affiliations[0]
if first_name and author_only: return name.split(' ')[0], ''
if last_name and author_only: return name.split(' ')[-1], ''
if first_name and not author_only: return name.split(' ')[0], aff
if last_name and not author_only: return name.split(' ')[-1], aff
if author_only: return name, ''
if affiliation_only: return '', aff
return name, aff
'''
Remove control characters
inputs:
s (str) String with control characters
output:
s (str) Cleaned string
'''
def remove_ctrl_char(s):
s = ''.join(c for c in s if unicodedata.category(c)[0]!='C')
return ' '.join(s.split('\t'))
'''
Arxiv always displays the latest version of the paper. When the request is made, only the current
version of the paper will appear in the html.
inputs:
title (str) Title of paper
latest (bool, optional) If True, get the latest version of the paper
version (int, optional) PDF version
outputs:
href (str) PDF URL
updated (str) Date of PDF
authors (list)
'''
def get_arxiv_link(title, latest=True, version=''):
title_query = remove_ctrl_char(title).replace(' ', '%20').replace(':', '')
query = f'http://export.arxiv.org/api/query?search_query={title_query}'
resp = urllib.request.urlopen(query)
soup = BeautifulSoup(resp.read(), 'html.parser')
entries = soup.find_all('entry')
url, updated, authors = None, '', []
# It's possible that the authors did not upload their paper to Arxiv.
for e in entries:
if e.find('title').text == title:
url = e.find('link', {'title': 'pdf'})['href']
url = url[:url.rfind('v')]+'.pdf'
updated = e.find('updated').text.split('T')[0]
authors = [a.find('name').text for a in e.find_all('author')]
return url, updated, authors
'''
Download a paper.
inputs:
url (str) Link to paper
save_dir (str) Directory to save to
filename (str) Name of file
outputs:
True if downloaded, else False
'''
def download(url, save_dir, filename):
try:
save_path = os.path.join(save_dir, filename)
if not Path(save_path).is_file():
path, headers = urllib.request.urlretrieve(url, save_path)
# urlretrieve returns the local path; verify a non-empty file was written
return os.path.getsize(path) > 0
return True
except URLError:
return False
'''
Format and download paper
inputs:
title (str) Paper title
authors (list) Authors
affiliations (list) Author affiliations
url (str) PDF url
year (str) Publication year
template (str) File name template
save_dir (str) Save directory
'''
def save_paper(title, authors, affiliations, url, year, template, save_dir):
auth, aff = get_first_author(authors, affiliations, last_name=True)
filename = format_filename(template, year, auth, aff, title)
status = download(url, save_dir, filename)
if not status: print(f'{Fore.RED}err{Style.RESET_ALL}: {title}')
'''
Main execution loop for scraping and downloading papers.
inputs:
papers (list) List of dicts of paper meta data
save_dir (str) Save directory
'''
def scrape(papers, year, template, save_dir):
def batch(iterable, size=1):
l = len(iterable)
for i in range(0, l, size):
yield iterable[i:min(i + size, l)]
pbar = tqdm(total=len(papers))
for block in batch(papers, 16):
procs = []
for paper in block:
args = (paper['title'], paper['authors'], paper['affiliations'], paper['url'], year, template, save_dir,)
procs.append(Thread(target=save_paper, args=args))
for p in procs: p.start()
for p in procs: p.join()
pbar.update(len(block))
pbar.close()
_, _, files = next(os.walk(save_dir))
successes = len([f for f in files if f.endswith('.pdf')])
print(f'downloaded {successes} papers')
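# Usage sketch (hypothetical metadata): papers are downloaded 16 at a time,
# each block on its own set of threads.
# papers = [{'title': 'A Paper', 'authors': ['J. Chen'], 'affiliations': ['MIT'],
#            'url': 'https://arxiv.org/pdf/0000.00000.pdf'}]
# scrape(papers, '2020', 'year-author-title', './papers')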
|
follow_waypoints_path.py
|
#!/usr/bin/env python
import threading
import rospy
import actionlib
from smach import State,StateMachine
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from geometry_msgs.msg import PoseWithCovarianceStamped, PoseArray ,PointStamped, PoseStamped
from nav_msgs.msg import Path
from std_msgs.msg import Empty
from tf import TransformListener
import tf
import math
import rospkg
import csv
import time
# Path for saving and retrieving the pose.csv file
output_file_path = rospkg.RosPack().get_path('test_slam')+"/saved_path/pose.csv"
waypoints = []
class FollowPath(State):
def __init__(self):
State.__init__(self, outcomes=['success'], input_keys=['waypoints'])
self.frame_id = rospy.get_param('~goal_frame_id','map')
self.odom_frame_id = rospy.get_param('~odom_frame_id','odom')
self.base_frame_id = rospy.get_param('~base_frame_id','base_footprint')
self.duration = rospy.get_param('~wait_duration', 0.0)
# Get a move_base action client
self.client = actionlib.SimpleActionClient('move_base', MoveBaseAction)
rospy.loginfo('Connecting to move_base...')
self.client.wait_for_server()
rospy.loginfo('Connected to move_base.')
rospy.loginfo('Starting a tf listener.')
self.listener = tf.TransformListener()
self.distance_tolerance = rospy.get_param('waypoint_distance_tolerance', 0.0)
def execute(self, userdata):
global waypoints
# Execute waypoints each in sequence
for waypoint in waypoints:
# Break if preempted
if waypoints == []:
rospy.loginfo('The waypoint queue has been reset.')
break
# Otherwise publish next waypoint as goal
goal = MoveBaseGoal()
goal.target_pose.header.frame_id = self.frame_id
goal.target_pose.pose.position = waypoint.pose.position
goal.target_pose.pose.orientation = waypoint.pose.orientation
rospy.loginfo('Executing move_base goal to position (x,y): %s, %s' %
(waypoint.pose.position.x, waypoint.pose.position.y))
rospy.loginfo("To cancel the goal: 'rostopic pub -1 /move_base/cancel actionlib_msgs/GoalID -- {}'")
self.client.send_goal(goal)
if self.distance_tolerance <= 0.0:
self.client.wait_for_result()
rospy.loginfo("Waiting for %f sec..." % self.duration)
time.sleep(self.duration)
else:
# This loop exits once the robot is within the distance tolerance of the goal point.
distance = 10
while(distance > self.distance_tolerance):
now = rospy.Time.now()
self.listener.waitForTransform(self.odom_frame_id, self.base_frame_id, now, rospy.Duration(4.0))
trans,rot = self.listener.lookupTransform(self.odom_frame_id,self.base_frame_id, now)
distance = math.sqrt(pow(waypoint.pose.position.x-trans[0],2)+pow(waypoint.pose.position.y-trans[1],2))
return 'success'
def convert_PoseWithCovArray_to_PoseArray(waypoints):
"""Used to publish waypoints as pose array so that you can see them in rviz, etc."""
poses = PoseArray()
poses.header.frame_id = rospy.get_param('~goal_frame_id','map')
poses.poses = [pose.pose for pose in waypoints]
return poses
class GetPath(State):
def __init__(self):
State.__init__(self, outcomes=['success'], input_keys=['waypoints'], output_keys=['waypoints'])
# Create publisher to publish waypoints as pose array so that you can see them in rviz, etc.
# self.poseArray_publisher = rospy.Publisher('/waypoints', PoseArray, queue_size=1)
# Start thread to listen for reset messages to clear the waypoint queue
def wait_for_path_reset():
"""thread worker function"""
global waypoints
while not rospy.is_shutdown():
data = rospy.wait_for_message('/path_reset', Empty)
rospy.loginfo('Received path RESET message')
self.initialize_path_queue()
rospy.sleep(3) # Wait 3 seconds because `rostopic echo` latches
# for three seconds and wait_for_message() in a
# loop will see it again.
reset_thread = threading.Thread(target=wait_for_path_reset)
reset_thread.start()
def initialize_path_queue(self):
global waypoints
waypoints = [] # the waypoint queue
# publish empty waypoint queue as pose array so that you can see the change in rviz, etc.
# self.poseArray_publisher.publish(convert_PoseWithCovArray_to_PoseArray(waypoints))
def execute(self, userdata):
global waypoints
self.initialize_path_queue()
self.path_ready = False
# Start a thread to listen for when the path is ready (this function returns then)
# It also saves the clicked path to the pose.csv file
def wait_for_path_ready():
"""thread worker function"""
data = rospy.wait_for_message('/path_ready', Empty)
rospy.loginfo('Received path READY message')
self.path_ready = True
with open(output_file_path, 'w') as file:
for current_pose in waypoints:
file.write(','.join(str(v) for v in (current_pose.pose.position.x, current_pose.pose.position.y, current_pose.pose.position.z, current_pose.pose.orientation.x, current_pose.pose.orientation.y, current_pose.pose.orientation.z, current_pose.pose.orientation.w)) + '\n')
rospy.loginfo('poses written to '+ output_file_path)
ready_thread = threading.Thread(target=wait_for_path_ready)
ready_thread.start()
self.start_journey_bool = False
# Start a thread to listen for start_journey messages,
# which load the saved poses from saved_path/pose.csv
def wait_for_start_journey():
"""thread worker function"""
data_from_start_journey = rospy.wait_for_message('start_journey', Empty)
rospy.loginfo('Received start_journey message')
# with open(output_file_path, 'r') as file:
# reader = csv.reader(file, delimiter = ',')
# for row in reader:
# print row
# current_pose = PoseStamped()
# current_pose.pose.position.x = float(row[0])
# current_pose.pose.position.y = float(row[1])
# current_pose.pose.position.z = float(row[2])
# current_pose.pose.orientation.x = float(row[3])
# current_pose.pose.orientation.y = float(row[4])
# current_pose.pose.orientation.z = float(row[5])
# current_pose.pose.orientation.w = float(row[6])
# waypoints.append(current_pose)
# self.poseArray_publisher.publish(convert_PoseWithCovArray_to_PoseArray(waypoints))
self.start_journey_bool = True
start_journey_thread = threading.Thread(target=wait_for_start_journey)
start_journey_thread.start()
# topic = "/move_base_simple/goal2"
topic = "/waypoint_nav"
rospy.loginfo("Waiting to recieve waypoints via Pose msg on topic %s" % topic)
rospy.loginfo("To start following waypoints: 'rostopic pub /path_ready std_msgs/Empty -1'")
rospy.loginfo("OR")
rospy.loginfo("To start following saved waypoints: 'rostopic pub /start_journey std_msgs/Empty -1'")
# Wait for published waypoints or saved path loaded
while True:
try:
received_msg = rospy.wait_for_message(topic, Path, timeout=1)
waypoints = received_msg.poses
break
except rospy.ROSException as e:
if 'timeout exceeded' in str(e):
continue # no new waypoints within timeout, keep looping
else:
raise e
rospy.loginfo("Recieved new waypoints")
# waypoints.append(pose)
# publish waypoint queue as pose array so that you can see them in rviz, etc.
# self.poseArray_publisher.publish(convert_PoseWithCovArray_to_PoseArray(waypoints))
# Path is ready! return success and move on to the next state (FOLLOW_PATH)
return 'success'
class PathComplete(State):
def __init__(self):
State.__init__(self, outcomes=['success'])
def execute(self, userdata):
rospy.loginfo('###############################')
rospy.loginfo('##### REACHED FINISH GATE #####')
rospy.loginfo('###############################')
return 'success'
def main():
rospy.init_node('follow_waypoints')
sm = StateMachine(outcomes=['success'])
with sm:
StateMachine.add('GET_PATH', GetPath(),
transitions={'success':'FOLLOW_PATH'},
remapping={'waypoints':'waypoints'})
StateMachine.add('FOLLOW_PATH', FollowPath(),
transitions={'success':'PATH_COMPLETE'},
remapping={'waypoints':'waypoints'})
StateMachine.add('PATH_COMPLETE', PathComplete(),
transitions={'success':'GET_PATH'})
outcome = sm.execute()
if __name__ == '__main__':
main()
|
redis.py
|
import functools
import logging
from threading import Thread
import redis
from PyQt5.QtCore import QThread, pyqtSignal
from surirobot.core.common import ehpyqtSlot, QSuperTimer, State
class RedisService(QThread):
LISTEN_INTERVAL = 1000
update_state = pyqtSignal(str, int, dict)
MODULE_NAME = 'redis'
def __init__(self, url, port=6379):
QThread.__init__(self)
self.listenTasks = {}
self.logger = logging.getLogger(type(self).__name__)
self.redis = redis.StrictRedis(host=url, port=port)
self.pub = self.redis.pubsub()
def __del__(self):
for key, value in self.listenTasks.items():
try:
value['timer'].stop()
except Exception:
pass
def listen(self, channel):
if not self.listenTasks.get(channel):
p = self.redis.pubsub()
p.subscribe(channel)
func = functools.partial(self.listen_thread, channel)
timer = QSuperTimer()
timer.timeout.connect(func)
timer.setInterval(self.LISTEN_INTERVAL)
self.listenTasks[channel] = {'pub': p, 'timer': timer}
timer.start()
def mute(self, channel):
if self.listenTasks.get(channel):
self.listenTasks[channel]['timer'].stop()
del self.listenTasks[channel]
def listen_process(self, channel):
p: redis.client.PubSub = self.listenTasks[channel]['pub']
message = p.get_message(timeout=self.LISTEN_INTERVAL / 2000, ignore_subscribe_messages=True)
if message:
command = message['data']
if type(command) == bytes:
command = command.decode('utf-8')
self.logger.debug(command)
self.update_state.emit(self.MODULE_NAME, State.REDIS_NEW, {"data": command})
@ehpyqtSlot()
def listen_thread(self, channel):
Thread(target=self.listen_process, args=[channel]).start()
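# Usage sketch (illustrative host/channel): subscribe, react to published
# commands via the update_state signal, then unsubscribe.
# service = RedisService('localhost')
# service.update_state.connect(lambda module, state, data: print(module, state, data))
# service.listen('surirobot')
# ...
# service.mute('surirobot')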
|
test_fft.py
|
import functools
import numpy as np
import pytest
import cupy
from cupy.fft import config
from cupy.fft._fft import (_default_fft_func, _fft, _fftn,
_size_last_transform_axis)
from cupy import testing
from cupy.testing._helper import _wraps_partial
@pytest.fixture
def skip_forward_backward(request):
if request.instance.norm in ('backward', 'forward'):
if not (np.lib.NumpyVersion(np.__version__) >= '1.20.0'):
pytest.skip('forward/backward is supported by NumPy 1.20+')
def nd_planning_states(states=[True, False], name='enable_nd'):
"""Decorator for parameterized tests with and wihout nd planning
Tests are repeated with config.enable_nd_planning set to True and False
Args:
states(list of bool): The boolean cases to test.
name(str): Argument name to which specified dtypes are passed.
This decorator adds a keyword argument specified by ``name``
to the test fixture. Then, it runs the fixtures in parallel
by passing the each element of ``dtypes`` to the named
argument.
"""
def decorator(impl):
@_wraps_partial(impl, name)
def test_func(self, *args, **kw):
# get original global planning state
planning_state = config.enable_nd_planning
try:
for nd_planning in states:
try:
# enable or disable nd planning
config.enable_nd_planning = nd_planning
kw[name] = nd_planning
impl(self, *args, **kw)
except Exception:
print(name, 'is', nd_planning)
raise
finally:
# restore original global planning state
config.enable_nd_planning = planning_state
return test_func
return decorator
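# Usage sketch: the decorator runs a test body once per planning state; the
# enable_nd argument receives the current value of config.enable_nd_planning.
#
# @nd_planning_states()
# def test_something(self, enable_nd):
#     assert config.enable_nd_planning == enable_nd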
def multi_gpu_config(gpu_configs=None):
"""Decorator for parameterized tests with different GPU configurations.
Args:
gpu_configs (list of list): The GPUs to test.
.. note::
The decorated tests are skipped if no or only one GPU is available.
"""
def decorator(impl):
@functools.wraps(impl)
def test_func(self, *args, **kw):
use_multi_gpus = config.use_multi_gpus
_devices = config._devices
try:
for gpus in gpu_configs:
try:
nGPUs = len(gpus)
assert nGPUs >= 2, 'Must use at least two gpus'
config.use_multi_gpus = True
config.set_cufft_gpus(gpus)
self.gpus = gpus
impl(self, *args, **kw)
except Exception:
print('GPU config is:', gpus)
raise
finally:
config.use_multi_gpus = use_multi_gpus
config._devices = _devices
del self.gpus
return test_func
return decorator
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*testing.product({
'n': [None, 0, 5, 10, 15],
'shape': [(0,), (10, 0), (10,), (10, 10)],
'norm': [None, 'backward', 'ortho', 'forward', ''],
}))
@testing.gpu
class TestFft:
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.fft(a, n=self.n, norm=self.norm)
# np.fft.fft always returns np.complex128
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
# NumPy 1.17.0 and 1.17.1 raise ZeroDivisionError due to a bug
@testing.with_requires('numpy!=1.17.0')
@testing.with_requires('numpy!=1.17.1')
def test_ifft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.ifft(a, n=self.n, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.parameterize(*testing.product({
'shape': [(0, 10), (10, 0, 10), (10, 10), (10, 5, 10)],
'data_order': ['F', 'C'],
'axis': [0, 1, -1],
}))
@testing.gpu
class TestFftOrder:
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if self.data_order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.fft(a, axis=self.axis)
# np.fft.fft always returns np.complex128
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if self.data_order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.ifft(a, axis=self.axis)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
# See #3757 and NVIDIA internal ticket 3093094
def _skip_multi_gpu_bug(shape, gpus):
# avoid CUDA 11.0 (will be fixed by CUDA 11.2) bug triggered by
# - batch = 1
# - gpus = [1, 0]
if (11000 <= cupy.cuda.runtime.runtimeGetVersion() < 11200
and len(shape) == 1
and gpus == [1, 0]):
pytest.skip('avoid CUDA 11 bug')
# Almost identical to the TestFft class, except that
# 1. multi-GPU cuFFT is used
# 2. the tested parameter combinations are adjusted to meet the requirements
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*testing.product({
'n': [None, 0, 64],
'shape': [(0,), (0, 10), (64,), (4, 64)],
'norm': [None, 'backward', 'ortho', 'forward', ''],
}))
@testing.multi_gpu(2)
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='hipFFT does not support multi-GPU FFT')
class TestMultiGpuFft:
@multi_gpu_config(gpu_configs=[[0, 1], [1, 0]])
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft(self, xp, dtype):
_skip_multi_gpu_bug(self.shape, self.gpus)
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.fft(a, n=self.n, norm=self.norm)
# np.fft.fft always returns np.complex128
if xp is np and dtype is np.complex64:
out = out.astype(dtype)
return out
@multi_gpu_config(gpu_configs=[[0, 1], [1, 0]])
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
# NumPy 1.17.0 and 1.17.1 raise ZeroDivisionError due to a bug
@testing.with_requires('numpy!=1.17.0')
@testing.with_requires('numpy!=1.17.1')
def test_ifft(self, xp, dtype):
_skip_multi_gpu_bug(self.shape, self.gpus)
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.ifft(a, n=self.n, norm=self.norm)
# np.fft.fft always returns np.complex128
if xp is np and dtype is np.complex64:
out = out.astype(dtype)
return out
# Almost identical to the TestFftOrder class, except that
# 1. multi-GPU cuFFT is used
# 2. the tested parameter combinations are adjusted to meet the requirements
@testing.parameterize(*testing.product({
'shape': [(10, 10), (10, 5, 10)],
'data_order': ['F', 'C'],
'axis': [0, 1, -1],
}))
@testing.multi_gpu(2)
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='hipFFT does not support multi-GPU FFT')
class TestMultiGpuFftOrder:
@multi_gpu_config(gpu_configs=[[0, 1], [1, 0]])
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft(self, xp, dtype):
_skip_multi_gpu_bug(self.shape, self.gpus)
a = testing.shaped_random(self.shape, xp, dtype)
if self.data_order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.fft(a, axis=self.axis)
# np.fft.fft always returns np.complex128
if xp is np and dtype is np.complex64:
out = out.astype(dtype)
return out
@multi_gpu_config(gpu_configs=[[0, 1], [1, 0]])
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifft(self, xp, dtype):
_skip_multi_gpu_bug(self.shape, self.gpus)
a = testing.shaped_random(self.shape, xp, dtype)
if self.data_order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.ifft(a, axis=self.axis)
# np.fft.fft always returns np.complex128
if xp is np and dtype is np.complex64:
out = out.astype(dtype)
return out
@testing.gpu
class TestDefaultPlanType:
@nd_planning_states()
def test_default_fft_func(self, enable_nd):
# test cases where nd cuFFT plan is possible
ca = cupy.ones((16, 16, 16))
for axes in [(0, 1), (1, 2), None, (0, 1, 2)]:
fft_func = _default_fft_func(ca, axes=axes)
if enable_nd:
# TODO(leofang): test newer ROCm versions
if axes == (0, 1) and cupy.cuda.runtime.is_hip:
assert fft_func is _fft
else:
assert fft_func is _fftn
else:
assert fft_func is _fft
# only a single axis is transformed -> 1d plan preferred
for axes in [(0, ), (1, ), (2, )]:
assert _default_fft_func(ca, axes=axes) is _fft
# non-contiguous axes -> nd plan not possible
assert _default_fft_func(ca, axes=(0, 2)) is _fft
# >3 axes transformed -> nd plan not possible
ca = cupy.ones((2, 4, 6, 8))
assert _default_fft_func(ca) is _fft
# first or last axis not included -> nd plan not possible
assert _default_fft_func(ca, axes=(1, )) is _fft
# for rfftn
ca = cupy.random.random((4, 2, 6))
for s, axes in zip([(3, 4), None, (8, 7, 5)],
[(-2, -1), (0, 1), None]):
fft_func = _default_fft_func(ca, s=s, axes=axes, value_type='R2C')
if enable_nd:
# TODO(leofang): test newer ROCm versions
if axes == (0, 1) and cupy.cuda.runtime.is_hip:
assert fft_func is _fft
else:
assert fft_func is _fftn
else:
assert fft_func is _fft
# nd plan not possible if last axis is not 0 or ndim-1
assert _default_fft_func(ca, axes=(2, 1), value_type='R2C') is _fft
# for irfftn
ca = cupy.random.random((4, 2, 6)).astype(cupy.complex128)
for s, axes in zip([(3, 4), None, (8, 7, 5)],
[(-2, -1), (0, 1), None]):
fft_func = _default_fft_func(ca, s=s, axes=axes, value_type='C2R')
if enable_nd:
# To get around hipFFT's bug, we don't use PlanNd for C2R
# TODO(leofang): test newer ROCm versions
if cupy.cuda.runtime.is_hip:
assert fft_func is _fft
else:
assert fft_func is _fftn
else:
assert fft_func is _fft
# nd plan not possible if last axis is not 0 or ndim-1
assert _default_fft_func(ca, axes=(2, 1), value_type='C2R') is _fft
@pytest.mark.skipif(10010 <= cupy.cuda.runtime.runtimeGetVersion() <= 11010,
reason='avoid a cuFFT bug (cupy/cupy#3777)')
@testing.gpu
@testing.slow
class TestFftAllocate:
def test_fft_allocate(self):
# Check CuFFTError is not raised when the GPU memory is enough.
# See https://github.com/cupy/cupy/issues/1063
# TODO(mizuno): Simplify "a" after memory compaction is implemented.
a = []
for i in range(10):
a.append(cupy.empty(100000000))
del a
b = cupy.empty(100000007, dtype=cupy.float32)
cupy.fft.fft(b)
# Free huge memory for slow test
del b
cupy.get_default_memory_pool().free_all_blocks()
# Clean up FFT plan cache
cupy.fft.config.clear_plan_cache()
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*(
testing.product_dict([
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (3, 4), 's': (1, None), 'axes': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1)},
{'shape': (3, 4), 's': None, 'axes': (-1, -2)},
{'shape': (3, 4), 's': None, 'axes': (0,)},
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (3, 4), 's': None, 'axes': ()},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1)},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3)},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1)},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': ()},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2)},
{'shape': (2, 3, 4, 5), 's': None, 'axes': None},
{'shape': (0, 5), 's': None, 'axes': None},
{'shape': (2, 0, 5), 's': None, 'axes': None},
{'shape': (0, 0, 5), 's': None, 'axes': None},
{'shape': (3, 4), 's': (0, 5), 'axes': None},
{'shape': (3, 4), 's': (1, 0), 'axes': None},
],
testing.product({'norm': [None, 'backward', 'ortho', 'forward', '']})
)
))
@testing.gpu
class TestFft2:
@nd_planning_states()
@testing.for_orders('CF')
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft2(self, xp, dtype, order, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.fft2(a, s=self.s, axes=self.axes, norm=self.norm)
if self.axes is not None and not self.axes:
assert out is a
return out
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@nd_planning_states()
@testing.for_orders('CF')
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifft2(self, xp, dtype, order, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.ifft2(a, s=self.s, axes=self.axes, norm=self.norm)
if self.axes is not None and not self.axes:
assert out is a
return out
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*(
testing.product_dict([
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (3, 4), 's': (1, None), 'axes': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1)},
{'shape': (3, 4), 's': None, 'axes': (-1, -2)},
{'shape': (3, 4), 's': None, 'axes': [-1, -2]},
{'shape': (3, 4), 's': None, 'axes': (0,)},
{'shape': (3, 4), 's': None, 'axes': ()},
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1)},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3)},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -3)},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1)},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': ()},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2)},
{'shape': (2, 3, 4), 's': (4, 3, 2), 'axes': (2, 0, 1)},
{'shape': (2, 3, 4, 5), 's': None, 'axes': None},
{'shape': (0, 5), 's': None, 'axes': None},
{'shape': (2, 0, 5), 's': None, 'axes': None},
{'shape': (0, 0, 5), 's': None, 'axes': None},
],
testing.product({'norm': [None, 'backward', 'ortho', 'forward', '']})
)
))
@testing.gpu
class TestFftn:
@nd_planning_states()
@testing.for_orders('CF')
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fftn(self, xp, dtype, order, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.fftn(a, s=self.s, axes=self.axes, norm=self.norm)
if self.axes is not None and not self.axes:
assert out is a
return out
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@nd_planning_states()
@testing.for_orders('CF')
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifftn(self, xp, dtype, order, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.ifftn(a, s=self.s, axes=self.axes, norm=self.norm)
if self.axes is not None and not self.axes:
assert out is a
return out
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*(
testing.product_dict([
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1)},
{'shape': (3, 4), 's': None, 'axes': (-1, -2)},
{'shape': (3, 4), 's': None, 'axes': (0,)},
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1)},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3)},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1)},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': None},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2)},
{'shape': (0, 5), 's': None, 'axes': None},
{'shape': (2, 0, 5), 's': None, 'axes': None},
{'shape': (0, 0, 5), 's': None, 'axes': None},
],
testing.product({'norm': [None, 'backward', 'ortho', 'forward']})
)
))
@testing.gpu
class TestPlanCtxManagerFftn:
@pytest.fixture(autouse=True)
def skip_buggy(self):
if cupy.cuda.runtime.is_hip:
# TODO(leofang): test newer ROCm versions
if (self.axes == (0, 1) and self.shape == (2, 3, 4)):
pytest.skip("hipFFT's PlanNd for this case "
"is buggy, so Plan1d is generated "
"instead")
@nd_planning_states()
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fftn(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
plan = get_fft_plan(a, self.s, self.axes)
with plan:
out = xp.fft.fftn(a, s=self.s, axes=self.axes, norm=self.norm)
else:
out = xp.fft.fftn(a, s=self.s, axes=self.axes, norm=self.norm)
if xp is np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@nd_planning_states()
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifftn(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
plan = get_fft_plan(a, self.s, self.axes)
with plan:
out = xp.fft.ifftn(a, s=self.s, axes=self.axes, norm=self.norm)
else:
out = xp.fft.ifftn(a, s=self.s, axes=self.axes, norm=self.norm)
if xp is np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@nd_planning_states()
@testing.for_complex_dtypes()
def test_fftn_error_on_wrong_plan(self, dtype, enable_nd):
if 0 in self.shape:
pytest.skip('0 in shape')
# This test ensures the context manager plan is picked up
from cupyx.scipy.fftpack import get_fft_plan
from cupy.fft import fftn
assert config.enable_nd_planning == enable_nd
# can't get a plan, so skip
if self.axes is not None:
if self.s is not None:
if len(self.s) != len(self.axes):
return
elif len(self.shape) != len(self.axes):
return
a = testing.shaped_random(self.shape, cupy, dtype)
bad_in_shape = tuple(2*i for i in self.shape)
if self.s is None:
bad_out_shape = bad_in_shape
else:
bad_out_shape = tuple(2*i for i in self.s)
b = testing.shaped_random(bad_in_shape, cupy, dtype)
plan_wrong = get_fft_plan(b, bad_out_shape, self.axes)
with pytest.raises(ValueError) as ex, plan_wrong:
fftn(a, s=self.s, axes=self.axes, norm=self.norm)
# targeting a particular error
assert 'The cuFFT plan and a.shape do not match' in str(ex.value)
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*testing.product({
'n': [None, 5, 10, 15],
'shape': [(10,), ],
'norm': [None, 'backward', 'ortho', 'forward'],
}))
@testing.gpu
class TestPlanCtxManagerFft:
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
shape = (self.n,) if self.n is not None else None
plan = get_fft_plan(a, shape=shape)
assert isinstance(plan, cupy.cuda.cufft.Plan1d)
with plan:
out = xp.fft.fft(a, n=self.n, norm=self.norm)
else:
out = xp.fft.fft(a, n=self.n, norm=self.norm)
# np.fft.fft always returns np.complex128
if xp is np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
shape = (self.n,) if self.n is not None else None
plan = get_fft_plan(a, shape=shape)
assert isinstance(plan, cupy.cuda.cufft.Plan1d)
with plan:
out = xp.fft.ifft(a, n=self.n, norm=self.norm)
else:
out = xp.fft.ifft(a, n=self.n, norm=self.norm)
if xp is np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@testing.for_complex_dtypes()
def test_fft_error_on_wrong_plan(self, dtype):
# This test ensures the context manager plan is picked up
from cupyx.scipy.fftpack import get_fft_plan
from cupy.fft import fft
a = testing.shaped_random(self.shape, cupy, dtype)
bad_shape = tuple(5*i for i in self.shape)
b = testing.shaped_random(bad_shape, cupy, dtype)
plan_wrong = get_fft_plan(b)
assert isinstance(plan_wrong, cupy.cuda.cufft.Plan1d)
with pytest.raises(ValueError) as ex, plan_wrong:
fft(a, n=self.n, norm=self.norm)
# targeting a particular error
assert 'Target array size does not match the plan.' in str(ex.value)
# Almost identical to the TestPlanCtxManagerFft class, except that
# 1. multi-GPU cuFFT is used
# 2. the tested parameter combinations are adjusted to meet the requirements
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*testing.product({
'n': [None, 64],
'shape': [(64,), (128,)],
'norm': [None, 'backward', 'ortho', 'forward', ''],
}))
@testing.multi_gpu(2)
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='hipFFT does not support multi-GPU FFT')
class TestMultiGpuPlanCtxManagerFft:
@multi_gpu_config(gpu_configs=[[0, 1], [1, 0]])
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft(self, xp, dtype):
_skip_multi_gpu_bug(self.shape, self.gpus)
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
shape = (self.n,) if self.n is not None else None
plan = get_fft_plan(a, shape=shape)
assert isinstance(plan, cupy.cuda.cufft.Plan1d)
with plan:
out = xp.fft.fft(a, n=self.n, norm=self.norm)
else:
out = xp.fft.fft(a, n=self.n, norm=self.norm)
# np.fft.fft always returns np.complex128
if xp is np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@multi_gpu_config(gpu_configs=[[0, 1], [1, 0]])
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifft(self, xp, dtype):
_skip_multi_gpu_bug(self.shape, self.gpus)
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
shape = (self.n,) if self.n is not None else None
plan = get_fft_plan(a, shape=shape)
assert isinstance(plan, cupy.cuda.cufft.Plan1d)
with plan:
out = xp.fft.ifft(a, n=self.n, norm=self.norm)
else:
out = xp.fft.ifft(a, n=self.n, norm=self.norm)
if xp is np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@multi_gpu_config(gpu_configs=[[0, 1], [1, 0]])
@testing.for_complex_dtypes()
def test_fft_error_on_wrong_plan(self, dtype):
# This test ensures the context manager plan is picked up
from cupyx.scipy.fftpack import get_fft_plan
from cupy.fft import fft
a = testing.shaped_random(self.shape, cupy, dtype)
bad_shape = tuple(4*i for i in self.shape)
b = testing.shaped_random(bad_shape, cupy, dtype)
plan_wrong = get_fft_plan(b)
assert isinstance(plan_wrong, cupy.cuda.cufft.Plan1d)
with pytest.raises(ValueError) as ex, plan_wrong:
fft(a, n=self.n, norm=self.norm)
# targeting a particular error
if self.norm == '':
# if norm is invalid, we still get ValueError, but it's raised
# when checking norm, earlier than the plan check
return # skip
assert 'Target array size does not match the plan.' in str(ex.value)
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*(
testing.product_dict([
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1)},
{'shape': (3, 4), 's': None, 'axes': (-1, -2)},
{'shape': (3, 4), 's': None, 'axes': (0,)},
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1)},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3)},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1)},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4, 5), 's': None, 'axes': (-3, -2, -1)},
],
testing.product({'norm': [None, 'backward', 'ortho', 'forward', '']})
)
))
@testing.gpu
class TestFftnContiguity:
@nd_planning_states([True])
@testing.for_all_dtypes()
def test_fftn_orders(self, dtype, enable_nd):
for order in ['C', 'F']:
a = testing.shaped_random(self.shape, cupy, dtype)
if order == 'F':
a = cupy.asfortranarray(a)
out = cupy.fft.fftn(a, s=self.s, axes=self.axes)
fft_func = _default_fft_func(a, s=self.s, axes=self.axes)
if fft_func is _fftn:
# nd plans have output with contiguity matching the input
assert out.flags.c_contiguous == a.flags.c_contiguous
assert out.flags.f_contiguous == a.flags.f_contiguous
else:
# 1d planning case doesn't guarantee preserved contiguity
pass
@nd_planning_states([True])
@testing.for_all_dtypes()
def test_ifftn_orders(self, dtype, enable_nd):
for order in ['C', 'F']:
a = testing.shaped_random(self.shape, cupy, dtype)
if order == 'F':
a = cupy.asfortranarray(a)
out = cupy.fft.ifftn(a, s=self.s, axes=self.axes)
fft_func = _default_fft_func(a, s=self.s, axes=self.axes)
if fft_func is _fftn:
# nd plans have output with contiguity matching the input
assert out.flags.c_contiguous == a.flags.c_contiguous
assert out.flags.f_contiguous == a.flags.f_contiguous
else:
# 1d planning case doesn't guarantee preserved contiguity
pass
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*testing.product({
'n': [None, 5, 10, 15],
'shape': [(10,), (10, 10)],
'norm': [None, 'backward', 'ortho', 'forward', ''],
}))
@testing.gpu
class TestRfft:
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_rfft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.rfft(a, n=self.n, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_irfft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.irfft(a, n=self.n, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.float32)
return out
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*testing.product({
'n': [None, 5, 10, 15],
'shape': [(10,)],
'norm': [None, 'backward', 'ortho', 'forward'],
}))
@testing.gpu
class TestPlanCtxManagerRfft:
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_rfft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
shape = (self.n,) if self.n is not None else None
plan = get_fft_plan(a, shape=shape, value_type='R2C')
assert isinstance(plan, cupy.cuda.cufft.Plan1d)
with plan:
out = xp.fft.rfft(a, n=self.n, norm=self.norm)
else:
out = xp.fft.rfft(a, n=self.n, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_irfft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
shape = (self.n,) if self.n is not None else None
plan = get_fft_plan(a, shape=shape, value_type='C2R')
assert isinstance(plan, cupy.cuda.cufft.Plan1d)
with plan:
out = xp.fft.irfft(a, n=self.n, norm=self.norm)
else:
out = xp.fft.irfft(a, n=self.n, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.float32)
return out
@testing.for_all_dtypes(no_complex=True)
def test_rfft_error_on_wrong_plan(self, dtype):
# This test ensures the context manager plan is picked up
from cupyx.scipy.fftpack import get_fft_plan
from cupy.fft import rfft
a = testing.shaped_random(self.shape, cupy, dtype)
bad_shape = tuple(5*i for i in self.shape)
b = testing.shaped_random(bad_shape, cupy, dtype)
plan_wrong = get_fft_plan(b, value_type='R2C')
assert isinstance(plan_wrong, cupy.cuda.cufft.Plan1d)
with pytest.raises(ValueError) as ex, plan_wrong:
rfft(a, n=self.n, norm=self.norm)
# targeting a particular error
assert 'Target array size does not match the plan.' in str(ex.value)
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*(
testing.product_dict([
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (3, 4), 's': (1, None), 'axes': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1)},
{'shape': (3, 4), 's': None, 'axes': (-1, -2)},
{'shape': (3, 4), 's': None, 'axes': (0,)},
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1)},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3)},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1)},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2)},
{'shape': (2, 3, 4, 5), 's': None, 'axes': None},
],
testing.product({'norm': [None, 'backward', 'ortho', 'forward', '']})
)
))
@testing.gpu
class TestRfft2:
@nd_planning_states()
@testing.for_orders('CF')
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_rfft2(self, xp, dtype, order, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.rfft2(a, s=self.s, axes=self.axes, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@nd_planning_states()
@testing.for_orders('CF')
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_irfft2(self, xp, dtype, order, enable_nd):
assert config.enable_nd_planning == enable_nd
if (10020 >= cupy.cuda.runtime.runtimeGetVersion() >= 10010
and int(cupy.cuda.device.get_compute_capability()) < 70
and _size_last_transform_axis(
self.shape, self.s, self.axes) == 2):
pytest.skip('work-around for cuFFT issue')
a = testing.shaped_random(self.shape, xp, dtype)
if order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.irfft2(a, s=self.s, axes=self.axes, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.float32)
return out
@testing.parameterize(
{'shape': (3, 4), 's': None, 'axes': (), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (), 'norm': None},
)
@testing.gpu
class TestRfft2EmptyAxes:
@testing.for_all_dtypes(no_complex=True)
def test_rfft2(self, dtype):
for xp in (np, cupy):
a = testing.shaped_random(self.shape, xp, dtype)
with pytest.raises(IndexError):
xp.fft.rfft2(a, s=self.s, axes=self.axes, norm=self.norm)
@testing.for_all_dtypes()
def test_irfft2(self, dtype):
for xp in (np, cupy):
a = testing.shaped_random(self.shape, xp, dtype)
with pytest.raises(IndexError):
xp.fft.irfft2(a, s=self.s, axes=self.axes, norm=self.norm)
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*(
testing.product_dict([
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (3, 4), 's': (1, None), 'axes': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1)},
{'shape': (3, 4), 's': None, 'axes': (-1, -2)},
{'shape': (3, 4), 's': None, 'axes': (0,)},
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1)},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3)},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1)},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2)},
{'shape': (2, 3, 4, 5), 's': None, 'axes': None},
],
testing.product({'norm': [None, 'backward', 'ortho', 'forward', '']})
)
))
@testing.gpu
class TestRfftn:
@nd_planning_states()
@testing.for_orders('CF')
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_rfftn(self, xp, dtype, order, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.rfftn(a, s=self.s, axes=self.axes, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@nd_planning_states()
@testing.for_orders('CF')
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_irfftn(self, xp, dtype, order, enable_nd):
assert config.enable_nd_planning == enable_nd
if (10020 >= cupy.cuda.runtime.runtimeGetVersion() >= 10010
and int(cupy.cuda.device.get_compute_capability()) < 70
and _size_last_transform_axis(
self.shape, self.s, self.axes) == 2):
pytest.skip('work-around for cuFFT issue')
a = testing.shaped_random(self.shape, xp, dtype)
if order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.irfftn(a, s=self.s, axes=self.axes, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.float32)
return out
# Only those tests in which a legit plan can be obtained are kept
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*(
testing.product_dict([
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (3, 4), 's': (1, None), 'axes': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1)},
{'shape': (3, 4), 's': None, 'axes': (0,)},
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1)},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1)},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2)},
],
testing.product({'norm': [None, 'backward', 'ortho', 'forward', '']})
)
))
@testing.gpu
class TestPlanCtxManagerRfftn:
@pytest.fixture(autouse=True)
def skip_buggy(self):
if cupy.cuda.runtime.is_hip:
# TODO(leofang): test newer ROCm versions
if (self.axes == (0, 1) and self.shape == (2, 3, 4)):
pytest.skip("hipFFT's PlanNd for this case "
"is buggy, so Plan1d is generated "
"instead")
@nd_planning_states()
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_rfftn(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
plan = get_fft_plan(a, self.s, self.axes, value_type='R2C')
with plan:
out = xp.fft.rfftn(a, s=self.s, axes=self.axes, norm=self.norm)
else:
out = xp.fft.rfftn(a, s=self.s, axes=self.axes, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason="hipFFT's PlanNd for C2R is buggy")
@nd_planning_states()
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_irfftn(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
plan = get_fft_plan(a, self.s, self.axes, value_type='C2R')
with plan:
out = xp.fft.irfftn(
a, s=self.s, axes=self.axes, norm=self.norm)
else:
out = xp.fft.irfftn(a, s=self.s, axes=self.axes, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.float32)
return out
# TODO(leofang): write test_rfftn_error_on_wrong_plan()?
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*(
testing.product_dict([
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1)},
{'shape': (3, 4), 's': None, 'axes': (-1, -2)},
{'shape': (3, 4), 's': None, 'axes': (0,)},
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1)},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3)},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1)},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4, 5), 's': None, 'axes': None},
],
testing.product({'norm': [None, 'backward', 'ortho', 'forward', '']})
)
))
@testing.gpu
class TestRfftnContiguity:
@nd_planning_states([True])
@testing.for_float_dtypes()
def test_rfftn_orders(self, dtype, enable_nd):
for order in ['C', 'F']:
a = testing.shaped_random(self.shape, cupy, dtype)
if order == 'F':
a = cupy.asfortranarray(a)
out = cupy.fft.rfftn(a, s=self.s, axes=self.axes)
fft_func = _default_fft_func(a, s=self.s, axes=self.axes,
value_type='R2C')
if fft_func is _fftn:
# nd plans have output with contiguity matching the input
assert out.flags.c_contiguous == a.flags.c_contiguous
assert out.flags.f_contiguous == a.flags.f_contiguous
else:
# 1d planning case doesn't guarantee preserved contiguity
pass
@nd_planning_states([True])
@testing.for_all_dtypes()
    def test_irfftn_orders(self, dtype, enable_nd):
for order in ['C', 'F']:
a = testing.shaped_random(self.shape, cupy, dtype)
if order == 'F':
a = cupy.asfortranarray(a)
out = cupy.fft.irfftn(a, s=self.s, axes=self.axes)
fft_func = _default_fft_func(a, s=self.s, axes=self.axes,
value_type='C2R')
if fft_func is _fftn:
# nd plans have output with contiguity matching the input
assert out.flags.c_contiguous == a.flags.c_contiguous
assert out.flags.f_contiguous == a.flags.f_contiguous
else:
# 1d planning case doesn't guarantee preserved contiguity
pass
@testing.parameterize(
{'shape': (3, 4), 's': None, 'axes': (), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (), 'norm': None},
)
@testing.gpu
class TestRfftnEmptyAxes:
@testing.for_all_dtypes(no_complex=True)
def test_rfftn(self, dtype):
for xp in (np, cupy):
a = testing.shaped_random(self.shape, xp, dtype)
with pytest.raises(IndexError):
xp.fft.rfftn(a, s=self.s, axes=self.axes, norm=self.norm)
@testing.for_all_dtypes()
def test_irfftn(self, dtype):
for xp in (np, cupy):
a = testing.shaped_random(self.shape, xp, dtype)
with pytest.raises(IndexError):
xp.fft.irfftn(a, s=self.s, axes=self.axes, norm=self.norm)
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*testing.product({
'n': [None, 5, 10, 15],
'shape': [(10,), (10, 10)],
'norm': [None, 'backward', 'ortho', 'forward', ''],
}))
@testing.gpu
class TestHfft:
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_hfft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.hfft(a, n=self.n, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.float32)
return out
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ihfft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.ihfft(a, n=self.n, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.parameterize(
{'n': 1, 'd': 1},
{'n': 10, 'd': 0.5},
{'n': 100, 'd': 2},
)
@testing.gpu
class TestFftfreq:
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_fftfreq(self, xp, dtype):
out = xp.fft.fftfreq(self.n, self.d)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_rfftfreq(self, xp, dtype):
out = xp.fft.rfftfreq(self.n, self.d)
return out
@testing.parameterize(
{'shape': (5,), 'axes': None},
{'shape': (5,), 'axes': 0},
{'shape': (10,), 'axes': None},
{'shape': (10,), 'axes': 0},
{'shape': (10, 10), 'axes': None},
{'shape': (10, 10), 'axes': 0},
{'shape': (10, 10), 'axes': (0, 1)},
)
@testing.gpu
class TestFftshift:
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_fftshift(self, xp, dtype):
x = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.fftshift(x, self.axes)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_ifftshift(self, xp, dtype):
x = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.ifftshift(x, self.axes)
return out
class TestThreading:
def test_threading1(self):
import threading
from cupy.cuda.cufft import get_current_plan
def thread_get_curr_plan():
cupy.cuda.Device().use()
return get_current_plan()
new_thread = threading.Thread(target=thread_get_curr_plan)
new_thread.start()
def test_threading2(self):
import threading
a = cupy.arange(100, dtype=cupy.complex64).reshape(10, 10)
def thread_do_fft():
cupy.cuda.Device().use()
b = cupy.fft.fftn(a)
return b
new_thread = threading.Thread(target=thread_do_fft)
new_thread.start()
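# Illustrative sketch (not part of the test suite): the plan context manager
# pattern exercised above can also be used directly. Assuming a CuPy build
# with cuFFT available, a 1D plan can be created once and reused:
#
#   from cupyx.scipy.fftpack import get_fft_plan
#   a = cupy.random.random(64).astype(cupy.complex64)
#   plan = get_fft_plan(a)      # cupy.cuda.cufft.Plan1d for this shape/dtype
#   with plan:                  # cupy.fft.fft picks the plan up from the
#       out = cupy.fft.fft(a)   # context instead of building a new one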
|
scheduler.py
|
import threading
from time import time
import random
import Queue
from splunktalib.common import log
logger = log.Logs().get_logger("util")
class Scheduler(object):
"""
A simple scheduler which schedules the periodic or once event
"""
import splunktalib.sortedcontainers as sc
max_delay_time = 60
def __init__(self):
self._jobs = Scheduler.sc.SortedSet()
self._wakeup_q = Queue.Queue()
self._lock = threading.Lock()
self._thr = threading.Thread(target=self._do_jobs)
        self._thr.daemon = True
self._started = False
def start(self):
"""
        Start the scheduler, which starts the internal thread for scheduling
        jobs. Please call tear_down() when doing cleanup.
"""
if self._started:
logger.info("Scheduler already started.")
return
self._started = True
self._thr.start()
def tear_down(self):
"""
        Stop the scheduler, which stops the internal thread for scheduling
jobs.
"""
if not self._started:
logger.info("Scheduler already tear down.")
return
self._wakeup_q.put(True)
def _do_jobs(self):
while 1:
(sleep_time, jobs) = self.get_ready_jobs()
self._do_execution(jobs)
try:
done = self._wakeup_q.get(timeout=sleep_time)
except Queue.Empty:
pass
else:
if done:
break
self._started = False
logger.info("Scheduler exited.")
def get_ready_jobs(self):
"""
        @return: a 2-element tuple. The first element is the sleep time
        until the next job is due. The second element is the list of jobs
        that are ready to run now.
"""
now = time()
ready_jobs = []
sleep_time = 1
with self._lock:
job_set = self._jobs
total_jobs = len(job_set)
for job in job_set:
if job.get_expiration() <= now:
ready_jobs.append(job)
if ready_jobs:
del job_set[:len(ready_jobs)]
for job in ready_jobs:
if job.get_interval() != 0 and not job.stopped():
# repeated job, calculate next due time and enqueue
job.update_expiration()
job_set.add(job)
if job_set:
sleep_time = job_set[0].get_expiration() - now
if sleep_time < 0:
logger.warn("Scheduler satuation, sleep_time=%s",
sleep_time)
sleep_time = 0.1
if ready_jobs:
logger.info("Get %d ready jobs, next duration is %f, "
"and there are %s jobs scheduling",
len(ready_jobs), sleep_time, total_jobs)
ready_jobs.sort(key=lambda job: job.get("priority", 0), reverse=True)
return (sleep_time, ready_jobs)
def add_jobs(self, jobs):
with self._lock:
now = time()
job_set = self._jobs
for job in jobs:
delay_time = random.randrange(0, self.max_delay_time)
job.set_initial_due_time(now + delay_time)
job_set.add(job)
self._wakeup()
def update_jobs(self, jobs):
with self._lock:
job_set = self._jobs
for njob in jobs:
job_set.discard(njob)
job_set.add(njob)
self._wakeup()
def remove_jobs(self, jobs):
with self._lock:
job_set = self._jobs
for njob in jobs:
njob.stop()
job_set.discard(njob)
self._wakeup()
def number_of_jobs(self):
with self._lock:
return len(self._jobs)
def disable_randomization(self):
self.max_delay_time = 1
def _wakeup(self):
self._wakeup_q.put(None)
def _do_execution(self, jobs):
for job in jobs:
job()
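# Illustrative sketch (assumes a job object exposing the methods used above:
# get_expiration(), get_interval(), update_expiration(),
# set_initial_due_time() and stop()):
#
#   sched = Scheduler()
#   sched.start()
#   sched.add_jobs([my_job])   # initial due times are randomized by max_delay_time
#   ...
#   sched.tear_down()          # unblocks _do_jobs() and stops the thread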
|
gattclient.py
|
import os
import sys
import code
import argparse
from threading import Thread
import gevent
from binascii import unhexlify
from PyBT.roles import LE_Central
from PyBT.gatt_core import Connection, ConnectionError
def debug(msg):
if os.getenv("DEBUG"):
sys.stdout.write(msg)
sys.stdout.write("\n")
def _argparser():
parser = argparse.ArgumentParser(description='Gatt Client')
parser.add_argument('-i', '--interface', dest='interface', action='store',
type=int, help='Interface to use', default=0)
return parser
class InvalidCommand(Exception):
pass
class UnknownCommand(Exception):
pass
class CommandModule(object):
"""Dumb container for commands"""
@staticmethod
def scan(*args):
if len(args) == 0 or args[0] == 'on':
arg = 'on'
elif args[0] == 'off':
arg = 'off'
else:
raise InvalidCommand("scan [on|off]")
return ('scan', arg)
@staticmethod
def connect(*args):
def fail():
raise InvalidCommand("connect <address> [public|random]")
arg = None
if len(args) == 1:
pass
elif len(args) == 2:
if args[1] in ('public', 'random'):
arg = args[1]
else:
fail()
else:
fail()
return ('connect', args[0], arg)
@staticmethod
def quit(*args):
return ('quit', )
@staticmethod
def write_req(*args):
if len(args) != 2:
raise InvalidCommand("write-req <handle> <value>")
try:
handle = int(args[0], base=16)
value = unhexlify(args[1])
        except (ValueError, TypeError):
raise InvalidCommand("Format error, handle is a hex int and value is a bunch of hex bytes")
return ('write-req', handle, value)
@staticmethod
def write_cmd(*args):
if len(args) != 2:
raise InvalidCommand("write-cmd <handle> <value>")
try:
handle = int(args[0], base=16)
value = unhexlify(args[1])
        except (ValueError, TypeError):
raise InvalidCommand("Format error, handle is a hex int and value is a bunch of hex bytes")
return ('write-cmd', handle, value)
@staticmethod
def read(*args):
if len(args) != 1:
raise InvalidCommand("read <handle>")
try:
handle = int(args[0], base=16)
        except (ValueError, TypeError):
raise InvalidCommand("Format error, handle is a hex int")
return ('read', handle)
@staticmethod
def interval(*args):
if len(args) != 2:
raise InvalidCommand("interval <min> <max>")
try:
min = int(args[0])
max = int(args[1])
        except (ValueError, TypeError):
raise InvalidCommand("Format error, min and max must be integers")
return ('interval', min, max)
@staticmethod
def raw(*args):
if len(args) != 1:
print "Error: raw [data]"
return None
try:
data = unhexlify(args[0])
        except (ValueError, TypeError):
            print("Format error, data is a bunch of hex bytes")
return None
return ('raw', data)
COMMANDS = {
'scan': CommandModule.scan,
'connect': CommandModule.connect,
'quit': CommandModule.quit,
'write-req': CommandModule.write_req,
'write-cmd': CommandModule.write_cmd,
'read': CommandModule.read,
'interval': CommandModule.interval,
}
def parse_command(f):
if len(f) == 0:
return None
cmd_name = f[0]
try:
cmd = COMMANDS[cmd_name](*f[1:])
return cmd
except IndexError:
pass # Ignore people mushing return
except KeyError as e:
print "Error: Unknown command '%s'" % e.args[0]
raise UnknownCommand("unknown: %s" % e.args[0])
except InvalidCommand as e:
print(repr(e)) # TODO Deal more gracefully
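# Example (sketch): parse_command(['read', '12']) returns ('read', 18) since
# handles are parsed as hex; parse_command(['connect', 'aa:bb:cc:dd:ee:ff',
# 'random']) returns ('connect', 'aa:bb:cc:dd:ee:ff', 'random').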
def socket_handler(central):
global seen, state, onconnect
    # handle events (stub; event handling is not implemented in this client)
def runsource_with_connection(connection):
# orig_runsource = code.InteractiveConsole.runsource
def runsource(self, source, filename='<input>',
symbol='single', encode=True):
# Try parsing it as a gatt client thing, then fall back to python
debug("[-] %s" % repr(source.split()))
oncommand_hack = False
try:
parts = source.split()
if len(parts) == 0:
return
if parts[0] == 'oncommand':
oncommand_hack = True
parts.pop(0)
# FIXME(richo) Find out what version gives short syntax
                args = parse_command(parts)
                if args is None:
                    return None
                cmd = args[0].replace('-', '_')
                args = args[1:]
except UnknownCommand:
# XXX uncomment me to make this into a python repl
# return orig_runsource(self, source)
return None
if cmd is not None:
debug("[-] %s(%s)" % (repr(cmd), repr(args)))
# FIXME(richo) Deal gracefully with the AttributeError
func = getattr(connection, cmd)
if oncommand_hack:
                connection.oncommand(lambda: func(*args))
else:
func(*args)
return runsource
def main():
parser = _argparser()
args = parser.parse_args()
central = LE_Central(adapter=args.interface)
connection = Connection(central)
connection.start()
code.InteractiveConsole.runsource = runsource_with_connection(connection)
Thread(target=code.interact).start()
gevent.wait()
if __name__ == '__main__':
main()
|
client.py
|
import socket
from tkinter import *
from threading import Thread
import random
from PIL import ImageTk, Image
screen_width = None
screen_height = None
SERVER = None
PORT = None
IP_ADDRESS = None
playerName = None
canvas1 = None
canvas2 = None
nameEntry = None
nameWindow = None
gameWindow = None
leftBoxes = []
rightBoxes = []
finishingBox = None
playerType = None
playerTurn = None
player1Name = 'joining'
player2Name = 'joining'
player1Label = None
player2Label = None
player1Score = 0
player2Score = 0
player1ScoreLabel = None
player2ScoreLabel = None
dice = None
rollButton = None
resetButton = None
winingMessage = None
winingFunctionCall = 0
def checkColorPosition(boxes, color):
    for box in boxes:
        boxColor = box.cget("bg")
        if(boxColor == color):
            return boxes.index(box)
    # return None (not False) so a match at index 0 is not mistaken for "not found"
    return None
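# Example (sketch): if boxes[2] is the first box colored "red", then
# checkColorPosition(boxes, "red") returns 2; it returns None when no box
# has the requested color.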
def movePlayer1(steps):
global leftBoxes
boxPosition = checkColorPosition(leftBoxes[1:],"red")
    if(boxPosition is not None):
diceValue = steps
coloredBoxIndex = boxPosition
totalSteps = 10
remainingSteps = totalSteps - coloredBoxIndex
if(steps == remainingSteps):
for box in leftBoxes[1:]:
box.configure(bg='white')
global finishingBox
finishingBox.configure(bg='red')
global SERVER
global playerName
            greetMessage = 'Red wins the game.'
SERVER.send(greetMessage.encode())
elif(steps < remainingSteps):
for box in leftBoxes[1:]:
box.configure(bg='white')
nextStep = (coloredBoxIndex + 1 ) + diceValue
leftBoxes[nextStep].configure(bg='red')
else:
print("Move False")
else:
# first step
leftBoxes[steps].configure(bg='red')
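# Example (sketch): with the red piece on leftBoxes[3] (index 2 within the
# leftBoxes[1:] slice) and a roll of 4, remainingSteps is 10 - 2 = 8, so the
# piece advances to leftBoxes[(2 + 1) + 4] == leftBoxes[7].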
def movePlayer2(steps):
global rightBoxes
tempBoxes=rightBoxes[-2::-1]
boxPosition=checkColorPosition(tempBoxes,"yellow")
if(boxPosition):
diceValue = steps
coloredBoxIndex = boxPosition
totalSteps = 10
remainingSteps = totalSteps - coloredBoxIndex
if(diceValue == remainingSteps):
for box in rightBoxes[-2::-1]:
box.configure(bg='white')
global finishingBox
finishingBox.configure(bg='yellow',fg="black")
global SERVER
global playerName
            greetMessage = 'Yellow wins the game.'
SERVER.send(greetMessage.encode())
elif(steps < remainingSteps):
for box in rightBoxes[-2::-1]:
box.configure(bg='white')
nextStep = (coloredBoxIndex + 1 ) + diceValue
rightBoxes[::-1][nextStep].configure(bg='yellow')
else:
print("Move False")
else:
# first step
rightBoxes[len(rightBoxes)-(steps+1)].configure(bg='yellow')
def rollDice():
global SERVER
    # Unicode die faces U+2680..U+2685, written as backslash escape sequences
    diceChoices = ['\u2680','\u2681','\u2682','\u2683','\u2684','\u2685']
    # pick a random face; it is sent to the server rather than shown directly
value = random.choice(diceChoices)
global playerType
global rollButton
global playerTurn
rollButton.destroy()
playerTurn = False
if(playerType == 'player1'):
SERVER.send(f'{value}player2Turn'.encode())
if(playerType == 'player2'):
SERVER.send(f'{value}player1Turn'.encode())
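# Example (sketch): if player1 rolls a 4, the string '\u2683player2Turn' is
# sent; the server is assumed to relay it, and receivedMsg() below matches
# the die glyph first and then hands the turn to player 2.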
def leftBoard():
global gameWindow
global leftBoxes
global screen_height
xPos = 30
for box in range(0,11):
if(box == 0):
boxLabel = Label(gameWindow, font=("Helvetica",30), width=2, height=1, relief='ridge', borderwidth=0, bg="red")
boxLabel.place(x=xPos, y=screen_height/2 - 88)
leftBoxes.append(boxLabel)
xPos +=50
else:
boxLabel = Label(gameWindow, font=("Helvetica",55), width=2, height=1, relief='ridge', borderwidth=0, bg="white")
boxLabel.place(x=xPos, y=screen_height/2- 100)
leftBoxes.append(boxLabel)
xPos +=75
def rightBoard():
global gameWindow
global rightBoxes
global screen_height
xPos = 988
for box in range(0,11):
if(box == 10):
boxLabel = Label(gameWindow, font=("Helvetica",30), width=2, height=1, relief='ridge', borderwidth=0, bg="yellow")
boxLabel.place(x=xPos, y=screen_height/2-88)
rightBoxes.append(boxLabel)
xPos +=50
else:
boxLabel = Label(gameWindow, font=("Helvetica",55), width=2, height=1, relief='ridge', borderwidth=0, bg="white")
boxLabel.place(x=xPos, y=screen_height/2 - 100)
rightBoxes.append(boxLabel)
xPos +=75
def createFinishingBox():
global gameWindow
global finishingBox
global screen_width
global screen_height
finishingBox = Label(gameWindow, text="Home", font=("Chalkboard SE", 32), width=8, height=4, borderwidth=0, bg="green", fg="white")
finishingBox.place(x=screen_width/2 - 68, y=screen_height/2 -160)
def createGameWindow():
global gameWindow
global canvas2
global screen_width
global screen_height
global dice
global winingMessage
global resetButton
gameWindow = Tk()
gameWindow.title("Ludo Ladder")
gameWindow.attributes('-fullscreen',True)
screen_width = gameWindow.winfo_screenwidth()
screen_height = gameWindow.winfo_screenheight()
bg = ImageTk.PhotoImage(file = "./assets/background.png")
canvas2 = Canvas( gameWindow, width = 500,height = 500)
canvas2.pack(fill = "both", expand = True)
# Display image
canvas2.create_image( 0, 0, image = bg, anchor = "nw")
# Add Text
canvas2.create_text( screen_width/2, screen_height/5, text = "Ludo Ladder", font=("Chalkboard SE",100), fill="white")
# Declaring Wining Message
winingMessage = canvas2.create_text(screen_width/2 + 10, screen_height/2 + 250, text = "", font=("Chalkboard SE",100), fill='#fff176')
# Creating Reset Button
    resetButton = Button(gameWindow,text="Reset Game", fg='black', font=("Chalkboard SE", 15), bg="grey",command=resetGame, width=20, height=5)
leftBoard()
rightBoard()
    createFinishingBox()
global rollButton
rollButton = Button(gameWindow,text="Roll Dice", fg='black', font=("Chalkboard SE", 15), bg="grey",command=rollDice, width=20, height=5)
global playerTurn
global playerType
global playerName
global player1Name
global player2Name
global player1Label
global player2Label
global player1Score
global player2Score
global player1ScoreLabel
global player2ScoreLabel
if(playerType == 'player1' and playerTurn):
rollButton.place(x=screen_width / 2 - 80, y=screen_height/2 + 250)
else:
        rollButton.place_forget()
# Creating Dice with value 1
dice = canvas2.create_text(screen_width/2 + 10, screen_height/2 + 100, text = "\u2680", font=("Chalkboard SE",250), fill="white")
# Creating name board
player1Label = canvas2.create_text(400, screen_height/2 + 100, text = player1Name, font=("Chalkboard SE",80), fill='#fff176' )
player2Label = canvas2.create_text(screen_width - 300, screen_height/2 + 100, text = player2Name, font=("Chalkboard SE",80), fill='#fff176' )
# Creating Score Board
player1ScoreLabel = canvas2.create_text(400, screen_height/2 - 160, text = player1Score, font=("Chalkboard SE",80), fill='#fff176' )
player2ScoreLabel = canvas2.create_text(screen_width - 300, screen_height/2 - 160, text = player2Score, font=("Chalkboard SE",80), fill='#fff176' )
gameWindow.resizable(True, True)
gameWindow.mainloop()
def saveName():
global SERVER
global playerName
global nameWindow
global nameEntry
playerName = nameEntry.get()
nameEntry.delete(0, END)
nameWindow.destroy()
SERVER.send(playerName.encode())
    createGameWindow()
def askPlayerName():
global playerName
global nameEntry
global nameWindow
global canvas1
nameWindow = Tk()
nameWindow.title("Ludo Ladder")
nameWindow.attributes('-fullscreen',True)
screen_width = nameWindow.winfo_screenwidth()
screen_height = nameWindow.winfo_screenheight()
bg = ImageTk.PhotoImage(file = "./assets/background.png")
canvas1 = Canvas( nameWindow, width = 500,height = 500)
canvas1.pack(fill = "both", expand = True)
# Display image
canvas1.create_image( 0, 0, image = bg, anchor = "nw")
canvas1.create_text( screen_width/2, screen_height/5, text = "Enter Name", font=("Chalkboard SE",100), fill="white")
nameEntry = Entry(nameWindow, width=15, justify='center', font=('Chalkboard SE', 50), bd=5, bg='white')
nameEntry.place(x = screen_width/2 - 220, y=screen_height/4 + 100)
button = Button(nameWindow, text="Save", font=("Chalkboard SE", 30),width=15, command=saveName, height=2, bg="#80deea", bd=3)
button.place(x = screen_width/2 - 130, y=screen_height/2 - 30)
nameWindow.resizable(True, True)
nameWindow.mainloop()
def resetGame():
global SERVER
SERVER.send("reset game".encode())
def handleWin(message):
global playerType
global rollButton
global canvas2
global winingMessage
global screen_width
global screen_height
global resetButton
#destroying button
if('Red' in message):
if(playerType == 'player2'):
rollButton.destroy()
if('Yellow' in message):
if(playerType == 'player1'):
rollButton.destroy()
# Adding Wining Message
message = message.split(".")[0] + "."
canvas2.itemconfigure(winingMessage, text = message)
#Placing Reset Button
resetButton.place(x=screen_width / 2 - 80, y=screen_height - 220)
def updateScore(message):
global canvas2
global player1Score
global player2Score
global player1ScoreLabel
global player2ScoreLabel
if('Red' in message):
player1Score +=1
if('Yellow' in message):
player2Score +=1
canvas2.itemconfigure(player1ScoreLabel, text = player1Score)
canvas2.itemconfigure(player2ScoreLabel, text = player2Score)
def handleResetGame():
global canvas2
global playerType
global gameWindow
global rollButton
global dice
global screen_width
global screen_height
global playerTurn
global rightBoxes
global leftBoxes
global finishingBox
global resetButton
global winingMessage
global winingFunctionCall
canvas2.itemconfigure(dice, text='\u2680')
# Handling Reset Game
if(playerType == 'player1'):
# Creating roll dice button
rollButton = Button(gameWindow,text="Roll Dice", fg='black', font=("Chalkboard SE", 15), bg="grey",command=rollDice, width=20, height=5)
rollButton.place(x=screen_width / 2 - 80, y=screen_height/2 + 250)
playerTurn = True
if(playerType == 'player2'):
playerTurn = False
for rBox in rightBoxes[-2::-1]:
rBox.configure(bg='white')
for lBox in leftBoxes[1:]:
lBox.configure(bg='white')
finishingBox.configure(bg='green')
canvas2.itemconfigure(winingMessage, text="")
resetButton.destroy()
# Again Recreating Reset Button for next game
    resetButton = Button(gameWindow,text="Reset Game", fg='black', font=("Chalkboard SE", 15), bg="grey",command=resetGame, width=20, height=5)
winingFunctionCall = 0
def receivedMsg():
global SERVER
global playerType
global playerTurn
global rollButton
global screen_width
global screen_height
global canvas2
global dice
global gameWindow
global player1Name
global player2Name
global player1Label
global player2Label
global winingFunctionCall
while True:
message = SERVER.recv(2048).decode()
if('player_type' in message):
recvMsg = eval(message)
playerType = recvMsg['player_type']
playerTurn = recvMsg['turn']
elif('player_names' in message):
players = eval(message)
players = players["player_names"]
for p in players:
if(p["type"] == 'player1'):
player1Name = p['name']
if(p['type'] == 'player2'):
player2Name = p['name']
elif('⚀' in message):
# Dice with value 1
canvas2.itemconfigure(dice, text='\u2680')
elif('⚁' in message):
# Dice with value 2
canvas2.itemconfigure(dice, text='\u2681')
elif('⚂' in message):
# Dice with value 3
canvas2.itemconfigure(dice, text='\u2682')
elif('⚃' in message):
# Dice with value 4
canvas2.itemconfigure(dice, text='\u2683')
elif('⚄' in message):
# Dice with value 5
canvas2.itemconfigure(dice, text='\u2684')
elif('⚅' in message):
# Dice with value 6
canvas2.itemconfigure(dice, text='\u2685')
elif('wins the game.' in message and winingFunctionCall == 0):
winingFunctionCall +=1
handleWin(message)
# Addition Activity
updateScore(message)
elif(message == 'reset game'):
handleResetGame()
#creating rollbutton
if('player1Turn' in message and playerType == 'player1'):
playerTurn = True
rollButton = Button(gameWindow,text="Roll Dice", fg='black', font=("Chalkboard SE", 15), bg="grey",command=rollDice, width=20, height=5)
rollButton.place(x=screen_width / 2 - 80, y=screen_height/2 + 250)
elif('player2Turn' in message and playerType == 'player2'):
playerTurn = True
rollButton = Button(gameWindow,text="Roll Dice", fg='black', font=("Chalkboard SE", 15), bg="grey",command=rollDice, width=20, height=5)
            rollButton.place(x=screen_width / 2 - 80, y=screen_height/2 + 250)
# Creating Name Board
if(player1Name != 'joining' and canvas2):
canvas2.itemconfigure(player1Label, text=player1Name)
if(player2Name != 'joining' and canvas2):
canvas2.itemconfigure(player2Label, text=player2Name)
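# Note (sketch): the server is assumed to send Python-literal dicts such as
# "{'player_type': 'player1', 'turn': True}"; eval() above trusts that
# format, so ast.literal_eval() would be a safer drop-in for untrusted peers.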
def setup():
global SERVER
global PORT
global IP_ADDRESS
PORT = 8000
IP_ADDRESS = '127.0.0.1'
SERVER = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
SERVER.connect((IP_ADDRESS, PORT))
    thread = Thread(target=receivedMsg)
thread.start()
askPlayerName()
setup()
|
sync.py
|
# FILE INFO ###################################################
# Author: Jason Liu <jasonxliu2010@gmail.com>
# Created on July 28, 2019
# Last Update: Time-stamp: <2019-08-19 19:25:26 liux>
###############################################################
from collections import defaultdict
import multiprocessing as mp
#from concurrent import futures
import time, atexit
from .simulus import *
from .simulator import *
__all__ = ["sync"]
import logging
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
class sync(object):
"""A synchronized group of simulators whose simulation clocks will
advance synchronously."""
_simulus = None
def __init__(self, sims, enable_smp=False, enable_spmd=False, lookahead=infinite_time, smp_ways=None):
"""Create a synchronized group of multiple simulators.
Bring all simulators in the group to synchrony; that is, the
simulation clocks of all the simulators in the group, from now
on, will be advanced synchronously in a coordinated fashion.
Args:
sims (list or tuple): a list of local simulators; the
simulators are identified either by their names or as
direct references to instances
enable_smp (bool): enable SMP (Symmetric Multi-Processing)
mode, in which case each local simulator will run as a
separate process, and communication between the
simulators will be facilitated through inter-process
communication (IPC) mechanisms; the default is False,
in which case all local simulators will run
sequentially within the same process
enable_spmd (bool): enable SPMD (Single Program Multiple
Data) mode, in which case multiple simulus instances,
potentially on distributed memory machines, will run
in parallel, where communication between the simulus
instances will be facilitated through the Message
Passing Interface (MPI); the default is False, in
which case the local simulus instance will run
standalone with all simulators running either
sequentially as one process (when enable_smp is
False), or in parallel as separate processes (when
enable_smp is True)
lookahead (float): the maximum difference in simulation
time between the simulators in the group; the default
is infinity; the final lookahead for parallel
simulation will be determined by the min delays of the
named mailboxes of the simulators
smp_ways (int): the maximum number of processes to be
created for shared-memory multiprocessing. This
parameter is only used when SMP is enabled.
Returns:
This function creates, initializes, and returns a
synchronized group. The simulators will first advance
their simulation clock (asynchronously) to the maximum
simulation time among all simulators (including both local
simulators and remote ones, if enable_spmd is True). When
the function returns, the listed simulators are bound to
the synchronized group. That is, the simulation clocks of
the simulators will be advanced synchronously from now on:
all simulators will process events (including all messages
sent between the simulators) in the proper timestamp
order. (This is also known as the local causality
constraint in the parallel discrete-event simulation
literature.)
"""
# the simulus instance is a class variable
if not sync._simulus:
sync._simulus = _Simulus()
if lookahead <= 0:
errmsg = "sync(looahead=%r) expects a positive lookahead" % lookahead
log.error(errmsg)
raise ValueError(errmsg)
if smp_ways is not None and \
(not isinstance(smp_ways, int) or smp_ways <= 0):
errmsg = "sync(smp_ways=%r) expects a positive integer" % smp_ways
log.error(errmsg)
raise ValueError(errmsg)
self._activated = False # keep it false until we are done with creating the sync group
self._smp = enable_smp
self._smp_ways = smp_ways
self._spmd = enable_spmd
if self._spmd and not sync._simulus.args.mpi:
errmsg = "sync(enable_spmd=True) requires MPI support (use --mpi or -x command-line option)"
log.error(errmsg)
raise ValueError(errmsg)
# the local simulators are provided either by names or as
# direct references
self._local_sims = {} # a map from names to simulator instances
self._all_sims = {} # a map from names to mpi ranks (identifying simulator's location)
now_max = minus_infinite_time # to find out the max simulation time of all simulators
if not isinstance(sims, (list, tuple)):
errmsg = "sync(sims=%r) expects a list of simulators" % sims
log.error(errmsg)
raise TypeError(errmsg)
for s in sims:
if isinstance(s, str):
# if simulator name is provided, turn it into instance
ss = sync._simulus.get_simulator(s)
if ss is None:
errmsg = "sync() expects a list of simulators, but '%s' is not" % s
log.error(errmsg)
raise ValueError(errmsg)
else: s = ss
# the item must be a simulator instance
if isinstance(s, simulator):
if s._insync:
# the simulator's already in a sync group
if s._insync != self:
errmsg = "sync() simulator '%s' belongs to another group" % s.name
else:
errmsg = "sync() duplicate simulator '%s' listed" % s.name
log.error(errmsg)
raise ValueError(errmsg)
else:
s._insync = self
self._local_sims[s.name] = s
self._all_sims[s.name] = sync._simulus.comm_rank
if s.now > now_max: now_max = s.now
else:
errmsg = "sync() expects a list of simulators, but %r is not" % s
log.error(errmsg)
raise ValueError(errmsg)
# a synchronized group cannot be empty
if len(self._local_sims) < 1:
errmsg = "sync() sims should not be empty"
log.error(errmsg)
raise ValueError(errmsg)
# if this is a global synchronization group (i.e., when
# enable_spmd is true), we need to learn about the remote
# simulators (e.g., the ranks at which they reside), and get
# the maximum simulation time of all simulators in the group
if self._spmd:
self._all_sims = sync._simulus.allgather(self._all_sims)
now_max = sync._simulus.allreduce(now_max, max)
# find all mailboxes attached to local simulators
self._lookahead = lookahead
self._local_mboxes = {} # a map from mailbox names to mailbox instances
        self._all_mboxes = {} # a map from mailbox name to the corresponding simulator name, min_delay and number of partitions
for sname, sim in self._local_sims.items():
for mbname, mb in sim._mailboxes.items():
if mbname in self._local_mboxes:
if sim == mb._sim:
errmsg = "sync() duplicate mailbox named '%s' in simulator '%s'" % \
(mbname, sname)
else:
errmsg = "sync() duplicate mailbox name '%s' in simulators '%s' and '%s'" % \
(mbname, sname, mb._sim.name)
log.error(errmsg)
raise ValueError(errmsg)
else:
self._local_mboxes[mbname] = mb
self._all_mboxes[mbname] = (sname, mb.min_delay, mb.nparts)
if mb.min_delay < self._lookahead:
self._lookahead = mb.min_delay
# if this is a global synchronization group (i.e., when
# enable_spmd is true) , we need to learn about the remote
# mailboxes and the min delays of all mailboxes
if self._spmd:
self._all_mboxes = sync._simulus.allgather(self._all_mboxes)
self._lookahead = sync._simulus.allreduce(self._lookahead, min)
# lookahead must be strictly positive
if self._lookahead <= 0:
errmsg = "sync() expects positive lookahead; " + \
"check min_delay of mailboxes in simulators"
log.error(errmsg)
raise ValueError(errmsg)
# bring all local simulators' time to the max now
for sname, sim in self._local_sims.items():
if sim.now < now_max:
sim._run(now_max, True)
self.now = now_max
log.info("[r%d] creating sync (enable_smp=%r, enable_spmd=%r): now=%g, lookahead=%g" %
(sync._simulus.comm_rank, self._smp, self._spmd, self.now, self._lookahead))
for sname, simrank in self._all_sims.items():
log.info("[r%d] >> simulator '%s' => r%d" %
(sync._simulus.comm_rank, sname, simrank))
for mbname, (sname, mbdly, mbparts) in self._all_mboxes.items():
log.info("[r%d] >> mailbox '%s' => sim='%s', min_delay=%g, nparts=%d" %
(sync._simulus.comm_rank, mbname, sname, mbdly, mbparts))
# ready for next window
self._remote_msgbuf = defaultdict(list) # a map from rank to list of remote messages
self._remote_future = infinite_time
self._local_partitions = None
self._activated = True
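    # Illustrative sketch (assumes two simulators created elsewhere, with
    # mailboxes whose min_delay is positive):
    #
    #   sim1 = simulator(name='sim1'); sim2 = simulator(name='sim2')
    #   g = sync([sim1, sim2], lookahead=1.0)
    #   g.run(offset=100)   # both clocks advance synchronously to now+100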
def run(self, offset=None, until=None, show_runtime_report=False):
"""Process events of all simulators in the synchronized group each in
timestamp order and advances the simulation time of all simulators
synchronously.
Args:
offset (float): relative time from now until which each of
the simulators should advance its simulation time; if
provided, it must be a non-negative value
until (float): the absolute time until which each of the
simulators should advance its simulation time; if
provided, it must not be earlier than the current time
        The user can specify either 'offset' or 'until', but not both;
        if both 'offset' and 'until' are omitted, the simulators will
        run as long as there are events on their event lists. Be
        careful: in this case the simulation may run forever, as some
        models always generate future events.
        Each simulator will process its events in timestamp order.
        Synchronization ensures that messages sent between the
        simulators do not produce causality errors. When this method
returns, the simulation time of the simulators will advance to
the designated time, if either 'offset' or 'until' has been
specified. All events with timestamps smaller than the
designated time will be processed. If neither 'offset' nor
'until' is provided, the simulators will advance to the time
of the last processed event among all simulators.
If SPMD is enabled, at most one simulus instance (at rank 0)
is allowed to specify the time (using 'offset' or 'until').
All the other simulators must not specify the time.
"""
# figure out the time, up to which all events will be processed
upper_specified = 1
        if until is None and offset is None:
upper = infinite_time
upper_specified = 0
        elif until is not None and offset is not None:
errmsg = "sync.run(until=%r, offset=%r) duplicate specification" % (until, offset)
log.error(errmsg)
raise ValueError(errmsg)
        elif offset is not None:
if offset < 0:
errmsg = "sync.run(offset=%r) negative offset" % offset
log.error(errmsg)
raise ValueError(errmsg)
upper = self.now + offset
elif until < self.now:
errmsg = "sync.run(until=%r) earlier than now (%r)" % (until, self.now)
log.error(errmsg)
raise ValueError(errmsg)
else: upper = until
if self._spmd:
# only rank 0 can specify the upper for global synchronization
if upper_specified > 0 and sync._simulus.comm_rank > 0:
errmsg = "sync.run() 'offset' or 'until' allowed only on rank 0"
log.error(errmsg)
raise ValueError(errmsg)
# we conduct a global synchronization to get the upper
# time for all
sync._simulus.bcast(0) # run command
upper = sync._simulus.allreduce(upper, min)
upper_specified = sync._simulus.allreduce(upper_specified, max)
if self._local_partitions is None:
if self._smp:
# divide the local simulators among the CPU/cores
sims = list(self._local_sims.keys())
if self._smp_ways is None:
self._smp_ways = mp.cpu_count()
k, m = divmod(len(sims), self._smp_ways)
self._local_partitions = list(filter(lambda x: len(x)>0, \
(sims[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(self._smp_ways))))
self._local_queues = {} # a map from pid to queue
self._local_pids = {} # a map from simulator name to pid
for pid, snames in enumerate(self._local_partitions):
self._local_queues[pid] = mp.Queue()
for s in snames: self._local_pids[s] = pid
# start the child processes
self._child_procs = [mp.Process(target=sync._child_run, args=(self, i)) \
for i in range(1, len(self._local_partitions))]
for p in self._child_procs: p.start()
else:
                self._local_partitions = [list(self._local_sims.keys())]
self._local_pids = {} # a map from simulator name to pid
for s in self._local_sims.keys():
self._local_pids[s] = 0
atexit.register(self._run_finish)
self._smp_run(0, upper, upper_specified)
if self._simulus.comm_rank > 0:
while True:
cmd = self._simulus.bcast(None)
if cmd == 0: # run command
upper = sync._simulus.allreduce(infinite_time, min)
upper_specified = sync._simulus.allreduce(0, max)
self._smp_run(0, upper, upper_specified)
elif cmd == 1: # report command
self._smp_report(0)
else: # stop command
assert cmd == 2
break
else:
if show_runtime_report:
self.show_runtime_report()
def _child_run(self, pid):
"""The child processes running in SMP mode."""
log.info("[r%d] sync._child_run(pid=%d): partitions=%r" %
(sync._simulus.comm_rank, pid, self._local_partitions))
assert self._smp and pid>0
# if smp is enabled and for all child processes, we need
# to clear up remote message buffer so that events don't
# get duplicated on different processes
self._remote_msgbuf.clear()
self._remote_future = infinite_time
while True:
try:
cmd = self._local_queues[pid].get()
except KeyboardInterrupt:
# we handle the keyboard interrupt here, since Jupyter
# notebook seems to raise this exception when the
# kernel is interrupted
continue
log.info("[r%d] sync._child_run(pid=%d): recv command %d" %
(sync._simulus.comm_rank, pid, cmd))
if cmd == 0: # run command
upper, upper_specified = self._local_queues[pid].get()
self._smp_run(pid, upper, upper_specified)
elif cmd == 1: # report command
self._smp_report(pid)
else: # stop command
assert cmd == 2
break
def _smp_run(self, pid, upper, upper_specified):
"""Run simulators in separate processes."""
log.info("[r%d] sync._smp_run(pid=%d): begins with upper=%g, upper_specified=%r" %
(sync._simulus.comm_rank, pid, upper, upper_specified))
run_sims = self._local_partitions[pid]
if pid == 0:
for s in range(1, len(self._local_partitions)):
self._local_queues[s].put(0) # run command
self._local_queues[s].put((upper, upper_specified))
while True:
# figure out the start time of the next window (a.k.a.,
# lower bound on timestamp): it's the minimum of three
# values: (1) the timestamp of the first event plus the
# lookahead, (2) the smallest timestamp of messages to be
# sent to a remote simulator, and (3) the upper time
horizon = infinite_time
for s in run_sims:
t = self._local_sims[s].peek()
if horizon > t: horizon = t
if horizon < infinite_time:
horizon += self._lookahead
if horizon > self._remote_future:
horizon = self._remote_future
if horizon > upper:
horizon = upper
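            # e.g., with lookahead 1.0, the earliest local event at t=5, the
            # earliest buffered remote message at t=5.4 and upper=100, the
            # window closes at horizon = min(5+1.0, 5.4, 100) = 5.4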
# find the next window for all processes on all ranks
if len(self._local_partitions) > 1:
if pid > 0:
self._local_queues[0].put(horizon)
else:
for s in range(1, len(self._local_partitions)):
x = self._local_queues[0].get()
if x < horizon: horizon = x
if self._spmd and pid == 0:
horizon = sync._simulus.allreduce(horizon, min)
if len(self._local_partitions) > 1:
if pid > 0:
horizon = self._local_queues[pid].get()
else:
for s in range(1, len(self._local_partitions)):
self._local_queues[s].put(horizon)
#log.debug("[r%d] sync._run(pid='%d'): sync window [%g:%g]" %
# (sync._simulus.comm_rank, pid, self.now, horizon))
            # if there's no more event anywhere and no upper was specified,
            # we can simply stop now; the previous iteration already advanced
            # the current time to the horizon of the last event
if horizon == infinite_time and upper_specified == 0:
break
# bring all local simulators' time to horizon
for s in run_sims:
#log.debug("[r%d] sync._run(): simulator '%s' execute [%g:%g]" %
# (sync._simulus.comm_rank, s[-4:], self._local_sims[s].now, horizon))
self._local_sims[s]._run(horizon, True)
self.now = horizon
# distribute remote messages:
# first, gather remote messages from processes
if len(self._local_partitions) > 1:
if pid > 0:
#log.debug("[r%d] sync._run(pid=%d): put %r to pid 0" %
# (sync._simulus.comm_rank, pid, self._remote_msgbuf))
self._local_queues[0].put(self._remote_msgbuf)
else:
for s in range(1, len(self._local_partitions)):
x = self._local_queues[0].get()
#log.debug("[r%d] sync._run(pid=0): get %r" %
# (sync._simulus.comm_rank, x))
for r in x.keys():
self._remote_msgbuf[r].extend(x[r])
# second, distribute via all to all
if pid == 0:
if self._spmd:
incoming = sync._simulus.alltoall(self._remote_msgbuf)
else:
incoming = self._remote_msgbuf[0]
#log.debug("[r%d] sync._run(pid=0): all-to-all incoming=%r" %
# (sync._simulus.comm_rank, incoming))
# third, scatter messages to target processes
if len(self._local_partitions) > 1:
if pid > 0:
incoming = self._local_queues[pid].get()
#log.debug("[r%d] sync._run(pid=%d): get %r" %
# (sync._simulus.comm_rank, pid, incoming))
else:
pmsgs = defaultdict(list)
if incoming is not None:
for m in incoming:
_, mbname, *_ = m # find destination mailbox name
s, *_ = self._all_mboxes[mbname] # find destination simulator name
# find destination pid
pmsgs[self._local_pids[s]].append(m)
for s in range(1, len(self._local_partitions)):
self._local_queues[s].put(pmsgs[s])
#log.debug("[r%d] sync._run(pid=%d): put %r to pid %d" %
# (sync._simulus.comm_rank, pid, pmsgs[s], s))
incoming = pmsgs[0]
#log.debug("[r%d] sync._run(pid=%d): keep %r" %
# (sync._simulus.comm_rank, pid, incoming))
if incoming is not None:
for until, mbname, part, msg in incoming:
mbox = self._local_mboxes[mbname]
mbox._sim.sched(mbox._mailbox_event, msg, part, until=until)
# now we can remove the old messages and get ready for next window
self._remote_msgbuf.clear()
self._remote_future = infinite_time
if horizon >= upper: break
log.info("[r%d] sync._smp_run(pid=%d): finishes with upper=%g, upper_specified=%r" %
(sync._simulus.comm_rank, pid, upper, upper_specified))
def _run_finish(self):
log.info("[r%d] sync._run_finish() at exit" % sync._simulus.comm_rank)
        if self._simulus.comm_rank == 0:
self._simulus.bcast(2) # stop command
if len(self._local_partitions) > 1:
for s in range(1, len(self._local_partitions)):
self._local_queues[s].put(2) # stop command
for p in self._child_procs: p.join()
def send(self, sim, mbox_name, msg, delay=None, part=0):
"""Send a messsage from a simulator to a named mailbox.
Args:
sim (simulator): the simulator from which the message will
be sent
            mbox_name (str): the name of the mailbox to which the message
is expected to be delivered
msg (object): a message can be any Python object; however,
a message needs to be pickle-able as it may be
transferred between different simulators located on
separate processes (with different Python interpreter)
or even on different machines; a message also cannot
be None
delay (float): the delay with which the message is
expected to be delivered to the mailbox; if it is
ignored, the delay will be set to be the min_delay of
the mailbox; if it is set, the delay value must not be
smaller than the min_delay of the mailbox
part (int): the partition number of the mailbox to which
the message will be delivered; the default is zero
Returns:
This method returns nothing (as opposed to the mailbox
send() method); once sent, it's sent, as it cannot be
cancelled or rescheduled.
"""
if not self._activated:
errmsg = "sync.send() called before the synchronized is created"
log.error(errmsg)
raise RuntimeError(errmsg)
if sim is None or not isinstance(sim, simulator):
errmsg = "sync.send(sim=%r) requires a simulator" % sim
log.error(errmsg)
raise ValueError(errmsg)
if sim.name not in self._local_sims:
errmsg = "sync.send(sim='%s') simulator not in synchronized group" % sim.name
log.error(errmsg)
raise ValueError(errmsg)
if msg is None:
errmsg = "sync.send() message cannot be None"
log.error(errmsg)
raise ValueError(errmsg)
if mbox_name in self._all_mboxes:
sname, min_delay, nparts = self._all_mboxes[mbox_name]
if delay is None:
delay = min_delay
elif delay < min_delay:
errmsg = "sync.send() delay (%g) less than min_delay (%r)" % \
(delay, min_delay)
log.error(errmsg)
raise ValueError(errmsg)
if part < 0 or part >= nparts:
errmsg = "sync.send(part=%r) out of range (target mailbox '%s' has %d partitions)" % \
(part, mbox_name, nparts)
log.error(errmsg)
raise IndexError(errmsg)
# if it's local delivery, send to the target mailbox
# directly; a local delivery can be one of the two cases:
# 1) if SMP is disabled (that is, all local simulators are
# executed on the same process), the target mailbox
# belongs to one of the local simulators; or 2) if SMP is
# enabled (that is, all local simulators are executed on
# separate processes), the target mailbox belongs to the
# same sender simulator
            if (not self._smp and mbox_name in self._local_mboxes) or \
               mbox_name in sim._mailboxes:
                mbox = self._local_mboxes[mbox_name]
                until = sim.now + delay
                mbox._sim.sched(mbox._mailbox_event, msg, part, until=until)
#log.debug("[r%d] sync.send(sim='%s') to local mailbox '%s': msg=%r, delay=%g (until=%g), part=%d" %
# (sync._simulus.comm_rank, sim.name[-4:], mbox_name, msg, delay, until, part))
else:
                until = sim.now + delay
self._remote_msgbuf[self._all_sims[sname]].append((until, mbox_name, part, msg))
if self._remote_future > until:
self._remote_future = until
#log.debug("[r%d] sync.send(sim='%s') to remote mailbox '%s' on simulator '%s': msg=%r, delay=%g, part=%d" %
# (sync._simulus.comm_rank, sim.name[-4:], mbox_name, sname[-4:], msg, delay, part))
else:
errmsg = "sync.send() to mailbox named '%s' not found" % mbox_name
log.error(errmsg)
raise ValueError(errmsg)
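    # A minimal usage sketch (illustrative only; assumes the package is
    # imported as 'simulus', and the simulator and mailbox names below
    # are hypothetical):
    #
    #   sim1 = simulus.simulator('alice')
    #   sim2 = simulus.simulator('bob')
    #   mb = sim2.mailbox('inbox', min_delay=1.0)
    #   g = simulus.sync([sim1, sim2])
    #   g.send(sim1, 'inbox', 'hello', delay=2.0)  # delivered at sim1.now+2.0
    #   g.run(10)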
@classmethod
def comm_rank(cls):
"""Return the process rank of this simulus instance."""
# the simulus instance is a class variable
if not sync._simulus:
sync._simulus = _Simulus()
return sync._simulus.comm_rank
@classmethod
def comm_size(cls):
"""Return the total number processes."""
# the simulus instance is a class variable
if not sync._simulus:
sync._simulus = _Simulus()
return sync._simulus.comm_size
def show_runtime_report(self, show_partition=True, prefix=''):
"""Print a report on the runtime performance of running the
synchronized group.
Args:
show_partition (bool): if it's True (the default), the
print-out report also contains the processor
assignment of the simulators
prefix (str): all print-out lines will be prefixed by this
string (the default is empty); this would help if one
wants to find the report in a large amount of output
"""
if self._spmd and sync._simulus.comm_rank > 0:
errmsg = "sync.show_runtime_report() allowed only on rank 0"
log.error(errmsg)
raise RuntimeError(errmsg)
if self._local_partitions is None:
errmsg = "sync.show_runtime_report() called before sync.run()"
log.error(errmsg)
raise ValueError(errmsg)
if self._spmd:
            sync._simulus.bcast(1) # report command
self._smp_report(0, show_partition, prefix)
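    # Usage sketch (illustrative; 'g' is a hypothetical sync group): after
    # g.run(1000) completes, calling g.show_runtime_report(prefix='> ') on
    # rank 0 prints the aggregated counters, one per line, prefixed by '> '.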
def _smp_report(self, pid, show_partition=None, prefix=None):
"""Collect statistics and report them."""
if pid == 0:
for s in range(1, len(self._local_partitions)):
self._local_queues[s].put(1) # report command
t1 = time.time()
run_sims = self._local_partitions[pid]
sync_rt = {
"start_clock": time.time(),
"sims": self._local_pids.copy(),
"scheduled_events": 0,
"cancelled_events": 0,
"executed_events": 0,
"initiated_processes": 0,
"cancelled_processes": 0,
"process_contexts": 0,
"terminated_processes": 0,
}
for s in run_sims:
rt = self._local_sims[s]._runtime
if rt["start_clock"] < sync_rt["start_clock"]:
sync_rt["start_clock"] = rt["start_clock"]
sync_rt["scheduled_events"] += rt["scheduled_events"]
sync_rt["cancelled_events"] += rt["cancelled_events"]
sync_rt["executed_events"] += rt["executed_events"]
sync_rt["initiated_processes"] += rt["initiated_processes"]
sync_rt["cancelled_processes"] += rt["cancelled_processes"]
sync_rt["process_contexts"] += rt["process_contexts"]
sync_rt["terminated_processes"] += rt["terminated_processes"]
if len(self._local_partitions) > 1:
if pid > 0:
self._local_queues[0].put(sync_rt)
else:
for s in range(1, len(self._local_partitions)):
rt = self._local_queues[0].get()
if rt["start_clock"] < sync_rt["start_clock"]:
sync_rt["start_clock"] = rt["start_clock"]
sync_rt["scheduled_events"] += rt["scheduled_events"]
sync_rt["cancelled_events"] += rt["cancelled_events"]
sync_rt["executed_events"] += rt["executed_events"]
sync_rt["initiated_processes"] += rt["initiated_processes"]
sync_rt["cancelled_processes"] += rt["cancelled_processes"]
sync_rt["process_contexts"] += rt["process_contexts"]
sync_rt["terminated_processes"] += rt["terminated_processes"]
if pid == 0 and self._spmd:
all_rts = sync._simulus.gather(sync_rt)
if self._simulus.comm_rank == 0:
sync_rt = all_rts[0]
for rt in all_rts[1:]:
if rt["start_clock"] < sync_rt["start_clock"]:
sync_rt["start_clock"] = rt["start_clock"]
sync_rt["sims"].update(rt["sims"])
sync_rt["scheduled_events"] += rt["scheduled_events"]
sync_rt["cancelled_events"] += rt["cancelled_events"]
sync_rt["executed_events"] += rt["executed_events"]
sync_rt["initiated_processes"] += rt["initiated_processes"]
sync_rt["cancelled_processes"] += rt["cancelled_processes"]
sync_rt["process_contexts"] += rt["process_contexts"]
sync_rt["terminated_processes"] += rt["terminated_processes"]
if pid == 0 and self._simulus.comm_rank == 0:
print('%s*********** sync group performance metrics ***********' % prefix)
if show_partition:
print('%spartitioning information (simulator assignment):' % prefix)
for sname, simrank in self._all_sims.items():
print("%s '%s' on rank %d proc %d" % (prefix, sname, simrank, sync_rt["sims"][sname]))
t = t1-sync_rt["start_clock"]
print('%sexecution time: %g' % (prefix,t))
print('%sscheduled events: %d (rate=%g)' %
(prefix, sync_rt["scheduled_events"], sync_rt["scheduled_events"]/t))
print('%sexecuted events: %d (rate=%g)' %
(prefix, sync_rt["executed_events"], sync_rt["executed_events"]/t))
print('%scancelled events: %d' % (prefix, sync_rt["cancelled_events"]))
print('%screated processes: %d' % (prefix, sync_rt["initiated_processes"]))
print('%sfinished processes: %d' % (prefix, sync_rt["terminated_processes"]))
print('%scancelled processes: %d' % (prefix, sync_rt["cancelled_processes"]))
print('%sprocess context switches: %d' % (prefix, sync_rt["process_contexts"]))
test_httplib.py
import enum
import errno
from http import client, HTTPStatus
import io
import itertools
import os
import array
import re
import socket
import threading
import warnings
import unittest
from unittest import mock
TestCase = unittest.TestCase
from test import support
from test.support import os_helper
from test.support import socket_helper
from test.support import warnings_helper
here = os.path.dirname(__file__)
# Self-signed cert file for 'localhost'
CERT_localhost = os.path.join(here, 'keycert.pem')
# Self-signed cert file for 'fakehostname'
CERT_fakehostname = os.path.join(here, 'keycert2.pem')
# Self-signed cert file for self-signed.pythontest.net
CERT_selfsigned_pythontestdotnet = os.path.join(here, 'selfsigned_pythontestdotnet.pem')
# constants for testing chunked encoding
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd! \r\n'
'8\r\n'
'and now \r\n'
'22\r\n'
'for something completely different\r\n'
)
chunked_expected = b'hello world! and now for something completely different'
chunk_extension = ";foo=bar"
last_chunk = "0\r\n"
last_chunk_extended = "0" + chunk_extension + "\r\n"
trailers = "X-Dummy: foo\r\nX-Dumm2: bar\r\n"
chunked_end = "\r\n"
HOST = socket_helper.HOST
class FakeSocket:
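    """A minimal socket stand-in: records everything written via sendall()
    in self.data and serves the canned response text through makefile()."""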
def __init__(self, text, fileclass=io.BytesIO, host=None, port=None):
if isinstance(text, str):
text = text.encode("ascii")
self.text = text
self.fileclass = fileclass
self.data = b''
self.sendall_calls = 0
self.file_closed = False
self.host = host
self.port = port
def sendall(self, data):
self.sendall_calls += 1
self.data += data
def makefile(self, mode, bufsize=None):
if mode != 'r' and mode != 'rb':
raise client.UnimplementedFileMode()
# keep the file around so we can check how much was read from it
self.file = self.fileclass(self.text)
        self.file.close = self.file_close  # nerf close()
return self.file
def file_close(self):
self.file_closed = True
def close(self):
pass
def setsockopt(self, level, optname, value):
pass
class EPipeSocket(FakeSocket):
def __init__(self, text, pipe_trigger):
# When sendall() is called with pipe_trigger, raise EPIPE.
FakeSocket.__init__(self, text)
self.pipe_trigger = pipe_trigger
def sendall(self, data):
if self.pipe_trigger in data:
raise OSError(errno.EPIPE, "gotcha")
self.data += data
def close(self):
pass
class NoEOFBytesIO(io.BytesIO):
"""Like BytesIO, but raises AssertionError on EOF.
This is used below to test that http.client doesn't try to read
more from the underlying file than it should.
"""
def read(self, n=-1):
data = io.BytesIO.read(self, n)
if data == b'':
raise AssertionError('caller tried to read past EOF')
return data
def readline(self, length=None):
data = io.BytesIO.readline(self, length)
if data == b'':
raise AssertionError('caller tried to read past EOF')
return data
class FakeSocketHTTPConnection(client.HTTPConnection):
"""HTTPConnection subclass using FakeSocket; counts connect() calls"""
def __init__(self, *args):
self.connections = 0
super().__init__('example.com')
self.fake_socket_args = args
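        # route the base class's socket creation through the fake factory
        # below, so connect() produces a FakeSocket instead of a real socket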
self._create_connection = self.create_connection
def connect(self):
"""Count the number of times connect() is invoked"""
self.connections += 1
return super().connect()
def create_connection(self, *pos, **kw):
return FakeSocket(*self.fake_socket_args)
class HeaderTests(TestCase):
def test_auto_headers(self):
# Some headers are added automatically, but should not be added by
# .request() if they are explicitly set.
class HeaderCountingBuffer(list):
def __init__(self):
self.count = {}
def append(self, item):
kv = item.split(b':')
if len(kv) > 1:
# item is a 'Key: Value' header string
lcKey = kv[0].decode('ascii').lower()
self.count.setdefault(lcKey, 0)
self.count[lcKey] += 1
list.append(self, item)
for explicit_header in True, False:
for header in 'Content-length', 'Host', 'Accept-encoding':
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('blahblahblah')
conn._buffer = HeaderCountingBuffer()
body = 'spamspamspam'
headers = {}
if explicit_header:
headers[header] = str(len(body))
conn.request('POST', '/', body, headers)
self.assertEqual(conn._buffer.count[header.lower()], 1)
def test_content_length_0(self):
class ContentLengthChecker(list):
def __init__(self):
list.__init__(self)
self.content_length = None
def append(self, item):
kv = item.split(b':', 1)
if len(kv) > 1 and kv[0].lower() == b'content-length':
self.content_length = kv[1].strip()
list.append(self, item)
# Here, we're testing that methods expecting a body get a
# content-length set to zero if the body is empty (either None or '')
bodies = (None, '')
methods_with_body = ('PUT', 'POST', 'PATCH')
for method, body in itertools.product(methods_with_body, bodies):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', body)
self.assertEqual(
conn._buffer.content_length, b'0',
'Header Content-Length incorrect on {}'.format(method)
)
# For these methods, we make sure that content-length is not set when
# the body is None because it might cause unexpected behaviour on the
# server.
methods_without_body = (
'GET', 'CONNECT', 'DELETE', 'HEAD', 'OPTIONS', 'TRACE',
)
for method in methods_without_body:
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', None)
self.assertEqual(
conn._buffer.content_length, None,
'Header Content-Length set for empty body on {}'.format(method)
)
# If the body is set to '', that's considered to be "present but
# empty" rather than "missing", so content length would be set, even
# for methods that don't expect a body.
for method in methods_without_body:
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', '')
self.assertEqual(
conn._buffer.content_length, b'0',
'Header Content-Length incorrect on {}'.format(method)
)
# If the body is set, make sure Content-Length is set.
for method in itertools.chain(methods_without_body, methods_with_body):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', ' ')
self.assertEqual(
conn._buffer.content_length, b'1',
'Header Content-Length incorrect on {}'.format(method)
)
def test_putheader(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn.putrequest('GET','/')
conn.putheader('Content-length', 42)
self.assertIn(b'Content-length: 42', conn._buffer)
conn.putheader('Foo', ' bar ')
self.assertIn(b'Foo: bar ', conn._buffer)
conn.putheader('Bar', '\tbaz\t')
self.assertIn(b'Bar: \tbaz\t', conn._buffer)
conn.putheader('Authorization', 'Bearer mytoken')
self.assertIn(b'Authorization: Bearer mytoken', conn._buffer)
conn.putheader('IterHeader', 'IterA', 'IterB')
self.assertIn(b'IterHeader: IterA\r\n\tIterB', conn._buffer)
conn.putheader('LatinHeader', b'\xFF')
self.assertIn(b'LatinHeader: \xFF', conn._buffer)
conn.putheader('Utf8Header', b'\xc3\x80')
self.assertIn(b'Utf8Header: \xc3\x80', conn._buffer)
conn.putheader('C1-Control', b'next\x85line')
self.assertIn(b'C1-Control: next\x85line', conn._buffer)
conn.putheader('Embedded-Fold-Space', 'is\r\n allowed')
self.assertIn(b'Embedded-Fold-Space: is\r\n allowed', conn._buffer)
conn.putheader('Embedded-Fold-Tab', 'is\r\n\tallowed')
self.assertIn(b'Embedded-Fold-Tab: is\r\n\tallowed', conn._buffer)
conn.putheader('Key Space', 'value')
self.assertIn(b'Key Space: value', conn._buffer)
conn.putheader('KeySpace ', 'value')
self.assertIn(b'KeySpace : value', conn._buffer)
conn.putheader(b'Nonbreak\xa0Space', 'value')
self.assertIn(b'Nonbreak\xa0Space: value', conn._buffer)
conn.putheader(b'\xa0NonbreakSpace', 'value')
self.assertIn(b'\xa0NonbreakSpace: value', conn._buffer)
def test_ipv6host_header(self):
# Default host header on IPv6 transaction should be wrapped by [] if
# it is an IPv6 address
expected = b'GET /foo HTTP/1.1\r\nHost: [2001::]:81\r\n' \
b'Accept-Encoding: identity\r\n\r\n'
conn = client.HTTPConnection('[2001::]:81')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
expected = b'GET /foo HTTP/1.1\r\nHost: [2001:102A::]\r\n' \
b'Accept-Encoding: identity\r\n\r\n'
conn = client.HTTPConnection('[2001:102A::]')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
def test_malformed_headers_coped_with(self):
# Issue 19996
body = "HTTP/1.1 200 OK\r\nFirst: val\r\n: nval\r\nSecond: val\r\n\r\n"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.getheader('First'), 'val')
self.assertEqual(resp.getheader('Second'), 'val')
def test_parse_all_octets(self):
# Ensure no valid header field octet breaks the parser
body = (
b'HTTP/1.1 200 OK\r\n'
b"!#$%&'*+-.^_`|~: value\r\n" # Special token characters
b'VCHAR: ' + bytes(range(0x21, 0x7E + 1)) + b'\r\n'
b'obs-text: ' + bytes(range(0x80, 0xFF + 1)) + b'\r\n'
b'obs-fold: text\r\n'
b' folded with space\r\n'
b'\tfolded with tab\r\n'
b'Content-Length: 0\r\n'
b'\r\n'
)
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.getheader('Content-Length'), '0')
self.assertEqual(resp.msg['Content-Length'], '0')
self.assertEqual(resp.getheader("!#$%&'*+-.^_`|~"), 'value')
self.assertEqual(resp.msg["!#$%&'*+-.^_`|~"], 'value')
vchar = ''.join(map(chr, range(0x21, 0x7E + 1)))
self.assertEqual(resp.getheader('VCHAR'), vchar)
self.assertEqual(resp.msg['VCHAR'], vchar)
self.assertIsNotNone(resp.getheader('obs-text'))
self.assertIn('obs-text', resp.msg)
for folded in (resp.getheader('obs-fold'), resp.msg['obs-fold']):
self.assertTrue(folded.startswith('text'))
self.assertIn(' folded with space', folded)
self.assertTrue(folded.endswith('folded with tab'))
def test_invalid_headers(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/')
# http://tools.ietf.org/html/rfc7230#section-3.2.4, whitespace is no
# longer allowed in header names
cases = (
(b'Invalid\r\nName', b'ValidValue'),
(b'Invalid\rName', b'ValidValue'),
(b'Invalid\nName', b'ValidValue'),
(b'\r\nInvalidName', b'ValidValue'),
(b'\rInvalidName', b'ValidValue'),
(b'\nInvalidName', b'ValidValue'),
(b' InvalidName', b'ValidValue'),
(b'\tInvalidName', b'ValidValue'),
(b'Invalid:Name', b'ValidValue'),
(b':InvalidName', b'ValidValue'),
(b'ValidName', b'Invalid\r\nValue'),
(b'ValidName', b'Invalid\rValue'),
(b'ValidName', b'Invalid\nValue'),
(b'ValidName', b'InvalidValue\r\n'),
(b'ValidName', b'InvalidValue\r'),
(b'ValidName', b'InvalidValue\n'),
)
for name, value in cases:
with self.subTest((name, value)):
with self.assertRaisesRegex(ValueError, 'Invalid header'):
conn.putheader(name, value)
def test_headers_debuglevel(self):
body = (
b'HTTP/1.1 200 OK\r\n'
b'First: val\r\n'
b'Second: val1\r\n'
b'Second: val2\r\n'
)
sock = FakeSocket(body)
resp = client.HTTPResponse(sock, debuglevel=1)
with support.captured_stdout() as output:
resp.begin()
lines = output.getvalue().splitlines()
self.assertEqual(lines[0], "reply: 'HTTP/1.1 200 OK\\r\\n'")
self.assertEqual(lines[1], "header: First: val")
self.assertEqual(lines[2], "header: Second: val1")
self.assertEqual(lines[3], "header: Second: val2")
class HttpMethodTests(TestCase):
def test_invalid_method_names(self):
methods = (
'GET\r',
'POST\n',
'PUT\n\r',
'POST\nValue',
'POST\nHOST:abc',
'GET\nrHost:abc\n',
'POST\rRemainder:\r',
'GET\rHOST:\n',
'\nPUT'
)
for method in methods:
with self.assertRaisesRegex(
ValueError, "method can't contain control characters"):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn.request(method=method, url="/")
class TransferEncodingTest(TestCase):
expected_body = b"It's just a flesh wound"
def test_endheaders_chunked(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.putrequest('POST', '/')
conn.endheaders(self._make_body(), encode_chunked=True)
_, _, body = self._parse_request(conn.sock.data)
body = self._parse_chunked(body)
self.assertEqual(body, self.expected_body)
def test_explicit_headers(self):
# explicit chunked
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
# this shouldn't actually be automatically chunk-encoded because the
# calling code has explicitly stated that it's taking care of it
conn.request(
'POST', '/', self._make_body(), {'Transfer-Encoding': 'chunked'})
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers.keys()])
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertEqual(body, self.expected_body)
# explicit chunked, string body
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request(
'POST', '/', self.expected_body.decode('latin-1'),
{'Transfer-Encoding': 'chunked'})
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers.keys()])
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertEqual(body, self.expected_body)
# User-specified TE, but request() does the chunk encoding
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request('POST', '/',
headers={'Transfer-Encoding': 'gzip, chunked'},
encode_chunked=True,
body=self._make_body())
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers])
self.assertEqual(headers['Transfer-Encoding'], 'gzip, chunked')
self.assertEqual(self._parse_chunked(body), self.expected_body)
def test_request(self):
for empty_lines in (False, True,):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request(
'POST', '/', self._make_body(empty_lines=empty_lines))
_, headers, body = self._parse_request(conn.sock.data)
body = self._parse_chunked(body)
self.assertEqual(body, self.expected_body)
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
# Content-Length and Transfer-Encoding SHOULD not be sent in the
# same request
self.assertNotIn('content-length', [k.lower() for k in headers])
def test_empty_body(self):
# Zero-length iterable should be treated like any other iterable
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request('POST', '/', ())
_, headers, body = self._parse_request(conn.sock.data)
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertNotIn('content-length', [k.lower() for k in headers])
self.assertEqual(body, b"0\r\n\r\n")
def _make_body(self, empty_lines=False):
lines = self.expected_body.split(b' ')
for idx, line in enumerate(lines):
# for testing handling empty lines
if empty_lines and idx % 2:
yield b''
if idx < len(lines) - 1:
yield line + b' '
else:
yield line
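    # Illustrative output of the generator above: _make_body() yields
    # b"It's ", b"just ", b"a ", b"flesh ", b"wound"; with empty_lines=True,
    # an empty b'' is also yielded before each odd-indexed piece.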
def _parse_request(self, data):
lines = data.split(b'\r\n')
request = lines[0]
headers = {}
n = 1
while n < len(lines) and len(lines[n]) > 0:
key, val = lines[n].split(b':')
key = key.decode('latin-1').strip()
headers[key] = val.decode('latin-1').strip()
n += 1
return request, headers, b'\r\n'.join(lines[n + 1:])
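    # Illustrative behavior of the helper above:
    #   _parse_request(b'GET / HTTP/1.1\r\nHost: x\r\n\r\nbody')
    #   -> (b'GET / HTTP/1.1', {'Host': 'x'}, b'body')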
def _parse_chunked(self, data):
body = []
trailers = {}
n = 0
lines = data.split(b'\r\n')
# parse body
while True:
size, chunk = lines[n:n+2]
size = int(size, 16)
if size == 0:
n += 1
break
self.assertEqual(size, len(chunk))
body.append(chunk)
n += 2
# we /should/ hit the end chunk, but check against the size of
# lines so we're not stuck in an infinite loop should we get
# malformed data
if n > len(lines):
break
return b''.join(body)
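    # Illustrative behavior of the helper above:
    #   _parse_chunked(b'3\r\nabc\r\n0\r\n\r\n') -> b'abc'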
class BasicTest(TestCase):
def test_dir_with_added_behavior_on_status(self):
# see issue40084
self.assertTrue({'description', 'name', 'phrase', 'value'} <= set(dir(HTTPStatus(404))))
def test_simple_httpstatus(self):
class CheckedHTTPStatus(enum.IntEnum):
"""HTTP status codes and reason phrases
Status codes from the following RFCs are all observed:
* RFC 7231: Hypertext Transfer Protocol (HTTP/1.1), obsoletes 2616
* RFC 6585: Additional HTTP Status Codes
* RFC 3229: Delta encoding in HTTP
* RFC 4918: HTTP Extensions for WebDAV, obsoletes 2518
* RFC 5842: Binding Extensions to WebDAV
* RFC 7238: Permanent Redirect
* RFC 2295: Transparent Content Negotiation in HTTP
* RFC 2774: An HTTP Extension Framework
* RFC 7725: An HTTP Status Code to Report Legal Obstacles
* RFC 7540: Hypertext Transfer Protocol Version 2 (HTTP/2)
* RFC 2324: Hyper Text Coffee Pot Control Protocol (HTCPCP/1.0)
* RFC 8297: An HTTP Status Code for Indicating Hints
* RFC 8470: Using Early Data in HTTP
"""
def __new__(cls, value, phrase, description=''):
obj = int.__new__(cls, value)
obj._value_ = value
obj.phrase = phrase
obj.description = description
return obj
# informational
CONTINUE = 100, 'Continue', 'Request received, please continue'
SWITCHING_PROTOCOLS = (101, 'Switching Protocols',
'Switching to new protocol; obey Upgrade header')
PROCESSING = 102, 'Processing'
EARLY_HINTS = 103, 'Early Hints'
# success
OK = 200, 'OK', 'Request fulfilled, document follows'
CREATED = 201, 'Created', 'Document created, URL follows'
ACCEPTED = (202, 'Accepted',
'Request accepted, processing continues off-line')
NON_AUTHORITATIVE_INFORMATION = (203,
'Non-Authoritative Information', 'Request fulfilled from cache')
NO_CONTENT = 204, 'No Content', 'Request fulfilled, nothing follows'
RESET_CONTENT = 205, 'Reset Content', 'Clear input form for further input'
PARTIAL_CONTENT = 206, 'Partial Content', 'Partial content follows'
MULTI_STATUS = 207, 'Multi-Status'
ALREADY_REPORTED = 208, 'Already Reported'
IM_USED = 226, 'IM Used'
# redirection
MULTIPLE_CHOICES = (300, 'Multiple Choices',
'Object has several resources -- see URI list')
MOVED_PERMANENTLY = (301, 'Moved Permanently',
'Object moved permanently -- see URI list')
FOUND = 302, 'Found', 'Object moved temporarily -- see URI list'
SEE_OTHER = 303, 'See Other', 'Object moved -- see Method and URL list'
NOT_MODIFIED = (304, 'Not Modified',
'Document has not changed since given time')
USE_PROXY = (305, 'Use Proxy',
'You must use proxy specified in Location to access this resource')
TEMPORARY_REDIRECT = (307, 'Temporary Redirect',
'Object moved temporarily -- see URI list')
PERMANENT_REDIRECT = (308, 'Permanent Redirect',
'Object moved permanently -- see URI list')
# client error
BAD_REQUEST = (400, 'Bad Request',
'Bad request syntax or unsupported method')
UNAUTHORIZED = (401, 'Unauthorized',
'No permission -- see authorization schemes')
PAYMENT_REQUIRED = (402, 'Payment Required',
'No payment -- see charging schemes')
FORBIDDEN = (403, 'Forbidden',
'Request forbidden -- authorization will not help')
NOT_FOUND = (404, 'Not Found',
'Nothing matches the given URI')
METHOD_NOT_ALLOWED = (405, 'Method Not Allowed',
'Specified method is invalid for this resource')
NOT_ACCEPTABLE = (406, 'Not Acceptable',
'URI not available in preferred format')
PROXY_AUTHENTICATION_REQUIRED = (407,
'Proxy Authentication Required',
'You must authenticate with this proxy before proceeding')
REQUEST_TIMEOUT = (408, 'Request Timeout',
'Request timed out; try again later')
CONFLICT = 409, 'Conflict', 'Request conflict'
GONE = (410, 'Gone',
'URI no longer exists and has been permanently removed')
LENGTH_REQUIRED = (411, 'Length Required',
'Client must specify Content-Length')
PRECONDITION_FAILED = (412, 'Precondition Failed',
'Precondition in headers is false')
REQUEST_ENTITY_TOO_LARGE = (413, 'Request Entity Too Large',
'Entity is too large')
REQUEST_URI_TOO_LONG = (414, 'Request-URI Too Long',
'URI is too long')
UNSUPPORTED_MEDIA_TYPE = (415, 'Unsupported Media Type',
'Entity body in unsupported format')
REQUESTED_RANGE_NOT_SATISFIABLE = (416,
'Requested Range Not Satisfiable',
'Cannot satisfy request range')
EXPECTATION_FAILED = (417, 'Expectation Failed',
'Expect condition could not be satisfied')
IM_A_TEAPOT = (418, 'I\'m a Teapot',
'Server refuses to brew coffee because it is a teapot.')
MISDIRECTED_REQUEST = (421, 'Misdirected Request',
'Server is not able to produce a response')
UNPROCESSABLE_ENTITY = 422, 'Unprocessable Entity'
LOCKED = 423, 'Locked'
FAILED_DEPENDENCY = 424, 'Failed Dependency'
TOO_EARLY = 425, 'Too Early'
UPGRADE_REQUIRED = 426, 'Upgrade Required'
PRECONDITION_REQUIRED = (428, 'Precondition Required',
'The origin server requires the request to be conditional')
TOO_MANY_REQUESTS = (429, 'Too Many Requests',
'The user has sent too many requests in '
'a given amount of time ("rate limiting")')
REQUEST_HEADER_FIELDS_TOO_LARGE = (431,
'Request Header Fields Too Large',
'The server is unwilling to process the request because its header '
'fields are too large')
UNAVAILABLE_FOR_LEGAL_REASONS = (451,
'Unavailable For Legal Reasons',
'The server is denying access to the '
'resource as a consequence of a legal demand')
# server errors
INTERNAL_SERVER_ERROR = (500, 'Internal Server Error',
'Server got itself in trouble')
NOT_IMPLEMENTED = (501, 'Not Implemented',
'Server does not support this operation')
BAD_GATEWAY = (502, 'Bad Gateway',
'Invalid responses from another server/proxy')
SERVICE_UNAVAILABLE = (503, 'Service Unavailable',
'The server cannot process the request due to a high load')
GATEWAY_TIMEOUT = (504, 'Gateway Timeout',
'The gateway server did not receive a timely response')
HTTP_VERSION_NOT_SUPPORTED = (505, 'HTTP Version Not Supported',
'Cannot fulfill request')
VARIANT_ALSO_NEGOTIATES = 506, 'Variant Also Negotiates'
INSUFFICIENT_STORAGE = 507, 'Insufficient Storage'
LOOP_DETECTED = 508, 'Loop Detected'
NOT_EXTENDED = 510, 'Not Extended'
NETWORK_AUTHENTICATION_REQUIRED = (511,
'Network Authentication Required',
'The client needs to authenticate to gain network access')
enum._test_simple_enum(CheckedHTTPStatus, HTTPStatus)
def test_status_lines(self):
# Test HTTP status lines
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(0), b'') # Issue #20007
self.assertFalse(resp.isclosed())
self.assertFalse(resp.closed)
self.assertEqual(resp.read(), b"Text")
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
body = "HTTP/1.1 400.100 Not Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
self.assertRaises(client.BadStatusLine, resp.begin)
def test_bad_status_repr(self):
exc = client.BadStatusLine('')
self.assertEqual(repr(exc), '''BadStatusLine("''")''')
def test_partial_reads(self):
# if we have Content-Length, HTTPResponse knows when to close itself,
# the same behaviour as when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_mixed_reads(self):
# readline() should update the remaining length, so that read() knows
# how much data is left and does not raise IncompleteRead
body = "HTTP/1.1 200 Ok\r\nContent-Length: 13\r\n\r\nText\r\nAnother"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.readline(), b'Text\r\n')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(), b'Another')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos(self):
# if we have Content-Length, HTTPResponse knows when to close itself,
# the same behaviour as when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_reads_past_end(self):
# if we have Content-Length, clip reads to the end
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(10), b'Text')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos_past_end(self):
# if we have Content-Length, clip readintos to the end
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(10)
n = resp.readinto(b)
self.assertEqual(n, 4)
self.assertEqual(bytes(b)[:4], b'Text')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_reads_no_content_length(self):
# when no length is present, the socket should be gracefully closed when
# all data was read
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertEqual(resp.read(1), b'')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos_no_content_length(self):
# when no length is present, the socket should be gracefully closed when
# all data was read
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertTrue(resp.isclosed())
def test_partial_reads_incomplete_body(self):
# if the server shuts down the connection before the whole
# content-length is delivered, the socket is gracefully closed
body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertEqual(resp.read(1), b'')
self.assertTrue(resp.isclosed())
def test_partial_readintos_incomplete_body(self):
# if the server shuts down the connection before the whole
# content-length is delivered, the socket is gracefully closed
body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_host_port(self):
# Check invalid host_port
for hp in ("www.python.org:abc", "user:password@www.python.org"):
self.assertRaises(client.InvalidURL, client.HTTPConnection, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
"fe80::207:e9ff:fe9b", 8000),
("www.python.org:80", "www.python.org", 80),
("www.python.org:", "www.python.org", 80),
("www.python.org", "www.python.org", 80),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 80),
("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b", 80)):
c = client.HTTPConnection(hp)
self.assertEqual(h, c.host)
self.assertEqual(p, c.port)
def test_response_headers(self):
# test response with multiple message headers with the same field name.
text = ('HTTP/1.1 200 OK\r\n'
'Set-Cookie: Customer="WILE_E_COYOTE"; '
'Version="1"; Path="/acme"\r\n'
'Set-Cookie: Part_Number="Rocket_Launcher_0001"; Version="1";'
' Path="/acme"\r\n'
'\r\n'
'No body\r\n')
hdr = ('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"'
', '
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"')
s = FakeSocket(text)
r = client.HTTPResponse(s)
r.begin()
cookies = r.getheader("Set-Cookie")
self.assertEqual(cookies, hdr)
def test_read_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFBytesIO)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
if resp.read():
self.fail("Did not expect response from HEAD request")
def test_readinto_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFBytesIO)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
b = bytearray(5)
if resp.readinto(b) != 0:
self.fail("Did not expect response from HEAD request")
self.assertEqual(bytes(b), b'\x00'*5)
def test_too_many_headers(self):
headers = '\r\n'.join('Header%d: foo' % i
for i in range(client._MAXHEADERS + 1)) + '\r\n'
text = ('HTTP/1.1 200 OK\r\n' + headers)
s = FakeSocket(text)
r = client.HTTPResponse(s)
self.assertRaisesRegex(client.HTTPException,
r"got more than \d+ headers", r.begin)
def test_send_file(self):
expected = (b'GET /foo HTTP/1.1\r\nHost: example.com\r\n'
b'Accept-Encoding: identity\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n')
with open(__file__, 'rb') as body:
conn = client.HTTPConnection('example.com')
sock = FakeSocket(body)
conn.sock = sock
conn.request('GET', '/foo', body)
self.assertTrue(sock.data.startswith(expected), '%r != %r' %
(sock.data[:len(expected)], expected))
def test_send(self):
expected = b'this is a test this is only a test'
conn = client.HTTPConnection('example.com')
sock = FakeSocket(None)
conn.sock = sock
conn.send(expected)
self.assertEqual(expected, sock.data)
sock.data = b''
conn.send(array.array('b', expected))
self.assertEqual(expected, sock.data)
sock.data = b''
conn.send(io.BytesIO(expected))
self.assertEqual(expected, sock.data)
def test_send_updating_file(self):
def data():
yield 'data'
yield None
yield 'data_two'
class UpdatingFile(io.TextIOBase):
mode = 'r'
d = data()
def read(self, blocksize=-1):
return next(self.d)
expected = b'data'
conn = client.HTTPConnection('example.com')
sock = FakeSocket("")
conn.sock = sock
conn.send(UpdatingFile())
self.assertEqual(sock.data, expected)
def test_send_iter(self):
expected = b'GET /foo HTTP/1.1\r\nHost: example.com\r\n' \
b'Accept-Encoding: identity\r\nContent-Length: 11\r\n' \
b'\r\nonetwothree'
def body():
yield b"one"
yield b"two"
yield b"three"
conn = client.HTTPConnection('example.com')
sock = FakeSocket("")
conn.sock = sock
conn.request('GET', '/foo', body(), {'Content-Length': '11'})
self.assertEqual(sock.data, expected)
def test_blocksize_request(self):
"""Check that request() respects the configured block size."""
blocksize = 8 # For easy debugging.
conn = client.HTTPConnection('example.com', blocksize=blocksize)
sock = FakeSocket(None)
conn.sock = sock
expected = b"a" * blocksize + b"b"
conn.request("PUT", "/", io.BytesIO(expected), {"Content-Length": "9"})
self.assertEqual(sock.sendall_calls, 3)
body = sock.data.split(b"\r\n\r\n", 1)[1]
self.assertEqual(body, expected)
def test_blocksize_send(self):
"""Check that send() respects the configured block size."""
blocksize = 8 # For easy debugging.
conn = client.HTTPConnection('example.com', blocksize=blocksize)
sock = FakeSocket(None)
conn.sock = sock
expected = b"a" * blocksize + b"b"
conn.send(io.BytesIO(expected))
self.assertEqual(sock.sendall_calls, 2)
self.assertEqual(sock.data, expected)
def test_send_type_error(self):
# See: Issue #12676
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('')
with self.assertRaises(TypeError):
conn.request('POST', 'test', conn)
def test_chunked(self):
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
# Various read sizes
for n in range(1, 12):
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(n) + resp.read(n) + resp.read(), expected)
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except client.IncompleteRead as i:
self.assertEqual(i.partial, expected)
expected_message = 'IncompleteRead(%d bytes read)' % len(expected)
self.assertEqual(repr(i), expected_message)
self.assertEqual(str(i), expected_message)
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_readinto_chunked(self):
expected = chunked_expected
nexpected = len(expected)
b = bytearray(128)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
n = resp.readinto(b)
self.assertEqual(b[:nexpected], expected)
self.assertEqual(n, nexpected)
resp.close()
# Various read sizes
for n in range(1, 12):
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
m = memoryview(b)
i = resp.readinto(m[0:n])
i += resp.readinto(m[i:n + i])
i += resp.readinto(m[i:])
self.assertEqual(b[:nexpected], expected)
self.assertEqual(i, nexpected)
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
n = resp.readinto(b)
except client.IncompleteRead as i:
self.assertEqual(i.partial, expected)
expected_message = 'IncompleteRead(%d bytes read)' % len(expected)
self.assertEqual(repr(i), expected_message)
self.assertEqual(str(i), expected_message)
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
self.assertEqual(resp.read(), b'')
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_readinto_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
b = bytearray(5)
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertEqual(bytes(b), b'\x00'*5)
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_negative_content_length(self):
sock = FakeSocket(
'HTTP/1.1 200 OK\r\nContent-Length: -1\r\n\r\nHello\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), b'Hello\r\n')
self.assertTrue(resp.isclosed())
def test_incomplete_read(self):
sock = FakeSocket('HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHello\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except client.IncompleteRead as i:
self.assertEqual(i.partial, b'Hello\r\n')
self.assertEqual(repr(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertEqual(str(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertTrue(resp.isclosed())
else:
self.fail('IncompleteRead expected')
def test_epipe(self):
sock = EPipeSocket(
"HTTP/1.0 401 Authorization Required\r\n"
"Content-type: text/html\r\n"
"WWW-Authenticate: Basic realm=\"example\"\r\n",
b"Content-Length")
conn = client.HTTPConnection("example.com")
conn.sock = sock
self.assertRaises(OSError,
lambda: conn.request("PUT", "/url", "body"))
resp = conn.getresponse()
self.assertEqual(401, resp.status)
self.assertEqual("Basic realm=\"example\"",
resp.getheader("www-authenticate"))
# Test lines overflowing the max line size (_MAXLINE in http.client)
def test_overflowing_status_line(self):
body = "HTTP/1.1 200 Ok" + "k" * 65536 + "\r\n"
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises((client.LineTooLong, client.BadStatusLine), resp.begin)
def test_overflowing_header_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'X-Foo: bar' + 'r' * 65536 + '\r\n\r\n'
)
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises(client.LineTooLong, resp.begin)
def test_overflowing_header_limit_after_100(self):
body = (
'HTTP/1.1 100 OK\r\n'
'r\n' * 32768
)
resp = client.HTTPResponse(FakeSocket(body))
with self.assertRaises(client.HTTPException) as cm:
resp.begin()
# We must assert more because other reasonable errors that we
# do not want can also be HTTPException derived.
self.assertIn('got more than ', str(cm.exception))
self.assertIn('headers', str(cm.exception))
def test_overflowing_chunked_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
+ '0' * 65536 + 'a\r\n'
'hello world\r\n'
'0\r\n'
'\r\n'
)
resp = client.HTTPResponse(FakeSocket(body))
resp.begin()
self.assertRaises(client.LineTooLong, resp.read)
def test_early_eof(self):
        # Test HTTPResponse with no \r\n termination.
body = "HTTP/1.1 200 Ok"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(), b'')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_error_leak(self):
# Test that the socket is not leaked if getresponse() fails
conn = client.HTTPConnection('example.com')
response = None
class Response(client.HTTPResponse):
def __init__(self, *pos, **kw):
nonlocal response
response = self # Avoid garbage collector closing the socket
client.HTTPResponse.__init__(self, *pos, **kw)
conn.response_class = Response
conn.sock = FakeSocket('Invalid status line')
conn.request('GET', '/')
self.assertRaises(client.BadStatusLine, conn.getresponse)
self.assertTrue(response.closed)
self.assertTrue(conn.sock.file_closed)
def test_chunked_extension(self):
extra = '3;foo=bar\r\n' + 'abc\r\n'
expected = chunked_expected + b'abc'
sock = FakeSocket(chunked_start + extra + last_chunk_extended + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
def test_chunked_missing_end(self):
"""some servers may serve up a short chunked encoding stream"""
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk) #no terminating crlf
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
def test_chunked_trailers(self):
"""See that trailers are read and ignored"""
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# we should have reached the end of the file
self.assertEqual(sock.file.read(), b"") #we read to the end
resp.close()
def test_chunked_sync(self):
"""Check that we don't read past the end of the chunked-encoding stream"""
expected = chunked_expected
extradata = "extradata"
sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata.encode("ascii")) #we read to the end
resp.close()
def test_content_length_sync(self):
"""Check that we don't read past the end of the Content-Length stream"""
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_readlines_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.readlines(2000), [expected])
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_read1_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read1(2000), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_readline_bound_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.readline(10), expected)
self.assertEqual(resp.readline(10), b"")
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_read1_bound_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 30\r\n\r\n' + expected*3 + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read1(20), expected*2)
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_response_fileno(self):
# Make sure fd returned by fileno is valid.
serv = socket.create_server((HOST, 0))
self.addCleanup(serv.close)
result = None
def run_server():
[conn, address] = serv.accept()
with conn, conn.makefile("rb") as reader:
# Read the request header until a blank line
while True:
line = reader.readline()
if not line.rstrip(b"\r\n"):
break
conn.sendall(b"HTTP/1.1 200 Connection established\r\n\r\n")
nonlocal result
result = reader.read()
thread = threading.Thread(target=run_server)
thread.start()
self.addCleanup(thread.join, float(1))
conn = client.HTTPConnection(*serv.getsockname())
conn.request("CONNECT", "dummy:1234")
response = conn.getresponse()
try:
self.assertEqual(response.status, client.OK)
s = socket.socket(fileno=response.fileno())
try:
s.sendall(b"proxied data\n")
finally:
s.detach()
finally:
response.close()
conn.close()
thread.join()
self.assertEqual(result, b"proxied data\n")
def test_putrequest_override_domain_validation(self):
"""
It should be possible to override the default validation
behavior in putrequest (bpo-38216).
"""
class UnsafeHTTPConnection(client.HTTPConnection):
def _validate_path(self, url):
pass
conn = UnsafeHTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/\x00')
def test_putrequest_override_host_validation(self):
class UnsafeHTTPConnection(client.HTTPConnection):
def _validate_host(self, url):
pass
conn = UnsafeHTTPConnection('example.com\r\n')
conn.sock = FakeSocket('')
# set skip_host so a ValueError is not raised upon adding the
# invalid URL as the value of the "Host:" header
conn.putrequest('GET', '/', skip_host=1)
def test_putrequest_override_encoding(self):
"""
It should be possible to override the default encoding
to transmit bytes in another encoding even if invalid
(bpo-36274).
"""
class UnsafeHTTPConnection(client.HTTPConnection):
def _encode_request(self, str_url):
return str_url.encode('utf-8')
conn = UnsafeHTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/☃')
class ExtendedReadTest(TestCase):
"""
Test peek(), read1(), readline()
"""
lines = (
'HTTP/1.1 200 OK\r\n'
'\r\n'
'hello world!\n'
'and now \n'
'for something completely different\n'
'foo'
)
lines_expected = lines[lines.find('hello'):].encode("ascii")
lines_chunked = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd!\n\r\n'
'9\r\n'
'and now \n\r\n'
'23\r\n'
'for something completely different\n\r\n'
'3\r\n'
'foo\r\n'
'0\r\n' # terminating chunk
'\r\n' # end of trailers
)
def setUp(self):
sock = FakeSocket(self.lines)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
resp.fp = io.BufferedReader(resp.fp)
self.resp = resp
def test_peek(self):
resp = self.resp
# patch up the buffered peek so that it returns not too much stuff
oldpeek = resp.fp.peek
def mypeek(n=-1):
p = oldpeek(n)
if n >= 0:
return p[:n]
return p[:10]
resp.fp.peek = mypeek
all = []
while True:
# try a short peek
p = resp.peek(3)
if p:
self.assertGreater(len(p), 0)
# then unbounded peek
p2 = resp.peek()
self.assertGreaterEqual(len(p2), len(p))
self.assertTrue(p2.startswith(p))
next = resp.read(len(p2))
self.assertEqual(next, p2)
else:
next = resp.read()
self.assertFalse(next)
all.append(next)
if not next:
break
self.assertEqual(b"".join(all), self.lines_expected)
def test_readline(self):
resp = self.resp
self._verify_readline(self.resp.readline, self.lines_expected)
def _verify_readline(self, readline, expected):
all = []
while True:
# short readlines
line = readline(5)
if line and line != b"foo":
if len(line) < 5:
self.assertTrue(line.endswith(b"\n"))
all.append(line)
if not line:
break
self.assertEqual(b"".join(all), expected)
def test_read1(self):
resp = self.resp
def r():
res = resp.read1(4)
self.assertLessEqual(len(res), 4)
return res
readliner = Readliner(r)
self._verify_readline(readliner.readline, self.lines_expected)
def test_read1_unbounded(self):
resp = self.resp
all = []
while True:
data = resp.read1()
if not data:
break
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
def test_read1_bounded(self):
resp = self.resp
all = []
while True:
data = resp.read1(10)
if not data:
break
self.assertLessEqual(len(data), 10)
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
def test_read1_0(self):
self.assertEqual(self.resp.read1(0), b"")
def test_peek_0(self):
p = self.resp.peek(0)
self.assertLessEqual(0, len(p))
class ExtendedReadTestChunked(ExtendedReadTest):
"""
Test peek(), read1(), readline() in chunked mode
"""
    lines = ExtendedReadTest.lines_chunked
class Readliner:
"""
a simple readline class that uses an arbitrary read function and buffering
"""
def __init__(self, readfunc):
self.readfunc = readfunc
self.remainder = b""
def readline(self, limit):
data = []
datalen = 0
read = self.remainder
try:
while True:
idx = read.find(b'\n')
if idx != -1:
break
                if datalen + len(read) >= limit:
                    idx = limit - datalen - 1
                    break
                # read more data
                data.append(read)
                datalen += len(read)
                read = self.readfunc()
                if not read:
                    idx = 0  # eof condition
                    break
            idx += 1
data.append(read[:idx])
self.remainder = read[idx:]
return b"".join(data)
except:
self.remainder = b"".join(data)
raise
class OfflineTest(TestCase):
def test_all(self):
# Documented objects defined in the module should be in __all__
expected = {"responses"} # Allowlist documented dict() object
# HTTPMessage, parse_headers(), and the HTTP status code constants are
# intentionally omitted for simplicity
denylist = {"HTTPMessage", "parse_headers"}
for name in dir(client):
if name.startswith("_") or name in denylist:
continue
module_object = getattr(client, name)
if getattr(module_object, "__module__", None) == "http.client":
expected.add(name)
self.assertCountEqual(client.__all__, expected)
def test_responses(self):
self.assertEqual(client.responses[client.NOT_FOUND], "Not Found")
def test_client_constants(self):
# Make sure we don't break backward compatibility with 3.4
expected = [
'CONTINUE',
'SWITCHING_PROTOCOLS',
'PROCESSING',
'OK',
'CREATED',
'ACCEPTED',
'NON_AUTHORITATIVE_INFORMATION',
'NO_CONTENT',
'RESET_CONTENT',
'PARTIAL_CONTENT',
'MULTI_STATUS',
'IM_USED',
'MULTIPLE_CHOICES',
'MOVED_PERMANENTLY',
'FOUND',
'SEE_OTHER',
'NOT_MODIFIED',
'USE_PROXY',
'TEMPORARY_REDIRECT',
'BAD_REQUEST',
'UNAUTHORIZED',
'PAYMENT_REQUIRED',
'FORBIDDEN',
'NOT_FOUND',
'METHOD_NOT_ALLOWED',
'NOT_ACCEPTABLE',
'PROXY_AUTHENTICATION_REQUIRED',
'REQUEST_TIMEOUT',
'CONFLICT',
'GONE',
'LENGTH_REQUIRED',
'PRECONDITION_FAILED',
'REQUEST_ENTITY_TOO_LARGE',
'REQUEST_URI_TOO_LONG',
'UNSUPPORTED_MEDIA_TYPE',
'REQUESTED_RANGE_NOT_SATISFIABLE',
'EXPECTATION_FAILED',
'IM_A_TEAPOT',
'MISDIRECTED_REQUEST',
'UNPROCESSABLE_ENTITY',
'LOCKED',
'FAILED_DEPENDENCY',
'UPGRADE_REQUIRED',
'PRECONDITION_REQUIRED',
'TOO_MANY_REQUESTS',
'REQUEST_HEADER_FIELDS_TOO_LARGE',
'UNAVAILABLE_FOR_LEGAL_REASONS',
'INTERNAL_SERVER_ERROR',
'NOT_IMPLEMENTED',
'BAD_GATEWAY',
'SERVICE_UNAVAILABLE',
'GATEWAY_TIMEOUT',
'HTTP_VERSION_NOT_SUPPORTED',
'INSUFFICIENT_STORAGE',
'NOT_EXTENDED',
'NETWORK_AUTHENTICATION_REQUIRED',
'EARLY_HINTS',
'TOO_EARLY'
]
for const in expected:
with self.subTest(constant=const):
self.assertTrue(hasattr(client, const))
class SourceAddressTest(TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = socket_helper.bind_port(self.serv)
self.source_port = socket_helper.find_unused_port()
self.serv.listen()
self.conn = None
def tearDown(self):
if self.conn:
self.conn.close()
self.conn = None
self.serv.close()
self.serv = None
def testHTTPConnectionSourceAddress(self):
self.conn = client.HTTPConnection(HOST, self.port,
source_address=('', self.source_port))
self.conn.connect()
self.assertEqual(self.conn.sock.getsockname()[1], self.source_port)
@unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
'http.client.HTTPSConnection not defined')
def testHTTPSConnectionSourceAddress(self):
self.conn = client.HTTPSConnection(HOST, self.port,
source_address=('', self.source_port))
# We don't test anything here other than the constructor not barfing as
# this code doesn't deal with setting up an active running SSL server
# for an ssl_wrapped connect() to actually return from.
class TimeoutTest(TestCase):
PORT = None
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TimeoutTest.PORT = socket_helper.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
def testTimeoutAttribute(self):
# This will prove that the timeout gets through HTTPConnection
# and into the socket.
# default -- use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
# no timeout -- do not use global socket default
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT,
timeout=None)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), None)
httpConn.close()
# a value
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT, timeout=30)
httpConn.connect()
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
class PersistenceTest(TestCase):
def test_reuse_reconnect(self):
# Should reuse or reconnect depending on header from server
tests = (
('1.0', '', False),
('1.0', 'Connection: keep-alive\r\n', True),
('1.1', '', True),
('1.1', 'Connection: close\r\n', False),
('1.0', 'Connection: keep-ALIVE\r\n', True),
('1.1', 'Connection: cloSE\r\n', False),
)
for version, header, reuse in tests:
with self.subTest(version=version, header=header):
msg = (
'HTTP/{} 200 OK\r\n'
'{}'
'Content-Length: 12\r\n'
'\r\n'
'Dummy body\r\n'
).format(version, header)
conn = FakeSocketHTTPConnection(msg)
self.assertIsNone(conn.sock)
conn.request('GET', '/open-connection')
with conn.getresponse() as response:
self.assertEqual(conn.sock is None, not reuse)
response.read()
self.assertEqual(conn.sock is None, not reuse)
self.assertEqual(conn.connections, 1)
conn.request('GET', '/subsequent-request')
self.assertEqual(conn.connections, 1 if reuse else 2)
def test_disconnected(self):
def make_reset_reader(text):
"""Return BufferedReader that raises ECONNRESET at EOF"""
stream = io.BytesIO(text)
def readinto(buffer):
size = io.BytesIO.readinto(stream, buffer)
if size == 0:
raise ConnectionResetError()
return size
stream.readinto = readinto
return io.BufferedReader(stream)
tests = (
(io.BytesIO, client.RemoteDisconnected),
(make_reset_reader, ConnectionResetError),
)
for stream_factory, exception in tests:
with self.subTest(exception=exception):
conn = FakeSocketHTTPConnection(b'', stream_factory)
conn.request('GET', '/eof-response')
self.assertRaises(exception, conn.getresponse)
self.assertIsNone(conn.sock)
# HTTPConnection.connect() should be automatically invoked
conn.request('GET', '/reconnect')
self.assertEqual(conn.connections, 2)
def test_100_close(self):
conn = FakeSocketHTTPConnection(
b'HTTP/1.1 100 Continue\r\n'
b'\r\n'
# Missing final response
)
conn.request('GET', '/', headers={'Expect': '100-continue'})
self.assertRaises(client.RemoteDisconnected, conn.getresponse)
self.assertIsNone(conn.sock)
conn.request('GET', '/reconnect')
self.assertEqual(conn.connections, 2)
class HTTPSTest(TestCase):
def setUp(self):
if not hasattr(client, 'HTTPSConnection'):
self.skipTest('ssl support required')
def make_server(self, certfile):
from test.ssl_servers import make_https_server
return make_https_server(self, certfile=certfile)
def test_attributes(self):
# simple test to check it's storing the timeout
h = client.HTTPSConnection(HOST, TimeoutTest.PORT, timeout=30)
self.assertEqual(h.timeout, 30)
def test_networked(self):
# Default settings: requires a valid cert from a trusted CA
import ssl
support.requires('network')
with socket_helper.transient_internet('self-signed.pythontest.net'):
h = client.HTTPSConnection('self-signed.pythontest.net', 443)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_networked_noverification(self):
# Switch off cert verification
import ssl
support.requires('network')
with socket_helper.transient_internet('self-signed.pythontest.net'):
context = ssl._create_unverified_context()
h = client.HTTPSConnection('self-signed.pythontest.net', 443,
context=context)
h.request('GET', '/')
resp = h.getresponse()
h.close()
self.assertIn('nginx', resp.getheader('server'))
resp.close()
@support.system_must_validate_cert
def test_networked_trusted_by_default_cert(self):
# Default settings: requires a valid cert from a trusted CA
support.requires('network')
with socket_helper.transient_internet('www.python.org'):
h = client.HTTPSConnection('www.python.org', 443)
h.request('GET', '/')
resp = h.getresponse()
content_type = resp.getheader('content-type')
resp.close()
h.close()
self.assertIn('text/html', content_type)
def test_networked_good_cert(self):
# We feed the server's cert as a validating cert
import ssl
support.requires('network')
selfsigned_pythontestdotnet = 'self-signed.pythontest.net'
with socket_helper.transient_internet(selfsigned_pythontestdotnet):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(context.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(context.check_hostname, True)
context.load_verify_locations(CERT_selfsigned_pythontestdotnet)
try:
h = client.HTTPSConnection(selfsigned_pythontestdotnet, 443,
context=context)
h.request('GET', '/')
resp = h.getresponse()
except ssl.SSLError as ssl_err:
ssl_err_str = str(ssl_err)
# In the error message of [SSL: CERTIFICATE_VERIFY_FAILED] on
# modern Linux distros (Debian Buster, etc) default OpenSSL
# configurations it'll fail saying "key too weak" until we
# address https://bugs.python.org/issue36816 to use a proper
# key size on self-signed.pythontest.net.
if re.search(r'(?i)key.too.weak', ssl_err_str):
raise unittest.SkipTest(
f'Got {ssl_err_str} trying to connect '
f'to {selfsigned_pythontestdotnet}. '
'See https://bugs.python.org/issue36816.')
raise
server_string = resp.getheader('server')
resp.close()
h.close()
self.assertIn('nginx', server_string)
def test_networked_bad_cert(self):
# We feed a "CA" cert that is unrelated to the server's cert
import ssl
support.requires('network')
with socket_helper.transient_internet('self-signed.pythontest.net'):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(CERT_localhost)
h = client.HTTPSConnection('self-signed.pythontest.net', 443, context=context)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_local_unknown_cert(self):
# The custom cert isn't known to the default trust bundle
import ssl
server = self.make_server(CERT_localhost)
h = client.HTTPSConnection('localhost', server.port)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_local_good_hostname(self):
# The (valid) cert validates the HTTP hostname
import ssl
server = self.make_server(CERT_localhost)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(CERT_localhost)
h = client.HTTPSConnection('localhost', server.port, context=context)
self.addCleanup(h.close)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.addCleanup(resp.close)
self.assertEqual(resp.status, 404)
def test_local_bad_hostname(self):
# The (valid) cert doesn't validate the HTTP hostname
import ssl
server = self.make_server(CERT_fakehostname)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(CERT_fakehostname)
h = client.HTTPSConnection('localhost', server.port, context=context)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
# Same with explicit check_hostname=True
with warnings_helper.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=True)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
# With check_hostname=False, the mismatching is ignored
context.check_hostname = False
with warnings_helper.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=False)
h.request('GET', '/nonexistent')
resp = h.getresponse()
resp.close()
h.close()
self.assertEqual(resp.status, 404)
# The context's check_hostname setting is used if one isn't passed to
# HTTPSConnection.
context.check_hostname = False
h = client.HTTPSConnection('localhost', server.port, context=context)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.assertEqual(resp.status, 404)
resp.close()
h.close()
# Passing check_hostname to HTTPSConnection should override the
# context's setting.
with warnings_helper.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=True)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
@unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
'http.client.HTTPSConnection not available')
def test_host_port(self):
# Check invalid host_port
for hp in ("www.python.org:abc", "user:password@www.python.org"):
self.assertRaises(client.InvalidURL, client.HTTPSConnection, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
"fe80::207:e9ff:fe9b", 8000),
("www.python.org:443", "www.python.org", 443),
("www.python.org:", "www.python.org", 443),
("www.python.org", "www.python.org", 443),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 443),
("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b",
443)):
c = client.HTTPSConnection(hp)
self.assertEqual(h, c.host)
self.assertEqual(p, c.port)
def test_tls13_pha(self):
import ssl
if not ssl.HAS_TLSv1_3:
self.skipTest('TLS 1.3 support required')
# just check status of PHA flag
h = client.HTTPSConnection('localhost', 443)
self.assertTrue(h._context.post_handshake_auth)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertFalse(context.post_handshake_auth)
h = client.HTTPSConnection('localhost', 443, context=context)
self.assertIs(h._context, context)
self.assertFalse(h._context.post_handshake_auth)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'key_file, cert_file and check_hostname are deprecated',
DeprecationWarning)
h = client.HTTPSConnection('localhost', 443, context=context,
cert_file=CERT_localhost)
self.assertTrue(h._context.post_handshake_auth)
class RequestBodyTest(TestCase):
"""Test cases where a request includes a message body."""
def setUp(self):
self.conn = client.HTTPConnection('example.com')
self.conn.sock = self.sock = FakeSocket("")
def get_headers_and_fp(self):
f = io.BytesIO(self.sock.data)
f.readline() # read the request line
message = client.parse_headers(f)
return message, f
def test_list_body(self):
# Note that no content-length is automatically calculated for
# an iterable. The request will fall back to send chunked
# transfer encoding.
cases = (
([b'foo', b'bar'], b'3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n'),
((b'foo', b'bar'), b'3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n'),
)
for body, expected in cases:
with self.subTest(body):
self.conn = client.HTTPConnection('example.com')
self.conn.sock = self.sock = FakeSocket('')
self.conn.request('PUT', '/url', body)
msg, f = self.get_headers_and_fp()
self.assertNotIn('Content-Type', msg)
self.assertNotIn('Content-Length', msg)
self.assertEqual(msg.get('Transfer-Encoding'), 'chunked')
self.assertEqual(expected, f.read())
def test_manual_content_length(self):
# Set an incorrect content-length so that we can verify that
# it will not be over-ridden by the library.
self.conn.request("PUT", "/url", "body",
{"Content-Length": "42"})
message, f = self.get_headers_and_fp()
self.assertEqual("42", message.get("content-length"))
self.assertEqual(4, len(f.read()))
def test_ascii_body(self):
self.conn.request("PUT", "/url", "body")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("4", message.get("content-length"))
self.assertEqual(b'body', f.read())
def test_latin1_body(self):
self.conn.request("PUT", "/url", "body\xc1")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
def test_bytes_body(self):
self.conn.request("PUT", "/url", b"body\xc1")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
def test_text_file_body(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with open(os_helper.TESTFN, "w", encoding="utf-8") as f:
f.write("body")
with open(os_helper.TESTFN, encoding="utf-8") as f:
self.conn.request("PUT", "/url", f)
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
# No content-length will be determined for files; the body
# will be sent using chunked transfer encoding instead.
self.assertIsNone(message.get("content-length"))
self.assertEqual("chunked", message.get("transfer-encoding"))
self.assertEqual(b'4\r\nbody\r\n0\r\n\r\n', f.read())
def test_binary_file_body(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with open(os_helper.TESTFN, "wb") as f:
f.write(b"body\xc1")
with open(os_helper.TESTFN, "rb") as f:
self.conn.request("PUT", "/url", f)
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("chunked", message.get("Transfer-Encoding"))
self.assertNotIn("Content-Length", message)
self.assertEqual(b'5\r\nbody\xc1\r\n0\r\n\r\n', f.read())
class HTTPResponseTest(TestCase):
def setUp(self):
body = "HTTP/1.1 200 Ok\r\nMy-Header: first-value\r\nMy-Header: \
second-value\r\n\r\nText"
sock = FakeSocket(body)
self.resp = client.HTTPResponse(sock)
self.resp.begin()
def test_getting_header(self):
header = self.resp.getheader('My-Header')
self.assertEqual(header, 'first-value, second-value')
header = self.resp.getheader('My-Header', 'some default')
self.assertEqual(header, 'first-value, second-value')
def test_getting_nonexistent_header_with_string_default(self):
header = self.resp.getheader('No-Such-Header', 'default-value')
self.assertEqual(header, 'default-value')
def test_getting_nonexistent_header_with_iterable_default(self):
header = self.resp.getheader('No-Such-Header', ['default', 'values'])
self.assertEqual(header, 'default, values')
header = self.resp.getheader('No-Such-Header', ('default', 'values'))
self.assertEqual(header, 'default, values')
def test_getting_nonexistent_header_without_default(self):
header = self.resp.getheader('No-Such-Header')
self.assertEqual(header, None)
def test_getting_header_defaultint(self):
header = self.resp.getheader('No-Such-Header',default=42)
self.assertEqual(header, 42)
class TunnelTests(TestCase):
def setUp(self):
response_text = (
'HTTP/1.0 200 OK\r\n\r\n' # Reply to CONNECT
'HTTP/1.1 200 OK\r\n' # Reply to HEAD
'Content-Length: 42\r\n\r\n'
)
self.host = 'proxy.com'
self.conn = client.HTTPConnection(self.host)
self.conn._create_connection = self._create_connection(response_text)
def tearDown(self):
self.conn.close()
def _create_connection(self, response_text):
def create_connection(address, timeout=None, source_address=None):
return FakeSocket(response_text, host=address[0], port=address[1])
return create_connection
def test_set_tunnel_host_port_headers(self):
tunnel_host = 'destination.com'
tunnel_port = 8888
tunnel_headers = {'User-Agent': 'Mozilla/5.0 (compatible, MSIE 11)'}
self.conn.set_tunnel(tunnel_host, port=tunnel_port,
headers=tunnel_headers)
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertEqual(self.conn._tunnel_host, tunnel_host)
self.assertEqual(self.conn._tunnel_port, tunnel_port)
self.assertEqual(self.conn._tunnel_headers, tunnel_headers)
def test_disallow_set_tunnel_after_connect(self):
# Once connected, we shouldn't be able to tunnel anymore
self.conn.connect()
self.assertRaises(RuntimeError, self.conn.set_tunnel,
'destination.com')
def test_connect_with_tunnel(self):
self.conn.set_tunnel('destination.com')
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertIn(b'CONNECT destination.com', self.conn.sock.data)
# issue22095
self.assertNotIn(b'Host: destination.com:None', self.conn.sock.data)
self.assertIn(b'Host: destination.com', self.conn.sock.data)
# This test should be removed when CONNECT gets the HTTP/1.1 blessing
self.assertNotIn(b'Host: proxy.com', self.conn.sock.data)
def test_tunnel_connect_single_send_connection_setup(self):
"""Regresstion test for https://bugs.python.org/issue43332."""
with mock.patch.object(self.conn, 'send') as mock_send:
self.conn.set_tunnel('destination.com')
self.conn.connect()
self.conn.request('GET', '/')
mock_send.assert_called()
# Likely 2, but this test only cares about the first.
self.assertGreater(
len(mock_send.mock_calls), 1,
msg=f'unexpected number of send calls: {mock_send.mock_calls}')
proxy_setup_data_sent = mock_send.mock_calls[0][1][0]
self.assertIn(b'CONNECT destination.com', proxy_setup_data_sent)
self.assertTrue(
proxy_setup_data_sent.endswith(b'\r\n\r\n'),
msg=f'unexpected proxy data sent {proxy_setup_data_sent!r}')
def test_connect_put_request(self):
self.conn.set_tunnel('destination.com')
self.conn.request('PUT', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertIn(b'CONNECT destination.com', self.conn.sock.data)
self.assertIn(b'Host: destination.com', self.conn.sock.data)
def test_tunnel_debuglog(self):
expected_header = 'X-Dummy: 1'
response_text = 'HTTP/1.0 200 OK\r\n{}\r\n\r\n'.format(expected_header)
self.conn.set_debuglevel(1)
self.conn._create_connection = self._create_connection(response_text)
self.conn.set_tunnel('destination.com')
with support.captured_stdout() as output:
self.conn.request('PUT', '/', '')
lines = output.getvalue().splitlines()
self.assertIn('header: {}'.format(expected_header), lines)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
serial_collection.py
|
# Copyright 2017 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OpenHTF plug for serial port.
Allows for writing out to a serial port.
"""
import logging
import threading
from typing import Optional
from openhtf.core import base_plugs
from openhtf.util import conf
try:
# pylint: disable=g-import-not-at-top
import serial # pytype: disable=import-error
# pylint: enable=g-import-not-at-top
except ImportError:
logging.error(
'Failed to import pyserial. Please install the `serial_collection_plug` extra, '
'e.g. via `pip install openhtf[serial_collection_plug]`.')
raise
conf.declare(
'serial_collection_port',
description='Port on which to collect serial data.',
default_value='/dev/ttyACM0')
conf.declare(
'serial_collection_baud',
description='Baud rate for serial data collection.',
default_value=115200)
class SerialCollectionPlug(base_plugs.BasePlug):
"""Plug that collects data from a serial port.
Spawns a thread that will open the configured serial port, continuously
poll the port for data, and write the data to the destination file as it is
received. If any serial errors are encountered during the lifetime of the
polling thread, data collection stops and an error message is logged.
Otherwise, data collection stops and the serial port is closed when
stop_collection() is called.
"""
# Serial library can raise these exceptions
SERIAL_EXCEPTIONS = (serial.SerialException, ValueError)
_serial = None # type: serial.Serial
_serial_port = None # type: str
_collect = None # type: bool
_collection_thread = None # type: Optional[threading.Thread]
@conf.inject_positional_args
def __init__(self, serial_collection_port, serial_collection_baud):
super(SerialCollectionPlug, self).__init__()
# Instantiate the port with no name, then set the name afterwards, so the
# port is not opened until start_collection() is called.
self._serial = serial.Serial(
port=None, baudrate=serial_collection_baud, timeout=1)
self._serial.port = serial_collection_port
self._collect = False
self._collection_thread = None
def start_collection(self, dest):
def _poll():
try:
with open(dest, 'w+') as outfile:
while self._collect:
data = self._serial.readline().decode()
outfile.write(data)
except self.SERIAL_EXCEPTIONS:
self.logger.error(
'Serial port error. Stopping data collection.', exc_info=True)
self._collect = True
self._collection_thread = threading.Thread(target=_poll)
self._collection_thread.daemon = True
self.logger.debug('Starting serial data collection on port %s.', self._serial.port)
self._serial.open()
self._collection_thread.start()
@property
def is_collecting(self):
if self._collection_thread is not None:
return self._collection_thread.is_alive()
return False
def stop_collection(self):
if not self.is_collecting:
self.logger.warning('Data collection is not running; nothing to stop.')
return
self._collect = False
self._collection_thread.join()
self._serial.close()
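# A minimal usage sketch (not part of this module). It assumes an OpenHTF
# test phase and a writable output path; the phase name and file name below
# are hypothetical:
#
# from openhtf import plugs
#
# @plugs.plug(collector=SerialCollectionPlug)
# def collect_serial_phase(test, collector):
#     collector.start_collection('serial_data.log')
#     # ... exercise the device under test while data streams to the file ...
#     collector.stop_collection()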
|
thread.py
|
# -*- coding: utf8 -*-
import time
import threading
import urlparse
from downloader import Downloader
SLEEP_TIME = 1
def threaded_crawler(seed_url, delay=5, cache=None, scrape_callback=None, user_agent='wswp', proxies=None, num_retries=1, max_threads=10, timeout=60):
"""Crawl this website in multiple threads
"""
# the queue of URLs that still need to be crawled
#crawl_queue = Queue.deque([seed_url])
crawl_queue = [seed_url]
# the URLs that have been seen
seen = set([seed_url])
D = Downloader(cache=cache, delay=delay, user_agent=user_agent, proxies=proxies, num_retries=num_retries, timeout=timeout)
def process_queue():
while True:
try:
url = crawl_queue.pop()
except IndexError:
# crawl queue is empty
break
else:
html = D(url)
if scrape_callback:
try:
links = scrape_callback(url, html) or []
except Exception as e:
print 'Error in callback for: {}: {}'.format(url, e)
else:
for link in links:
link = normalize(seed_url, link)
# check whether already crawled this link
if link not in seen:
seen.add(link)
# add this new link to queue
crawl_queue.append(link)
# spawn worker threads and wait for the crawl to finish
threads = []
while threads or crawl_queue:
# the crawl is still active
for thread in threads:
if not thread.is_alive():
# remove the stopped threads
threads.remove(thread)
while len(threads) < max_threads and crawl_queue:
# can start some more threads
thread = threading.Thread(target=process_queue)
thread.setDaemon(True) # daemonize so the main thread can exit when it receives ctrl-c
thread.start()
threads.append(thread)
# all threads have been launched; sleep briefly so the worker threads get CPU time
time.sleep(SLEEP_TIME)
def normalize(seed_url, link):
"""Normalize this URL by removing hash and adding domain
"""
link, _ = urlparse.urldefrag(link) # remove hash to avoid duplicates
return urlparse.urljoin(seed_url, link)
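# Usage sketch (Python 2, matching this module). The callback below is a
# placeholder; a real one would parse `html` and return links to enqueue:
#
# def scrape(url, html):
#     print 'downloaded', url
#     return [] # e.g. links extracted from html
#
# threaded_crawler('http://example.com', scrape_callback=scrape, max_threads=5)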
|
server.py
|
# Date: 06/01/2018
# Author: Pure-L0G1C
# Description: Server
import ssl
import socket
from os import path
from lib import const
from time import sleep
from queue import Queue
from random import randint
from OpenSSL import crypto
from threading import Thread, RLock
from .lib import session, shell, interface
class Server(object):
def __init__(self):
self.interface = interface.Interface()
self.waiting_conn = Queue()
self.is_active = False # is the server active
self.lock = RLock()
self.server = None
self.port = None
self.ip = None
def gen_cert(self):
key_pair = crypto.PKey()
key_pair.generate_key(crypto.TYPE_RSA, 2048)
cert = crypto.X509()
cert.get_subject().O = 'Loki'
cert.get_subject().CN = 'Sami'
cert.get_subject().OU = 'Pure-L0G1C'
cert.get_subject().C = 'US'
cert.get_subject().L = 'Los Santos'
cert.get_subject().ST = 'California'
cert.set_serial_number(randint(2048 ** 8, 4096 ** 8))
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(256 * 409600)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(key_pair)
cert.sign(key_pair, 'sha256')
with open(const.CERT_FILE, 'wb') as f:
f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
with open(const.KEY_FILE, 'wb') as f:
f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, key_pair))
def server_start(self):
self.gen_cert()
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.load_cert_chain(const.CERT_FILE, const.KEY_FILE)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
sock.bind((self.ip, self.port))
self.is_active = True
sock.settimeout(0.5)
sock.listen(100)
self.server = context.wrap_socket(sock, server_side=True)
self.services_start()
except OSError:
self.display_text('Error: invalid IP')
self.port = None
self.ip = None
def server_stop(self):
if not self.is_active: return
self.is_active = False
self.interface.close()
self.ip, self.port = None, None
def manage_conn_info(self, sess_obj, conn_info):
if conn_info:
try:
with self.lock:
services = {
'ssh': {
'ip': const.PUBLIC_IP,
'port': const.SSH_PORT
}, 'ftp': {
'ip': const.PUBLIC_IP,
'port': const.FTP_PORT
}
}
sess_obj.send(args=services)
self.manage_conn(sess_obj, conn_info)
except Exception: pass # client vanished during setup; drop the session
def manage_conn(self, sess_obj, conn_info):
_shell = shell.Shell(sess_obj, self.interface)
shell_thread = Thread(target=_shell.start)
self.interface.connect_client(sess_obj, conn_info, _shell)
shell_thread.daemon = True
shell_thread.start()
def establish_conn(self, sess, ip):
s = session.Session(sess, ip)
conn_info = s.initial_communication()
self.manage_conn_info(s, conn_info)
def waiting_conn_manager(self):
while self.is_active:
if self.waiting_conn.qsize():
sess, ip = self.waiting_conn.get() # avoid shadowing the imported session module
sleep(0.5)
print('Establishing a secure connection ...')
self.establish_conn(sess, ip)
else:
sleep(0.1) # avoid busy-spinning while the queue is empty
def server_loop(self):
while self.is_active:
try:
sess, ip = self.server.accept()
self.waiting_conn.put([sess, ip])
except Exception:
pass # accept timed out (0.5s socket timeout) or the TLS handshake failed; keep listening
def services_start(self):
server_loop = Thread(target=self.server_loop)
conn_manager = Thread(target=self.waiting_conn_manager)
server_loop.daemon = True
conn_manager.daemon = True
server_loop.start()
conn_manager.start()
print('Server started successfully')
# -------- UI -------- #
def display_text(self, text):
print('{0}{1}{0}'.format('\n\n\t', text))
def start(self, ip, port):
if self.is_active: self.server_stop()
self.ip, self.port = ip, int(port)
self.server_start()
sleep(1.2)
return self.is_active
def stop(self, delay=True):
if self.is_active:
self.server_stop()
sleep(1.2 if delay else 0)
return self.is_active
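# Minimal usage sketch (assumes const.CERT_FILE / const.KEY_FILE point at
# writable paths and the lib.* helper modules are importable; the bind
# address and port below are examples):
#
# server = Server()
# if server.start('0.0.0.0', 4444):
#     pass # interact with connected clients via server.interface
# server.stop()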
|
test_cymj.py
|
import pytest
from numbers import Number
from io import BytesIO, StringIO
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from mujoco_py import (MjSim, MjSimPool, load_model_from_xml,
load_model_from_path, MjSimState,
ignore_mujoco_warnings,
load_model_from_mjb)
from mujoco_py import const, cymj
from mujoco_py.tests.utils import compare_imgs, requires_rendering
import scipy.misc
import os
import shutil
import glob
from multiprocessing import get_context
import sys
BASIC_MODEL_XML = """
<mujoco>
<worldbody>
<light name="light1" diffuse=".5 .5 .5" pos="0 0 3" dir="0 0 -1"/>
<camera name="camera1" pos="3 0 0" zaxis="1 0 0" />
<geom name="geom1" pos="0.5 0.4 0.3" type="plane" size="1 1 0.1" rgba=".9 0 0 1"/>
<body pos="0 0 1" name="body1">
<joint name="joint1" type="free"/>
<geom name="geom2" pos="0 1 0" type="box" size=".1 .2 .3" rgba="0 .9 0 1"/>
<site name="site1" pos="1 0 0" size="0.1" type="sphere"/>
<site name="sensorsurf" pos="0 0.045 0" size=".03 .03 .03" type="ellipsoid" rgba="0.3 0.2 0.1 0.3"/>
</body>
<body pos="1 0 0" name="mocap1" mocap="true">
<geom conaffinity="0" contype="0" pos="0 0 0" size="0.01 0.01 0.01" type="box"/>
</body>
</worldbody>
<sensor>
<touch name="touchsensor" site="sensorsurf" />
</sensor>
</mujoco>
"""
def test_nested():
model = load_model_from_xml(BASIC_MODEL_XML)
model.vis.global_.fovy
model.vis.quality.shadowsize
def test_mj_sim_basics():
model = load_model_from_xml(BASIC_MODEL_XML)
sim = MjSim(model, nsubsteps=2)
sim.reset()
sim.step()
sim.reset()
sim.forward()
@requires_rendering
def test_arrays_of_objs():
model = load_model_from_xml(BASIC_MODEL_XML)
sim = MjSim(model)
sim.forward()
renderer = cymj.MjRenderContext(sim, offscreen=True)
assert len(renderer.scn.camera) == 2, "Expecting scn.camera to be available"
def test_model_save_load():
model = load_model_from_xml(BASIC_MODEL_XML)
xml_from_model = model.get_xml()
model_from_xml = load_model_from_xml(xml_from_model)
assert(xml_from_model == model_from_xml.get_xml())
mjb_from_model = model.get_mjb()
model_from_mjb = load_model_from_mjb(mjb_from_model)
assert(mjb_from_model == model_from_mjb.get_mjb())
def test_sim_save():
model = load_model_from_xml(BASIC_MODEL_XML)
assert model.nkey == 0
sim = MjSim(model)
with StringIO() as f:
sim.save(f)
f.seek(0)
loaded_model = load_model_from_xml(f.read())
assert loaded_model.nkey == 1
with BytesIO() as f:
sim.save(f, format='mjb')
f.seek(0)
loaded_model = load_model_from_mjb(f.read())
assert loaded_model.nkey == 1
def test_mj_sim_buffers():
model = load_model_from_xml(BASIC_MODEL_XML)
# test no callback
sim = MjSim(model, nsubsteps=2)
assert(sim.udd_state == {})
sim.step()
assert(sim.udd_state == {})
# test with callback
foo = 10
d = {"foo": foo,
"foo_2": np.array([foo, foo])}
def udd_callback(sim):
return d
sim = MjSim(model, nsubsteps=2, udd_callback=udd_callback)
assert(sim.udd_state is not None)
assert(sim.udd_state["foo"] == foo)
assert(sim.udd_state["foo_2"].shape[0] == 2)
assert(sim.udd_state["foo_2"][0] == foo)
foo = 11
d = {"foo": foo,
"foo_2": np.array([foo, foo])}
sim.step()
assert(sim.udd_state is not None)
assert(sim.udd_state["foo"] == foo)
assert(sim.udd_state["foo_2"][0] == foo)
d = {}
with pytest.raises(AssertionError):
sim.step()
d = {"foo": foo,
"foo_2": np.array([foo, foo]),
"foo_3": foo}
with pytest.raises(AssertionError):
sim.step()
d = {"foo": foo,
"foo_2": np.array([foo, foo, foo])}
with pytest.raises(AssertionError):
sim.step()
d = {"foo": "haha",
"foo_2": np.array([foo, foo, foo])}
with pytest.raises(AssertionError):
sim.step()
def test_mj_sim_pool_buffers():
model = load_model_from_xml(BASIC_MODEL_XML)
foo = 10
def udd_callback(sim):
return {"foo": foo}
sims = [MjSim(model, udd_callback=udd_callback) for _ in range(2)]
sim_pool = MjSimPool(sims, nsubsteps=2)
for i in range(len(sim_pool.sims)):
assert(sim_pool.sims[i].udd_state is not None)
assert(sim_pool.sims[i].udd_state["foo"] == 10)
foo = 11
sim_pool.step()
for i in range(len(sim_pool.sims)):
assert(sim_pool.sims[i].udd_state is not None)
assert(sim_pool.sims[i].udd_state["foo"] == 11)
def test_mj_sim_pool_basics():
model = load_model_from_xml(BASIC_MODEL_XML)
sims = [MjSim(model) for _ in range(2)]
sim_pool = MjSimPool(sims, nsubsteps=2)
sim_pool.reset()
sim_pool.step()
sim_pool.forward()
def test_data_attribute_getters():
model = load_model_from_xml(BASIC_MODEL_XML)
sim = MjSim(model)
sim.forward()
assert_array_equal(sim.data.get_body_xpos("body1"), [0, 0, 1])
with pytest.raises(ValueError):
sim.data.get_body_xpos("body_foo")
with pytest.raises(RuntimeError):
sim.data.get_xpos("body1")
assert len(sim.data.get_body_xquat("body1")) == 4
assert_array_equal(sim.data.get_body_xmat("body1").shape, (3, 3))
# At (0, 1, 1) since the geom is displaced in the body
assert_array_equal(sim.data.get_body_xipos("body1"), [0, 1, 1])
assert_array_equal(sim.data.get_site_xpos("site1"), [1, 0, 1])
assert_array_equal(sim.data.get_site_xmat("site1").shape, (3, 3))
assert_array_equal(sim.data.get_geom_xpos("geom1"), [0.5, 0.4, 0.3])
assert_array_equal(sim.data.get_geom_xpos("geom2"), [0, 1, 1])
assert_array_equal(sim.data.get_geom_xmat("geom2").shape, (3, 3))
assert_array_equal(sim.data.get_light_xpos("light1"), [0, 0, 3])
assert_array_equal(sim.data.get_light_xdir("light1"), [0, 0, -1])
assert_array_equal(sim.data.get_camera_xpos("camera1"), [3, 0, 0])
assert_array_equal(sim.data.get_camera_xmat("camera1").shape, (3, 3))
assert_array_equal(sim.data.get_joint_xaxis("joint1"), [0, 0, 1])
assert_array_equal(sim.data.get_joint_xanchor("joint1"), [0, 0, 1])
def test_joint_qpos_qvel_ops():
model = load_model_from_xml(BASIC_MODEL_XML)
sim = MjSim(model)
sim.forward()
# Test setting one with a list
sim.data.set_joint_qpos("joint1", [1, 2, 3, 1, 0, 0, 0])
# And the other with an np.ndarray
sim.data.set_joint_qvel("joint1", np.array([1, 2, 3, 0.1, 0.1, 0.1]))
sim.forward()
assert_array_equal(sim.data.get_joint_qpos(
"joint1"), [1, 2, 3, 1, 0, 0, 0])
assert_array_equal(sim.data.get_joint_qvel(
"joint1"), [1, 2, 3, 0.1, 0.1, 0.1])
def test_mocap_ops():
model = load_model_from_xml(BASIC_MODEL_XML)
sim = MjSim(model)
sim.forward()
assert_array_equal(sim.data.get_body_xpos("mocap1"), [1, 0, 0])
assert_array_equal(sim.data.get_mocap_pos("mocap1"), [1, 0, 0])
assert_array_equal(sim.data.get_mocap_quat("mocap1"), [1, 0, 0, 0])
new_pos = [2, 1, 1]
new_quat = [0.707107, 0.707107, 0, 0]
sim.data.set_mocap_pos("mocap1", new_pos)
sim.data.set_mocap_quat("mocap1", new_quat)
sim.forward()
assert_array_equal(sim.data.get_mocap_pos("mocap1"), new_pos)
assert_array_almost_equal(sim.data.get_mocap_quat("mocap1"), new_quat)
assert_array_equal(sim.data.get_body_xpos("mocap1"), new_pos)
assert_array_almost_equal(sim.data.get_body_xquat("mocap1"), new_quat)
assert_array_almost_equal(sim.data.get_body_xmat("mocap1"),
[[1, 0, 0], [0, 0, -1], [0, 1, 0]])
def test_sim_state():
model = load_model_from_xml(BASIC_MODEL_XML)
foo = 10
d = {"foo": foo,
"foo_array": np.array([foo, foo, foo]),
"foo_2darray": np.reshape(np.array([foo, foo, foo, foo]), (2, 2)),
}
def udd_callback(sim):
return d
sim = MjSim(model, nsubsteps=2, udd_callback=udd_callback)
state = sim.get_state()
assert np.array_equal(state.time, sim.data.time)
assert np.array_equal(state.qpos, sim.data.qpos)
assert np.array_equal(state.qvel, sim.data.qvel)
assert np.array_equal(state.act, sim.data.act)
for k in state.udd_state.keys():
if (isinstance(state.udd_state[k], Number)):
assert state.udd_state[k] == sim.udd_state[k]
else:
assert np.array_equal(state.udd_state[k], sim.udd_state[k])
# test flatten, unflatten
a = state.flatten()
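# the extra 8 entries come from the flattened udd_state: foo (1) + foo_array (3) + foo_2darray (2x2 = 4)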
assert len(a) == (1 + sim.model.nq + sim.model.nv + sim.model.na + 8)
state2 = MjSimState.from_flattened(a, sim)
assert np.array_equal(state.time, sim.data.time)
assert np.array_equal(state.qpos, sim.data.qpos)
assert np.array_equal(state.qvel, sim.data.qvel)
assert np.array_equal(state.act, sim.data.act)
for k in state2.udd_state.keys():
if (isinstance(state2.udd_state[k], Number)):
assert state2.udd_state[k] == sim.udd_state[k]
else:
assert np.array_equal(state2.udd_state[k], sim.udd_state[k])
assert state2 == state
assert not state2 != state
# test equality with deleting keys
state2 = state2._replace(udd_state={"foo": foo})
assert state2 != state
assert not (state2 == state)
# test equality with changing contents of array
state2 = state2._replace(
udd_state={"foo": foo, "foo_array": np.array([foo, foo + 1])})
assert state2 != state
assert not (state2 == state)
# test equality with adding keys
d2 = dict(d)
d2.update({"not_foo": foo})
state2 = state2._replace(udd_state=d2)
assert state2 != state
assert not (state2 == state)
# test defensive copy
sim.set_state(state)
state.qpos[0] = -1
assert not np.array_equal(state.qpos, sim.data.qpos)
state3 = sim.get_state()
state3.qpos[0] = -1
assert not np.array_equal(state3.qpos, sim.data.qpos)
state3.udd_state["foo_array"][0] = -1
assert not np.array_equal(
state3.udd_state["foo_array"], sim.udd_state["foo_array"])
# test no callback
sim = MjSim(model, nsubsteps=2)
state = sim.get_state()
print("state.udd_state = %s" % state.udd_state)
assert state.udd_state == {}
# test flatten, unflatten
a = state.flatten()
assert len(a) == 1 + sim.model.nq + sim.model.nv + sim.model.na
state2 = MjSimState.from_flattened(a, sim)
assert np.array_equal(state.time, sim.data.time)
assert np.array_equal(state.qpos, sim.data.qpos)
assert np.array_equal(state.qvel, sim.data.qvel)
assert np.array_equal(state.act, sim.data.act)
assert state.udd_state == sim.udd_state
def test_mj_warning_raises():
''' Test that MuJoCo warnings cause exceptions. '''
# Two boxes on a plane need more than 1 contact (nconmax)
xml = '''
<mujoco>
<size nconmax="1"/>
<worldbody>
<geom type="plane" size="1 1 0.1"/>
<body pos="1 0 1"> <joint type="free"/> <geom size="1"/> </body>
<body pos="0 1 1"> <joint type="free"/> <geom size="1"/> </body>
</worldbody>
</mujoco>
'''
model = load_model_from_xml(xml)
sim = MjSim(model)
sim.reset()
with pytest.raises(Exception):
# This should raise an exception due to the mujoco warning callback
sim.step()
def test_ignore_mujoco_warnings():
# Two boxes on a plane need more than 1 contact (nconmax)
xml = '''
<mujoco>
<size nconmax="1"/>
<worldbody>
<geom type="plane" size="1 1 0.1"/>
<body pos="1 0 1"> <joint type="free"/> <geom size="1"/> </body>
<body pos="0 1 1"> <joint type="free"/> <geom size="1"/> </body>
</worldbody>
</mujoco>
'''
model = load_model_from_xml(xml)
sim = MjSim(model)
sim.reset()
with ignore_mujoco_warnings():
# This should raise an exception due to the mujoco warning callback,
# but it's suppressed by the context manager.
sim.step()
sim.reset()
with pytest.raises(Exception):
# test to make sure previous warning callback restored.
sim.step()
def test_jacobians():
xml = """
<mujoco>
<worldbody>
<body name="body1" pos="0 0 0">
<joint axis="1 0 0" name="a" pos="0 0 0" type="hinge"/>
<geom name="geom1" pos="0 0 0" size="1.0"/>
<body name="body2" pos="0 0 1">
<joint name="b" axis="1 0 0" pos="0 0 1" type="hinge"/>
<geom name="geom2" pos="1 1 1" size="0.5"/>
<site name="target" size="0.1"/>
</body>
</body>
</worldbody>
<actuator>
<motor joint="a"/>
<motor joint="b"/>
</actuator>
</mujoco>
"""
model = load_model_from_xml(xml)
sim = MjSim(model)
sim.reset()
# After reset jacobians are all zeros
target_jacp = np.zeros(3 * sim.model.nv)
sim.data.get_site_jacp('target', jacp=target_jacp)
np.testing.assert_allclose(target_jacp, np.zeros(3 * sim.model.nv))
# After first forward, jacobians are real
sim.forward()
sim.data.get_site_jacp('target', jacp=target_jacp)
target_test = np.array([0, 0, -1, 1, 0, 0])
np.testing.assert_allclose(target_jacp, target_test)
# Should be unchanged after steps (zero action)
for _ in range(2):
sim.step()
sim.forward()
sim.data.get_site_jacp('target', jacp=target_jacp)
assert np.linalg.norm(target_jacp - target_test) < 1e-3
# Apply a very large action, ensure jacobian unchanged after step
sim.reset()
sim.forward()
sim.data.ctrl[:] = np.ones(sim.model.nu) * 1e9
sim.step()
sim.data.get_site_jacp('target', jacp=target_jacp)
np.testing.assert_allclose(target_jacp, target_test)
# After large action, ensure jacobian changed after forward
sim.forward()
sim.data.get_site_jacp('target', jacp=target_jacp)
assert not np.allclose(target_jacp, target_test)
# Test the `site_jacp` property, which gets all at once
np.testing.assert_allclose(target_jacp, sim.data.site_jacp[0])
# Test not passing in array
sim.reset()
sim.forward()
target_jacp = sim.data.get_site_jacp('target')
np.testing.assert_allclose(target_jacp, target_test)
# Test passing in bad array (long instead of double)
target_jacp = np.zeros(3 * sim.model.nv, dtype=np.long)
with pytest.raises(ValueError):
sim.data.get_site_jacp('target', jacp=target_jacp)
# Test rotation jacobian - like above but 'jacr' instead of 'jacp'
# After reset jacobians are all zeros
sim.reset()
target_jacr = np.zeros(3 * sim.model.nv)
sim.data.get_site_jacr('target', jacr=target_jacr)
np.testing.assert_allclose(target_jacr, np.zeros(3 * sim.model.nv))
# After first forward, jacobians are real
sim.forward()
sim.data.get_site_jacr('target', jacr=target_jacr)
target_test = np.array([1, 1, 0, 0, 0, 0])
# Test allocating dedicated array
target_jacr = sim.data.get_site_jacr('target')
np.testing.assert_allclose(target_jacr, target_test)
# Test the batch getter (all sites at once)
np.testing.assert_allclose(target_jacr, sim.data.site_jacr[0])
# Test passing in bad array
target_jacr = np.zeros(3 * sim.model.nv, dtype=np.long)
with pytest.raises(ValueError):
sim.data.get_site_jacr('target', jacr=target_jacr)
def test_xvelp(): # xvelp = positional velocity in world frame
xml = """
<mujoco>
<worldbody>
<body name="body1" pos="0 0 0">
<joint name="a" axis="1 0 0" pos="0 0 0" type="slide"/>
<geom name="geom1" pos="0 0 0" size="1.0"/>
<body name="body2" pos="0 0 1">
<joint name="b" axis="1 0 0" pos="0 0 1" type="slide"/>
<geom name="geom2" pos="0 0 0" size="0.5"/>
<site name="site1" size="0.1"/>
</body>
</body>
</worldbody>
<actuator>
<motor joint="a"/>
<motor joint="b"/>
</actuator>
</mujoco>
"""
model = load_model_from_xml(xml)
sim = MjSim(model)
sim.reset()
sim.forward()
# Check that xvelp starts out at zero (since qvel is zero)
site1_xvelp = sim.data.get_site_xvelp('site1')
np.testing.assert_allclose(site1_xvelp, np.zeros(3))
# Push the base body and step forward to get it moving
sim.data.ctrl[0] = 1e9
sim.step()
sim.forward()
# Check that the first body has nonzero xvelp
body1_xvelp = sim.data.get_body_xvelp('body1')
assert not np.allclose(body1_xvelp, np.zeros(3))
# Check that the second body has zero xvelp (still)
body2_xvelp = sim.data.get_body_xvelp('body2')
np.testing.assert_allclose(body2_xvelp, np.zeros(3))
# Check that this matches the batch (gathered) getter property
np.testing.assert_allclose(body2_xvelp, sim.data.body_xvelp[2])
def test_xvelr(): # xvelr = rotational velocity in world frame
xml = """
<mujoco>
<worldbody>
<body name="body1" pos="0 0 0">
<joint name="a" axis="1 0 0" pos="0 0 0" type="hinge"/>
<geom name="geom1" pos="0 0 0" size="0.3"/>
<body name="body2" pos="0 0 1">
<joint name="b" axis="1 0 0" pos="0 0 0" type="hinge"/>
<geom name="geom2" pos="0 0 0" size="0.3"/>
<site name="site1" size="0.1"/>
</body>
</body>
</worldbody>
<actuator>
<motor joint="a"/>
<motor joint="b"/>
</actuator>
</mujoco>
"""
model = load_model_from_xml(xml)
sim = MjSim(model)
sim.reset()
sim.forward()
# Check that xvelr starts out at zero (since qvel is zero)
site1_xvelr = sim.data.get_site_xvelr('site1')
np.testing.assert_allclose(site1_xvelr, np.zeros(3))
# Push the base body and step forward to get it moving
sim.data.ctrl[0] = 1e9
sim.step()
sim.forward()
# Check that the first body has nonzero xvelr
body1_xvelr = sim.data.get_body_xvelr('body1')
assert not np.allclose(body1_xvelr, np.zeros(3))
# Check that the second body has zero xvelr (still)
body2_xvelr = sim.data.get_body_xvelr('body2')
np.testing.assert_allclose(body2_xvelr, np.zeros(3))
# Check that this matches the batch (gathered) getter property
np.testing.assert_allclose(body2_xvelr, sim.data.body_xvelr[2])
@requires_rendering
def test_rendering():
model = load_model_from_xml(BASIC_MODEL_XML)
sim = MjSim(model)
sim.forward()
img, depth = sim.render(200, 200, depth=True)
assert img.shape == (200, 200, 3)
compare_imgs(img, 'test_rendering.freecam.png')
depth = (depth - np.min(depth)) / (np.max(depth) - np.min(depth))
depth = np.asarray(depth * 255, dtype=np.uint8)
assert depth.shape == (200, 200)
compare_imgs(depth, 'test_rendering.freecam.depth.png')
img = sim.render(100, 100, camera_name="camera1")
assert img.shape == (100, 100, 3)
compare_imgs(img, 'test_rendering.camera1.png')
img = sim.render(200, 100, camera_name="camera1")
assert img.shape == (100, 200, 3)
compare_imgs(img, 'test_rendering.camera1.narrow.png')
render_context = sim.render_contexts[0]
render_context.add_marker(size=np.array([.4, .5, .6]),
pos=np.array([.4, .5, .6]),
rgba=np.array([.7, .8, .9, 1.0]),
label="mark")
img = sim.render(200, 200, camera_name="camera1")
assert img.shape == (200, 200, 3)
compare_imgs(img, 'test_rendering_markers.camera1.png')
@requires_rendering
def test_rendering_failing():
model = load_model_from_xml(BASIC_MODEL_XML)
sim = MjSim(model)
sim.forward()
sim.render(100, 100)
render_context = sim.render_contexts[0]
render_context.add_marker(size=np.array([.4, .5, .6]),
pos=np.array([.4, .5, .6]),
rgba=np.array([.7, .8, .9, 1.0]),
label="blaaaa")
img = sim.render(200, 200, camera_name="camera1")
assert img.shape == (200, 200, 3)
with pytest.raises(Exception):
# the marker label differs from the reference image, so the comparison must fail
compare_imgs(img, 'test_rendering_markers.camera1.png')
@requires_rendering
def test_viewercontext():
model = load_model_from_xml(BASIC_MODEL_XML)
sim = MjSim(model)
sim.forward()
renderer = cymj.MjRenderContext(sim, offscreen=True)
renderer.add_marker(type=const.GEOM_SPHERE,
size=np.ones(3) * 0.1,
pos=np.zeros(3),
mat=np.eye(3).flatten(),
rgba=np.ones(4),
label="mark")
@requires_rendering
def test_many_sims_rendering():
model = load_model_from_xml(BASIC_MODEL_XML)
sims = [MjSim(model) for _ in range(5)]
pool = MjSimPool(sims)
pool.forward()
for sim in sims:
img, depth = sim.render(200, 200, depth=True)
assert img.shape == (200, 200, 3)
compare_imgs(img, 'test_rendering.freecam.png')
def test_xml_from_path():
model = load_model_from_path("mujoco_py/tests/test.xml")
sim = MjSim(model)
xml = model.get_xml()
assert xml.find("blabla") > -1, "include should be embeeded"
assert xml.find("include") == - \
1, "include should be parsed and not present"
def test_sensors():
model = load_model_from_xml(BASIC_MODEL_XML)
sim = MjSim(model)
sim.model.sensor_names
sim.data.get_sensor("touchsensor")
@requires_rendering
def test_high_res():
model = load_model_from_xml(BASIC_MODEL_XML)
sim = MjSim(model)
sim.forward()
img = sim.render(1000, 1000)
img = scipy.misc.imresize(img, (200, 200, 3))
assert img.shape == (200, 200, 3)
compare_imgs(img, 'test_rendering.freecam.png')
@pytest.mark.skipif(sys.platform.startswith("win"), reason="This test fails on windows.")
def test_multiprocess():
'''
Tests for importing mujoco_py from multiple processes.
'''
ctx = get_context('spawn')
processes = []
times = 3
queue = ctx.Queue()
for _ in range(times):
processes.append(ctx.Process(target=import_process, args=(queue, )))
for p in processes:
p.start()
for p in processes:
p.join()
for _ in range(times):
assert queue.get(), "One of the processes failed."
def import_process(queue):
try:
from mujoco_py import builder
mjpro_path, key_path = builder.discover_mujoco()
builder.load_cython_ext(mjpro_path)
except Exception as e:
queue.put(False)
else:
queue.put(True)
|
gezgin.py
|
import threading
import requests
from bs4 import BeautifulSoup,element
from urllib.parse import quote
import datetime as dt
class Gezgin(object):
def __init__(self,adres,header='Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:79.0) Gecko/20100101 Firefox/79.0',sure=0,yonlendirme=False):
self.Adres=adres
self.sure=sure
self.yonlendirme=yonlendirme
self.Baslik = {
'User-Agent': header,
'Host': adres.split("/")[2],
'Referer': adres.split("/")[0]+"//"+adres.split("/")[2]
}
def HTML(self):
try:
if self.sure:
yanit = requests.get(self.Adres, headers=self.Baslik,allow_redirects=self.yonlendirme,timeout=self.sure)
else:
yanit = requests.get(self.Adres, headers=self.Baslik,allow_redirects=self.yonlendirme)
if yanit.status_code==200:
cevap=BeautifulSoup(yanit.text, 'html.parser')
return cevap
else:
return yanit.status_code
except Exception:
print("Could not fetch data from", self.Adres)
return False
class Getir(object):
def __init__(self,Html,Par):
self.veri=Html
if not type(Par)==list: Par=[Par]
self.parametre=Par
def Metin(self,veri,Parametre):
i=-1
cevap=None
for par in Parametre:
i+=1
if par.Konteyner:
if not type(par.Konteyner)==list: par.Konteyner=[par.Konteyner]
veri=self.Metin(veri,par.Konteyner)
try:
if par.sira>0:
cevap=str(list(Getir(veri,[par]))[par.sira])
elif not par.Val=="":
cevap=veri.find(par.Tag,{par.Atr:par.Val})
elif not par.Atr=="":
cevap=veri.find(par.Tag)[par.Atr]
else:
cevap=veri.find(par.Tag)
if not cevap==None:break
except:
pass
if type(cevap)==list:
cevap=" ".join(cevap)
try:
(bolen,kacinci)=Parametre[i].Ayrac1
if not bolen==0:
#if type(cevap)==element.Tag: cevap=cevap.get_text()
cevap=str(cevap).split(bolen)[kacinci]
except:
print("Ayrac1 (",bolen,kacinci,") bölümleme hatası")
try:
(bolen,kacinci)=Parametre[i].Ayrac2
if not bolen==0:
#if type(cevap)==element.Tag: cevap=cevap.get_text()
cevap=str(cevap).split(bolen)[kacinci]
except:
print("Ayrac2 (",bolen,kacinci,") bölümleme hatası")
return cevap
def __str__(self):
cevap=self.Metin(self.veri,self.parametre)
if type(cevap)==element.Tag: cevap=cevap.get_text()
if cevap==None: return ""
else: return cevap.strip()
def Liste(self,veri,Parametre):
cevap=[]
for par in Parametre:
if par.Konteyner:
if not type(par.Konteyner)==list: par.Konteyner=[par.Konteyner]
veri=self.Metin(self.veri,par.Konteyner)
if not veri:continue
try:
if not par.Val=="":
cevap=veri.find_all(par.Tag,attrs={par.Atr:par.Val})
else: cevap=veri.find_all(par.Tag)
if not cevap==None:break
except:
pass
return iter(cevap)
def __iter__(self):
return self.Liste(self.veri,self.parametre)
class cekilecek(object):
def __init__(self,tag,atr="",val="",ayrac1=(0,0),ayrac2=(0,0),sira=0,Konteyner=False):
self.Tag=tag
self.Atr=atr
self.Val=val
self.Ayrac1=ayrac1
self.Ayrac2=ayrac2
self.toplamUrun=0
self.sayfaAdedi=0
self.sira=sira
self.Konteyner=Konteyner
class urunArama(object):
def __init__(self,adres):
self.Konteyner=False
self.maxUrun=999999999
self.adres=adres
self.tumListe=[]
self.sutunlar={}
self.DUR=False
def adresEki(self,aranacak,sorguEk='ara?q=',sayfaEk='sayfa=',filtreEk="puan=4-max&siralama=yorumsayisi"):
arama=quote(aranacak)
self.sorguEk=sorguEk+arama
self.filtreEk=filtreEk
self.sayfaEk=sayfaEk
#self.Konteyner=""
def anaSayfa(self,Unite,Total):
Sayfa=Gezgin(self.adres+"/"+self.sorguEk+"&"+self.filtreEk)
print(Sayfa.Adres)
sonuc=Sayfa.HTML()
#try:
adet=str(Getir(sonuc,Total)).strip()
if adet=="" or adet.isnumeric()==False:
print("Ürün yok!")
return False
self.toplamUrun= int(adet.replace("'","").replace(".","").replace(",","").replace("+",""))
print("Toplam", self.toplamUrun,"adet ürün bulundu.")
if self.toplamUrun>self.maxUrun:
print("Fakat taranabilecek üst sınır:",self.maxUrun)
self.toplamUrun=self.maxUrun
Sayfa=Gezgin(self.adres+"/"+self.sorguEk+"&"+self.filtreEk)
sonuc=Sayfa.HTML()
sayfadakiUrun=len(list(Getir(sonuc,Unite)))
if sayfadakiUrun<1:
print("Aranan kritere göre ürün bulunamadı!")
return False
if self.toplamUrun%sayfadakiUrun>1:
sayfaAdedi=self.toplamUrun//sayfadakiUrun+1
else: sayfaAdedi=self.toplamUrun//sayfadakiUrun
print("Her bir sayfada (maksimum)",sayfadakiUrun, "ürün olmak üzere, toplam ", sayfaAdedi,"sayfada taranacak.")
self.sayfaAdedi=sayfaAdedi
self.unite=Unite
return True
def sayfalardaGez(self,thr=999):
if thr>self.sayfaAdedi: thr=self.sayfaAdedi
print("Thread count =", thr)
self.tumListe.clear() # drop results from any previous run
thrA=[]
i=0
n1=dt.datetime.now() # start the clock to time the crawl
for sayfa in range(1,self.sayfaAdedi+1):
i+=1
if self.DUR: break
url=self.adres+"/"+self.sorguEk+"&"+self.filtreEk+"&"+self.sayfaEk+str(sayfa)
ta=threading.Thread(target=self.cekAl, args = (url,))
thrA.append(ta)
ta.start()
if i%thr==0:
for tek in thrA:
tek.join()
thrA=[]
for tek in thrA:
tek.join()
n2=dt.datetime.now() # stop the clock
print("Fetched", len(self.tumListe), "products in", (n2-n1).seconds, "seconds.")
return self.tumListe
def cekAl(self,url):
Sayfa=Gezgin(url)
sonuc=Sayfa.HTML()
if sonuc==429 or sonuc==301:
if sonuc==429: print("site tarafından ip bloke edildi.")
self.DUR=True
return
sayfadakiUrunler=list(Getir(sonuc,self.unite))
for urun in sayfadakiUrunler:
satir=[]
for etiket,parametre in self.sutunlar.items():
try:
ekle=str(Getir(urun,parametre))
satir.append(ekle)
except:
satir.append("")#çekilemeyen veri
self.tumListe.append(satir)
if __name__=="__main__":
pass
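# Usage sketch (hypothetical site and selectors; adapt them to the target
# site's actual markup before use):
# arama = urunArama('https://www.example.com')
# arama.adresEki('laptop')
# unite = cekilecek('div', 'class', 'product-card') # one product card
# total = cekilecek('span', 'class', 'result-count') # total result count
# if arama.anaSayfa(unite, total):
#     arama.sutunlar = {'ad': cekilecek('h3'), 'fiyat': cekilecek('span', 'class', 'price')}
#     liste = arama.sayfalardaGez(thr=8)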
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import random
import re
import shutil
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from distutils.version import StrictVersion
from math import isnan
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
# pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException, prompt_y_n
from msrestazure.azure_exceptions import CloudError
import requests
# pylint: disable=no-name-in-module,import-error
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core.azclierror import (ResourceNotFoundError,
ArgumentUsageError,
ClientRequestError,
InvalidArgumentValueError,
MutuallyExclusiveArgumentError,
ValidationError)
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
ApplicationUpdateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters,
ResourceAccess, RequiredResourceAccess)
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from azure.mgmt.containerservice.v2021_02_01.models import ContainerServiceNetworkProfile
from azure.mgmt.containerservice.v2021_02_01.models import ContainerServiceLinuxProfile
from azure.mgmt.containerservice.v2021_02_01.models import ManagedClusterServicePrincipalProfile
from azure.mgmt.containerservice.v2021_02_01.models import ContainerServiceSshConfiguration
from azure.mgmt.containerservice.v2021_02_01.models import ContainerServiceSshPublicKey
from azure.mgmt.containerservice.v2021_02_01.models import ManagedCluster
from azure.mgmt.containerservice.v2021_02_01.models import ManagedClusterAADProfile
from azure.mgmt.containerservice.v2021_02_01.models import ManagedClusterAddonProfile
from azure.mgmt.containerservice.v2021_02_01.models import ManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2021_02_01.models import ManagedClusterIdentity
from azure.mgmt.containerservice.v2021_02_01.models import AgentPool
from azure.mgmt.containerservice.v2021_02_01.models import AgentPoolUpgradeSettings
from azure.mgmt.containerservice.v2021_02_01.models import ManagedClusterSKU
from azure.mgmt.containerservice.v2021_02_01.models import ManagedClusterWindowsProfile
from azure.mgmt.containerservice.v2021_02_01.models import ManagedClusterIdentityUserAssignedIdentitiesValue
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftAgentPoolProfileRole
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAADIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedCluster
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftRouterProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAuthProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import NetworkProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterMonitorProfile
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._client_factory import cf_agent_pools
from ._client_factory import get_msi_client
from ._helpers import (_populate_api_server_access_profile, _set_vm_set_type, _set_outbound_type,
_parse_comma_separated_list)
from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided,
update_load_balancer_profile, create_load_balancer_profile)
from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE
from ._consts import CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME
from ._consts import CONST_MONITORING_ADDON_NAME
from ._consts import CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
from ._consts import CONST_VIRTUAL_NODE_ADDON_NAME
from ._consts import CONST_VIRTUAL_NODE_SUBNET_NAME
from ._consts import CONST_KUBE_DASHBOARD_ADDON_NAME
from ._consts import CONST_AZURE_POLICY_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME
from ._consts import CONST_INGRESS_APPGW_SUBNET_CIDR, CONST_INGRESS_APPGW_SUBNET_ID
from ._consts import CONST_INGRESS_APPGW_WATCH_NAMESPACE
from ._consts import CONST_CONFCOM_ADDON_NAME, CONST_ACC_SGX_QUOTE_HELPER_ENABLED
from ._consts import ADDONS
from ._consts import CONST_CANIPULL_IMAGE
from ._consts import CONST_PRIVATE_DNS_ZONE_SYSTEM
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
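# Editor's sketch (not part of the original module): how which() is typically
# consumed below -- resolve a binary up front and fail fast with a CLIError
# instead of letting subprocess raise a less helpful error later. The helper
# name is illustrative only.
def _example_require_binary(binary):
    """Return the resolved path of `binary`, or raise if it is not on PATH."""
    bin_path = which(binary)
    if bin_path is None:
        raise CLIError('Can not find {} executable in PATH'.format(binary))
    return bin_path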
def wait_then_open(url):
"""
    Waits for a bit, then opens a URL. Useful for waiting for a proxy to come up before opening the URL.
    """
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
        except URLError:
            time.sleep(1)
            continue
        break
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
    t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
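# Editor's sketch, illustrative only: wait_then_open_async() is meant to be
# called right before a blocking foreground process (e.g. `kubectl proxy`), so
# the daemon thread opens the browser once the proxy starts answering.
def _example_browse_via_proxy(url, proxy_argv):
    """Open `url` in the background, then block on the proxy subprocess."""
    wait_then_open_async(url)      # returns immediately; opener runs as a daemon
    subprocess.call(proxy_argv)    # blocks until the proxy exits (e.g. CTRL+C)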
def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
"""
Opens a browser to the web interface for the cluster orchestrator
:param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
if str(orchestrator_type).lower() == 'kubernetes' or \
orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \
(acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'): # pylint: disable=no-member
return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)
if str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Launch a proxy and browse the Kubernetes web UI.
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
if os.path.exists(browse_path):
os.remove(browse_path)
_k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file, False)
logger.warning('Proxy running on 127.0.0.1:8001/ui')
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1:8001/ui')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.
    :param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: Path to the SSH key to use
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
if not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
acs = acs_client.ACSClient()
if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
key_filename=ssh_key_file):
raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info)))
octarine_bin = '/opt/mesosphere/bin/octarine'
if not acs.file_exists(octarine_bin):
raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
proxy_id = _rand_str(16)
proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
acs.run(proxy_cmd, background=True)
# Parse the output to get the remote PORT
proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
stdout, _ = acs.run(proxy_client_cmd)
remote_port = int(stdout.read().decode().strip())
local_port = acs.get_available_local_port()
# Set the proxy
proxy.set_http_proxy('127.0.0.1', local_port)
logger.warning('Proxy running on 127.0.0.1:%s', local_port)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1')
try:
acs.create_tunnel(
remote_host='127.0.0.1',
remote_port=remote_port,
local_port=local_port)
finally:
proxy.disable_http_proxy()
def acs_install_cli(cmd, client, resource_group_name, name, install_location=None, client_version=None):
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
kwargs = {'install_location': install_location}
if client_version:
kwargs['client_version'] = client_version
if orchestrator_type == 'kubernetes':
return k8s_install_cli(**kwargs)
if orchestrator_type == 'dcos':
return dcos_install_cli(**kwargs)
raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _urlretrieve(url, filename):
req = urlopen(url, context=_ssl_context())
with open(filename, "wb") as f:
f.write(req.read())
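# Editor's sketch of the download-then-mark-executable sequence that the
# kubectl/kubelogin/dcos installers below all repeat; the function name is
# illustrative and not part of the original module.
def _example_install_binary(file_url, install_location):
    """Download `file_url` to `install_location` and set the execute bits."""
    _urlretrieve(file_url, install_location)
    os.chmod(install_location,
             os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)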
def _unzip(src, dest):
logger.debug('Extracting %s to %s.', src, dest)
system = platform.system()
if system in ('Linux', 'Darwin', 'Windows'):
import zipfile
with zipfile.ZipFile(src, 'r') as zipObj:
zipObj.extractall(dest)
else:
raise CLIError('The current system is not supported.')
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
"""
Downloads the dcos command line from Mesosphere
"""
system = platform.system()
if not install_location:
raise CLIError(
"No install location specified and it could not be determined from the current platform '{}'".format(
system))
base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
if system == 'Windows':
file_url = base_url.format('windows', client_version, 'dcos.exe')
elif system == 'Linux':
# TODO Support ARM CPU here
file_url = base_url.format('linux', client_version, 'dcos')
elif system == 'Darwin':
file_url = base_url.format('darwin', client_version, 'dcos')
else:
        raise CLIError('Unsupported system "{}".'.format(system))
logger.warning('Downloading client to %s', install_location)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as err:
raise CLIError('Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None, base_src_url=None,
kubelogin_version='latest', kubelogin_install_location=None,
kubelogin_base_src_url=None):
k8s_install_kubectl(cmd, client_version, install_location, base_src_url)
k8s_install_kubelogin(cmd, kubelogin_version, kubelogin_install_location, kubelogin_base_src_url)
def k8s_install_kubectl(cmd, client_version='latest', install_location=None, source_url=None):
"""
Install kubectl, a command-line interface for Kubernetes clusters.
"""
if not source_url:
source_url = "https://storage.googleapis.com/kubernetes-release/release"
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubectl'
if client_version == 'latest':
context = _ssl_context()
version = urlopen(source_url + '/stable.txt', context=context).read()
client_version = version.decode('UTF-8').strip()
else:
client_version = "v%s" % client_version
file_url = ''
system = platform.system()
base_url = source_url + '/{}/bin/{}/amd64/{}'
# ensure installation directory exists
install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
if system == 'Windows':
file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
elif system == 'Linux':
# TODO: Support ARM CPU here
file_url = base_url.format(client_version, 'linux', 'kubectl')
elif system == 'Darwin':
file_url = base_url.format(client_version, 'darwin', 'kubectl')
else:
        raise CLIError('Unsupported system "{}".'.format(system))
logger.warning('Downloading client to "%s" from "%s"', install_location, file_url)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as ex:
raise CLIError('Connection error while attempting to download client ({})'.format(ex))
if system == 'Windows': # be verbose, as the install_location likely not in Windows's search PATHs
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def k8s_install_kubelogin(cmd, client_version='latest', install_location=None, source_url=None):
"""
Install kubelogin, a client-go credential (exec) plugin implementing azure authentication.
"""
cloud_name = cmd.cli_ctx.cloud.name
if not source_url:
source_url = 'https://github.com/Azure/kubelogin/releases/download'
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubelogin'
if client_version == 'latest':
context = _ssl_context()
latest_release_url = 'https://api.github.com/repos/Azure/kubelogin/releases/latest'
if cloud_name.lower() == 'azurechinacloud':
latest_release_url = 'https://mirror.azure.cn/kubernetes/kubelogin/latest'
latest_release = urlopen(latest_release_url, context=context).read()
client_version = json.loads(latest_release)['tag_name'].strip()
else:
client_version = "v%s" % client_version
base_url = source_url + '/{}/kubelogin.zip'
file_url = base_url.format(client_version)
# ensure installation directory exists
install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
system = platform.system()
if system == 'Windows':
sub_dir, binary_name = 'windows_amd64', 'kubelogin.exe'
elif system == 'Linux':
# TODO: Support ARM CPU here
sub_dir, binary_name = 'linux_amd64', 'kubelogin'
elif system == 'Darwin':
sub_dir, binary_name = 'darwin_amd64', 'kubelogin'
else:
        raise CLIError('Unsupported system "{}".'.format(system))
with tempfile.TemporaryDirectory() as tmp_dir:
try:
download_path = os.path.join(tmp_dir, 'kubelogin.zip')
logger.warning('Downloading client to "%s" from "%s"', download_path, file_url)
_urlretrieve(file_url, download_path)
except IOError as ex:
raise CLIError('Connection error while attempting to download client ({})'.format(ex))
_unzip(download_path, tmp_dir)
download_path = os.path.join(tmp_dir, 'bin', sub_dir, binary_name)
shutil.move(download_path, install_location)
os.chmod(install_location, os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
if system == 'Windows': # be verbose, as the install_location likely not in Windows's search PATHs
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result, aad_session_key = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
try:
create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False, aad_session_key
hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal, aad_session_key
def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError('When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = _build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = _resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = _resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
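# Editor's note: _get_role_property lets the role-assignment filters above work
# on both raw dicts and SDK model objects. A minimal illustration:
def _example_role_property():
    assert _get_role_property({'scope': '/subscriptions/x'}, 'scope') == '/subscriptions/x'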
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
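# Editor's sketch of the prefix shape: up to 10 chars of the name, 16 of the
# resource group (non-alphanumerics stripped), and the first 6 chars of the
# subscription id. All values below are placeholders.
def _example_default_dns_prefix():
    sub = 'abcdef12-0000-0000-0000-000000000000'
    # -> 'myclustern-myresourcegroup-abcdef'
    return _get_default_dns_prefix('my_cluster_name!', 'my_resource_group', sub)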
def list_acs_locations(cmd, client):
return {
"productionRegions": regions_in_prod,
"previewRegions": regions_in_preview
}
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile):
agent_pool_profiles = []
default_agent_pool_profile = {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
}
if api_version == "2017-07-01":
default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osDiskSizeGB": int(agent_osdisk_size),
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
"vnetSubnetID": agent_vnet_subnet_id,
"ports": agent_ports,
"storageProfile": agent_storage_profile,
})
if agent_profiles is None:
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, {"name": "agentpool0"}))
else:
# override agentPoolProfiles by using the passed in agent_profiles
for idx, ap in enumerate(agent_profiles):
# if the user specified dnsPrefix, we honor that
# otherwise, we use the idx to avoid duplicate dns name
a = _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap)
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, a))
return agent_pool_profiles
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
def _get_user_assigned_identity_client_id(cli_ctx, resource_id):
pattern = '/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)' # pylint: disable=line-too-long
resource_id = resource_id.lower()
match = re.search(pattern, resource_id)
if match:
subscription_id = match.group(1)
resource_group_name = match.group(2)
identity_name = match.group(3)
msi_client = get_msi_client(cli_ctx, subscription_id)
try:
identity = msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
resource_name=identity_name)
except CloudError as ex:
if 'was not found' in ex.message:
raise ResourceNotFoundError("Identity {} not found.".format(resource_id))
raise ClientRequestError(ex.message)
return identity.client_id
raise InvalidArgumentValueError("Cannot parse identity name from provided resource id {}.".format(resource_id))
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
location=None, admin_username="azureuser", api_version=None, master_profile=None,
master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument
validate=False, no_wait=False):
"""Create a new Acs.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param dns_name_prefix: Sets the Domain name prefix for the cluster.
The concatenation of the domain name and the regionalized DNS zone
make up the fully qualified domain name associated with the public
IP address.
:type dns_name_prefix: str
:param name: Resource name for the container service.
:type name: str
:param ssh_key_value: Configure all linux machines with the SSH RSA
public key string. Your key should include three parts, for example
    'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm'
:type ssh_key_value: str
:param admin_username: User name for the Linux Virtual Machines.
:type admin_username: str
:param api_version: ACS API version to use
:type api_version: str
:param master_profile: MasterProfile used to describe master pool
:type master_profile: dict
:param master_vm_size: The size of master pool Virtual Machine
:type master_vm_size: str
:param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
:type master_osdisk_size: int
:param master_count: The number of masters for the cluster.
:type master_count: int
:param master_vnet_subnet_id: The vnet subnet id for master pool
:type master_vnet_subnet_id: str
:param master_storage_profile: The storage profile used for master pool.
Possible value could be StorageAccount, ManagedDisk.
:type master_storage_profile: str
:param agent_profiles: AgentPoolProfiles used to describe agent pools
:type agent_profiles: dict
:param agent_vm_size: The size of the Virtual Machine.
:type agent_vm_size: str
:param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
:type agent_osdisk_size: int
:param agent_vnet_subnet_id: The vnet subnet id for master pool
:type agent_vnet_subnet_id: str
:param agent_ports: the ports exposed on the agent pool
:type agent_ports: list
:param agent_storage_profile: The storage profile used for agent pool.
Possible value could be StorageAccount, ManagedDisk.
:type agent_storage_profile: str
:param location: Location for VM resources.
:type location: str
:param orchestrator_type: The type of orchestrator used to manage the
applications on the cluster.
:type orchestrator_type: str or :class:`orchestratorType
<Default.models.orchestratorType>`
:param tags: Tags object.
:type tags: object
:param windows: If true, the cluster will be built for running Windows container.
:type windows: bool
    :param admin_password: The administration password for Windows nodes. Only available if --windows=true
:type admin_password: str
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DeploymentExtended
<Default.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# if api-version is not specified, or specified in a version not supported
# override based on location
if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
if location in regions_in_preview:
api_version = "2017-07-01" # 2017-07-01 supported in the preview locations
else:
api_version = "2017-01-31" # 2017-01-31 applied to other locations
if orchestrator_type.lower() == 'kubernetes':
principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
dns_name_prefix, location, name)
client_secret = principal_obj.get("client_secret")
service_principal = principal_obj.get("service_principal")
elif windows:
raise CLIError('--windows is only supported for Kubernetes clusters')
    # set location if not specified
if not location:
location = '[resourceGroup().location]'
# set os_type
os_type = 'Linux'
if windows:
os_type = 'Windows'
    # set agent_ports if not specified
if not agent_ports:
agent_ports = []
# get windows_profile
windows_profile = _generate_windows_profile(windows, admin_username, admin_password)
# The resources.properties fields should match with ContainerServices' api model
master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile)
agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile)
outputs = _generate_outputs(name, orchestrator_type, admin_username)
properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
resource = {
"apiVersion": api_version,
"location": location,
"type": "Microsoft.ContainerService/containerServices",
"name": name,
"tags": tags,
"properties": properties,
}
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"resources": [
resource,
],
"outputs": outputs,
}
params = {}
if service_principal is not None and client_secret is not None:
properties["servicePrincipalProfile"] = {
"clientId": service_principal,
"secret": "[parameters('clientSecret')]",
}
template["parameters"] = {
"clientSecret": {
"type": "secureString",
"metadata": {
"description": "The client secret for the service principal"
}
}
}
params = {
"clientSecret": {
"value": client_secret
}
}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return _invoke_deployment(cmd, resource_group_name, deployment_name,
template, params, validate, no_wait)
except CloudError as ex:
retry_exception = ex
if 'is not valid according to the validation procedure' in ex.message or \
'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
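# Editor's sketch, illustrative only: the three helpers above form a small
# credential cache keyed by subscription id. The secret and service principal
# values here are placeholders, not real credentials.
def _example_sp_cache_roundtrip(subscription_id):
    store_acs_service_principal(subscription_id, 'placeholder-secret', 'http://example-sp')
    return load_acs_service_principal(subscription_id)
    # -> {'client_secret': 'placeholder-secret', 'service_principal': 'http://example-sp'}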
def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
from azure.cli.core.profiles import ResourceType
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
subscription_id=subscription_id).deployments
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
validation_poller = smc.validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, deployment)
if validate:
return smc.validate(resource_group_name, deployment_name, properties)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def k8s_get_credentials(cmd, client, name, resource_group_name,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
ssh_key_file=None,
overwrite_existing=False):
"""Download and install kubectl credentials from the cluster master
:param name: The name of the cluster.
:type name: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param path: Where to install the kubectl config file
:type path: str
:param ssh_key_file: Path to an SSH key file to use
:type ssh_key_file: str
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
dns_prefix = acs_info.master_profile.dns_prefix # pylint: disable=no-member
location = acs_info.location # pylint: disable=no-member
user = acs_info.linux_profile.admin_username # pylint: disable=no-member
_mkdir_p(os.path.dirname(path))
path_candidate = path
ix = 0
while os.path.exists(path_candidate):
ix += 1
path_candidate = '{}-{}-{}'.format(path, name, ix)
# TODO: this only works for public cloud, need other casing for national clouds
acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
'.kube/config', path_candidate, key_filename=ssh_key_file)
# merge things
if path_candidate != path:
try:
merge_kubernetes_configurations(path, path_candidate, overwrite_existing)
except yaml.YAMLError as exc:
logger.warning('Failed to merge credentials to kube config file: %s', exc)
logger.warning('The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
if not addition.get(key, False):
return
if not existing.get(key):
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if not i.get('name', False) or not j.get('name', False):
continue
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
raise
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
existing = load_kubernetes_configuration(existing_file)
    addition = load_kubernetes_configuration(addition_file)
    # fail fast if the addition could not be parsed, before it is dereferenced below
    if addition is None:
        raise CLIError('failed to load additional configuration from {}'.format(addition_file))
    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
print(msg)
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
"""
Gets the ContainerService object from Azure REST API.
:param name: ACS resource name
:type name: String
:param resource_group_name: Resource group name
:type resource_group_name: String
"""
container_services = cf_container_services(cli_ctx, None)
return container_services.get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
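# Editor's note: on Python 3 the helper above is equivalent to
# os.makedirs(path, exist_ok=True); the EAFP form is kept for Python 2 support.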
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
instance = client.get(resource_group_name, container_service_name)
instance.agent_pool_profiles[0].count = new_agent_count # pylint: disable=no-member
# null out the service principal because otherwise validation complains
if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
instance.service_principal_profile = None
# null out the windows profile so that validation doesn't complain about not having the admin password
instance.windows_profile = None
return client.create_or_update(resource_group_name, container_service_name, instance)
def list_container_services(cmd, client, resource_group_name=None):
''' List Container Services. '''
svc_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return list(svc_list)
def show_service_principal(client, identifier):
object_id = _resolve_service_principal(client, identifier)
return client.get(object_id)
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError("service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds,
required_resource_access=required_resource_accesses)
try:
result = client.create(app_create_param, raw=True)
return result.output, result.response.headers["ocp-aad-session-key"]
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def update_application(client, object_id, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
try:
if key_creds:
client.update_key_credentials(object_id, key_creds)
if password_creds:
client.update_password_credentials(object_id, password_creds)
if reply_urls:
client.patch(object_id, ApplicationUpdateParameters(reply_urls=reply_urls))
return
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError('specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = dateutil.parser.parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = dateutil.parser.parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
return _create_role_assignment(cli_ctx,
role, assignee, resource_group_name,
scope, resolve_assignee=is_service_principal)
def _create_role_assignment(cli_ctx, role, assignee,
resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import ResourceType, get_sdk
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
# If the cluster has service principal resolve the service principal client id to get the object id,
# if not use MSI object id.
object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
assignment_name = uuid.uuid4()
custom_headers = None
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
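# Editor's sketch of the scope shapes _build_role_scope can produce; the
# subscription id is a placeholder.
def _example_role_scopes():
    sub = '00000000-0000-0000-0000-000000000000'
    assert _build_role_scope(None, None, sub) == '/subscriptions/' + sub
    assert (_build_role_scope('myGroup', None, sub) ==
            '/subscriptions/' + sub + '/resourceGroups/myGroup')
    # passing both a scope and a resource group raises a CLIError (redundant)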
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError("No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
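# Editor's note: _update_dict is a non-mutating dict merge where dict2 wins,
# e.g. _update_dict({'count': 1, 'osType': 'Linux'}, {'count': 3})
#      -> {'count': 3, 'osType': 'Linux'}; neither input is modified.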
def subnet_role_assignment_exists(cli_ctx, scope):
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
def aks_check_acr(cmd, client, resource_group_name, name, acr):
if not which("kubectl"):
raise ValidationError("Can not find kubectl executable in PATH")
_, browse_path = tempfile.mkstemp()
aks_get_credentials(
cmd, client, resource_group_name, name, admin=False, path=browse_path
)
# Get kubectl minor version
kubectl_minor_version = -1
try:
cmd = f"kubectl version -o json --kubeconfig {browse_path}"
output = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
jsonS, _ = output.communicate()
kubectl_version = json.loads(jsonS)
kubectl_minor_version = int(kubectl_version["clientVersion"]["minor"])
kubectl_server_minor_version = int(kubectl_version["serverVersion"]["minor"])
kubectl_server_patch = int(kubectl_version["serverVersion"]["gitVersion"].split(".")[-1])
if kubectl_server_minor_version < 17 or (kubectl_server_minor_version == 17 and kubectl_server_patch < 14):
            logger.warning('There is a known issue for Kubernetes versions < 1.17.14 when connecting to '
                           'ACR using MSI. See https://github.com/kubernetes/kubernetes/pull/96355 for '
                           'more information.')
except subprocess.CalledProcessError as err:
raise ValidationError("Could not find kubectl minor version: {}".format(err))
if kubectl_minor_version == -1:
raise ValidationError("Failed to get kubectl version")
podName = "canipull-" + str(uuid.uuid4())
overrides = {
"spec": {
"restartPolicy": "Never",
"hostNetwork": True,
"containers": [
{
"securityContext": {"runAsUser": 0},
"name": podName,
"image": CONST_CANIPULL_IMAGE,
"args": ["-v6", acr],
"stdin": True,
"stdinOnce": True,
"tty": True,
"volumeMounts": [
{"name": "azurejson", "mountPath": "/etc/kubernetes"},
{"name": "sslcerts", "mountPath": "/etc/ssl/certs"},
],
}
],
"tolerations": [
{"key": "CriticalAddonsOnly", "operator": "Exists"},
{"effect": "NoExecute", "operator": "Exists"},
],
"volumes": [
{"name": "azurejson", "hostPath": {"path": "/etc/kubernetes"}},
{"name": "sslcerts", "hostPath": {"path": "/etc/ssl/certs"}},
],
}
}
try:
        kubectl_cmd = [
"kubectl",
"run",
"--kubeconfig",
browse_path,
"--rm",
"--quiet",
"--image",
CONST_CANIPULL_IMAGE,
"--overrides",
json.dumps(overrides),
"-it",
podName,
]
        # Support kubectl versions < 1.18
        if kubectl_minor_version < 18:
            kubectl_cmd += ["--generator=run-pod/v1"]
        output = subprocess.check_output(
            kubectl_cmd,
universal_newlines=True,
)
except subprocess.CalledProcessError as err:
raise CLIError("Failed to check the ACR: {}".format(err))
if output:
print(output)
else:
raise CLIError("Failed to check the ACR.")
# pylint: disable=too-many-statements,too-many-branches
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False,
listen_address='127.0.0.1', listen_port='8001'):
# verify the kube-dashboard addon was not disabled
instance = client.get(resource_group_name, name)
addon_profiles = instance.addon_profiles or {}
# addon name is case insensitive
addon_profile = next((addon_profiles[k] for k in addon_profiles
if k.lower() == CONST_KUBE_DASHBOARD_ADDON_NAME.lower()),
ManagedClusterAddonProfile(enabled=False))
# open portal view if addon is not enabled or k8s version >= 1.19.0
if StrictVersion(instance.kubernetes_version) >= StrictVersion('1.19.0') or (not addon_profile.enabled):
subscription_id = get_subscription_id(cmd.cli_ctx)
dashboardURL = (
cmd.cli_ctx.cloud.endpoints.portal + # Azure Portal URL (https://portal.azure.com for public cloud)
('/#resource/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService'
'/managedClusters/{2}/workloads').format(subscription_id, resource_group_name, name)
)
if in_cloud_console():
logger.warning('To view the Kubernetes resources view, please open %s in a new tab', dashboardURL)
else:
logger.warning('Kubernetes resources view on %s', dashboardURL)
if not disable_browser:
webbrowser.open_new_tab(dashboardURL)
return
# otherwise open the kube-dashboard addon
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard pod: {}'.format(err))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise CLIError("Couldn't find the Kubernetes dashboard pod.")
# find the port
try:
dashboard_port = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--selector", "k8s-app=kubernetes-dashboard",
"--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
)
# output format: b"'{port}'"
dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard port: {}'.format(err))
# use https if dashboard container is using https
if dashboard_port == 8443:
protocol = 'https'
else:
protocol = 'http'
proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
protocol)
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console():
# TODO: better error handling here.
response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
result = json.loads(response.text)
dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
result['url'], protocol)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
json={"url": dashboardURL})
logger.warning('To view the console, please open %s in a new tab', dashboardURL)
else:
logger.warning('Proxy running on %s', proxy_url)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(dashboardURL)
try:
try:
subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
listen_address, "--port", listen_port], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
            # bytes.find returns -1 when the substring is absent (which is truthy),
            # so test membership instead to detect kubectl builds without --address.
            if b'unknown flag: --address' in err.output:
if listen_address != '127.0.0.1':
logger.warning('"--address" is only supported in kubectl v1.13 and later.')
logger.warning('The "--listen-address" argument will be ignored.')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy", "--port", listen_port])
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
if in_cloud_console():
            requests.post('http://localhost:8888/closeport/{0}'.format(listen_port))
def _trim_nodepoolname(nodepool_name):
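    """Default to "nodepool1" and truncate to 12 characters (the ACS RP appends a suffix to the name)."""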
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _validate_ssh_key(no_ssh_key, ssh_key_value):
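    """Unless --no-ssh-key was passed, raise CLIError when ssh_key_value is missing or not a valid RSA public key."""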
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
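    """
    Grant the 'Monitoring Metrics Publisher' role on the cluster to its
    service principal, or to the omsagent MSI when no SP is used.
    """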
service_principal_msi_id = None
    # If a valid service principal exists, assign the role to it;
    # otherwise grant the permission to the addon's MSI.
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id.lower() != 'msi'
):
logger.info('valid service principal exists, using it')
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
(CONST_MONITORING_ADDON_NAME in result.addon_profiles) and
(hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME], 'identity')) and
(hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity, 'object_id'))
):
logger.info('omsagent MSI exists, using it')
service_principal_msi_id = result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
service_principal_msi_id, is_service_principal, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for Monitoring addon. '
'Are you an Owner on this subscription?')
else:
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
def _add_ingress_appgw_addon_role_assignment(result, cmd):
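    """
    Grant the ingress-appgw addon identity (SP or MSI) the roles it needs:
    Contributor on an existing Application Gateway's resource group, Network
    Contributor on a provided subnet, or Contributor on the node VNet when
    the gateway is to be created from a subnet CIDR.
    """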
service_principal_msi_id = None
    # If a valid service principal exists, assign the roles to it;
    # otherwise grant the permissions to the addon's MSI.
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
        result.service_principal_profile.client_id.lower() != 'msi'
):
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
(CONST_INGRESS_APPGW_ADDON_NAME in result.addon_profiles) and
(hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME], 'identity')) and
(hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity, 'object_id'))
):
service_principal_msi_id = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
config = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config
from msrestazure.tools import parse_resource_id, resource_id
if CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID in config:
appgw_id = config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID]
parsed_appgw_id = parse_resource_id(appgw_id)
appgw_group_id = resource_id(subscription=parsed_appgw_id["subscription"],
resource_group=parsed_appgw_id["resource_group"])
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_msi_id, is_service_principal, scope=appgw_group_id):
logger.warning('Could not create a role assignment for application gateway: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', appgw_id, CONST_INGRESS_APPGW_ADDON_NAME)
if CONST_INGRESS_APPGW_SUBNET_ID in config:
subnet_id = config[CONST_INGRESS_APPGW_SUBNET_ID]
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
service_principal_msi_id, is_service_principal, scope=subnet_id):
logger.warning('Could not create a role assignment for subnet: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', subnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
if CONST_INGRESS_APPGW_SUBNET_CIDR in config:
if result.agent_pool_profiles[0].vnet_subnet_id is not None:
parsed_subnet_vnet_id = parse_resource_id(result.agent_pool_profiles[0].vnet_subnet_id)
vnet_id = resource_id(subscription=parsed_subnet_vnet_id["subscription"],
resource_group=parsed_subnet_vnet_id["resource_group"],
namespace="Microsoft.Network",
type="virtualNetworks",
name=parsed_subnet_vnet_id["name"])
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_msi_id, is_service_principal, scope=vnet_id):
logger.warning('Could not create a role assignment for virtual network: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', vnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
def _add_virtual_node_role_assignment(cmd, result, vnet_subnet_id):
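    """Grant 'Contributor' on the cluster VNet to the SP or the virtual-node addon MSI."""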
# Remove trailing "/subnets/<SUBNET_NAME>" to get the vnet id
vnet_id = vnet_subnet_id.rpartition('/')[0]
vnet_id = vnet_id.rpartition('/')[0]
service_principal_msi_id = None
is_service_principal = False
os_type = 'Linux'
addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
    # If a valid service principal exists, assign the role to it;
    # otherwise grant the permission to the addon's MSI.
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id.lower() != 'msi'
):
logger.info('valid service principal exists, using it')
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
(addon_name in result.addon_profiles) and
(hasattr(result.addon_profiles[addon_name], 'identity')) and
(hasattr(result.addon_profiles[addon_name].identity, 'object_id'))
):
logger.info('virtual node MSI exists, using it')
service_principal_msi_id = result.addon_profiles[addon_name].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_msi_id, is_service_principal, scope=vnet_id):
logger.warning('Could not create a role assignment for virtual node addon. '
'Are you an Owner on this subscription?')
else:
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
# pylint: disable=too-many-statements,too-many-branches
def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals
dns_name_prefix=None,
location=None,
admin_username="azureuser",
windows_admin_username=None,
windows_admin_password=None,
enable_ahub=False,
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_type=None,
node_osdisk_size=0,
node_osdisk_diskencryptionset_id=None,
node_count=3,
nodepool_name="nodepool1",
nodepool_tags=None,
nodepool_labels=None,
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
enable_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
network_plugin=None,
network_policy=None,
uptime_sla=False,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
outbound_type=None,
enable_addons=None,
workspace_resource_id=None,
vnet_subnet_id=None,
ppg=None,
max_pods=0,
min_count=None,
max_count=None,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
api_server_authorized_ip_ranges=None,
enable_private_cluster=False,
private_dns_zone=None,
fqdn_subdomain=None,
enable_managed_identity=True,
assign_identity=None,
attach_acr=None,
enable_aad=False,
aad_admin_group_object_ids=None,
aci_subnet_name=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
no_wait=False,
yes=False):
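    """
    Create a managed Kubernetes cluster.

    Example (illustrative; resource names are placeholders):
        az aks create -g MyResourceGroup -n MyCluster --node-count 3 --enable-managed-identity
    """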
_validate_ssh_key(no_ssh_key, ssh_key_value)
subscription_id = get_subscription_id(cmd.cli_ctx)
if dns_name_prefix and fqdn_subdomain:
raise MutuallyExclusiveArgumentError('--dns-name-prefix and --fqdn-subdomain cannot be used at same time')
if not dns_name_prefix and not fqdn_subdomain:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
load_balancer_sku = set_load_balancer_sku(load_balancer_sku, kubernetes_version)
if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
raise CLIError('--api-server-authorized-ip-ranges can only be used with standard load balancer')
agent_pool_profile = ManagedClusterAgentPoolProfile(
name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
tags=nodepool_tags,
node_labels=nodepool_labels,
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
vnet_subnet_id=vnet_subnet_id,
proximity_placement_group_id=ppg,
availability_zones=zones,
enable_node_public_ip=enable_node_public_ip,
node_public_ip_prefix_id=node_public_ip_prefix_id,
max_pods=int(max_pods) if max_pods else None,
type=vm_set_type,
mode="System"
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool_profile.os_disk_type = node_osdisk_type
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
windows_profile = None
if windows_admin_username or windows_admin_password:
        # Prompt for the username when only windows_admin_password was provided.
if windows_admin_username is None:
try:
from knack.prompting import prompt
windows_admin_username = prompt('windows_admin_username: ')
            # If the user enters an empty username here, the admin_username
            # validation in ManagedClusterWindowsProfile will still fail later.
except NoTTYException:
raise CLIError('Please specify username for Windows in non-interactive mode.')
if windows_admin_password is None:
try:
windows_admin_password = prompt_pass(
msg='windows-admin-password: ', confirm=True)
except NoTTYException:
raise CLIError(
'Please specify both username and password in non-interactive mode.')
windows_license_type = None
if enable_ahub:
windows_license_type = 'Windows_Server'
windows_profile = ManagedClusterWindowsProfile(
admin_username=windows_admin_username,
admin_password=windows_admin_password,
license_type=windows_license_type)
    # If the customer explicitly provides a service principal, disable managed identity.
if service_principal and client_secret:
enable_managed_identity = False
    # Skip creating a service principal profile if the cluster uses managed
    # identity and the customer did not explicitly provide a service principal.
service_principal_profile = None
principal_obj = None
    if not (enable_managed_identity and not service_principal and not client_secret):
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
fqdn_subdomain=fqdn_subdomain, location=location, name=name)
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"),
key_vault_secret_ref=None)
need_post_creation_vnet_permission_granting = False
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
# if service_principal_profile is None, then this cluster is an MSI cluster,
# and the service principal does not exist. Two cases:
# 1. For system assigned identity, we just tell user to grant the
# permission after the cluster is created to keep consistent with portal experience.
# 2. For user assigned identity, we can grant needed permission to
# user provided user assigned identity before creating managed cluster.
if service_principal_profile is None and not assign_identity:
msg = ('It is highly recommended to use USER assigned identity '
                   '(option --assign-identity) when you want to bring your own '
'subnet, which will have no latency for the role assignment to '
'take effect. When using SYSTEM assigned identity, '
'azure-cli will grant Network Contributor role to the '
'system assigned identity after the cluster is created, and '
'the role assignment will take some time to take effect, see '
'https://docs.microsoft.com/en-us/azure/aks/use-managed-identity, '
'proceed to create cluster with system assigned identity?')
if not yes and not prompt_y_n(msg, default="n"):
return None
need_post_creation_vnet_permission_granting = True
else:
scope = vnet_subnet_id
identity_client_id = ""
if assign_identity:
identity_client_id = _get_user_assigned_identity_client_id(cmd.cli_ctx, assign_identity)
else:
identity_client_id = service_principal_profile.client_id
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
identity_client_id, scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
load_balancer_profile = create_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
if attach_acr:
if enable_managed_identity:
if no_wait:
raise CLIError('When --attach-acr and --enable-managed-identity are both specified, '
'--no-wait is not allowed, please wait until the whole operation succeeds.')
# Attach acr operation will be handled after the cluster is created
else:
_ensure_aks_acr(cmd.cli_ctx,
client_id=service_principal_profile.client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
outbound_type = _set_outbound_type(outbound_type, vnet_subnet_id, load_balancer_sku, load_balancer_profile)
network_profile = None
if any([network_plugin, pod_cidr, service_cidr, dns_service_ip,
docker_bridge_address, network_policy]):
if not network_plugin:
raise CLIError('Please explicitly specify the network plugin type')
if pod_cidr and network_plugin == "azure":
raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address,
network_policy=network_policy,
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type
)
else:
if load_balancer_sku.lower() == "standard" or load_balancer_profile:
network_profile = ContainerServiceNetworkProfile(
network_plugin="kubenet",
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type,
)
if load_balancer_sku.lower() == "basic":
network_profile = ContainerServiceNetworkProfile(
load_balancer_sku=load_balancer_sku.lower(),
)
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id,
aci_subnet_name,
vnet_subnet_id,
appgw_name,
appgw_subnet_cidr,
appgw_id,
appgw_subnet_id,
appgw_watch_namespace,
enable_sgxquotehelper
)
monitoring = False
if CONST_MONITORING_ADDON_NAME in addon_profiles:
monitoring = True
_ensure_container_insights_for_monitoring(cmd, addon_profiles[CONST_MONITORING_ADDON_NAME])
# addon is in the list and is enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in addon_profiles and \
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
os_type = 'Linux'
enable_virtual_node = False
if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in addon_profiles:
enable_virtual_node = True
aad_profile = None
if enable_aad:
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('"--enable-aad" cannot be used together with '
'"--aad-client-app-id/--aad-server-app-id/--aad-server-app-secret"')
aad_profile = ManagedClusterAADProfile(
managed=True,
admin_group_object_ids=_parse_comma_separated_list(aad_admin_group_object_ids),
tenant_id=aad_tenant_id
)
else:
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
if aad_tenant_id is None:
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
api_server_access_profile = None
if enable_private_cluster and load_balancer_sku.lower() != "standard":
raise CLIError("Please use standard load balancer for private cluster")
if api_server_authorized_ip_ranges or enable_private_cluster:
api_server_access_profile = _populate_api_server_access_profile(
api_server_authorized_ip_ranges,
enable_private_cluster=enable_private_cluster
)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
identity = None
if not enable_managed_identity and assign_identity:
raise ArgumentUsageError('--assign-identity can only be specified when --enable-managed-identity is specified')
if enable_managed_identity and not assign_identity:
identity = ManagedClusterIdentity(
type="SystemAssigned"
)
elif enable_managed_identity and assign_identity:
user_assigned_identity = {
assign_identity: ManagedClusterIdentityUserAssignedIdentitiesValue()
}
identity = ManagedClusterIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identity
)
mc = ManagedCluster(
location=location,
tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=not disable_rbac,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
windows_profile=windows_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile,
auto_scaler_profile=cluster_autoscaler_profile,
api_server_access_profile=api_server_access_profile,
identity=identity,
disk_encryption_set_id=node_osdisk_diskencryptionset_id
)
use_custom_private_dns_zone = False
if private_dns_zone:
if not enable_private_cluster:
raise InvalidArgumentValueError("Invalid private dns zone for public cluster. "
"It should always be empty for public cluster")
mc.api_server_access_profile.private_dns_zone = private_dns_zone
from msrestazure.tools import is_valid_resource_id
if private_dns_zone.lower() != CONST_PRIVATE_DNS_ZONE_SYSTEM:
if is_valid_resource_id(private_dns_zone):
use_custom_private_dns_zone = True
else:
raise InvalidArgumentValueError(private_dns_zone + " is not a valid Azure resource ID.")
if fqdn_subdomain:
if not use_custom_private_dns_zone:
raise ArgumentUsageError("--fqdn-subdomain should only be used for "
"private cluster with custom private dns zone")
mc.fqdn_subdomain = fqdn_subdomain
if uptime_sla:
mc.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
    # Add the AAD session key header. If principal_obj is None (for example,
    # when the cluster uses managed identity), the header is unnecessary and
    # is simply omitted.
custom_headers = None
if principal_obj:
custom_headers = {'Ocp-Aad-Session-Key': principal_obj.get("aad_session_key")}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
need_pull_for_result = (monitoring or
(enable_managed_identity and attach_acr) or
ingress_appgw_addon_enabled or
enable_virtual_node or
need_post_creation_vnet_permission_granting)
if need_pull_for_result:
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc))
else:
result = sdk_no_wait(no_wait,
client.create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc,
custom_headers=custom_headers)
if monitoring:
cloud_name = cmd.cli_ctx.cloud.name
# add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
# mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(result, cluster_resource_id, cmd)
if enable_managed_identity and attach_acr:
if result.identity_profile is None or result.identity_profile["kubeletidentity"] is None:
logger.warning('Your cluster is successfully created, but we failed to attach acr to it, '
                                   'you can manually grant permission to the identity named <CLUSTER_NAME>-agentpool '
'in MC_ resource group to give it permission to pull from ACR.')
else:
kubelet_identity_client_id = result.identity_profile["kubeletidentity"].client_id
_ensure_aks_acr(cmd.cli_ctx,
client_id=kubelet_identity_client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if ingress_appgw_addon_enabled:
_add_ingress_appgw_addon_role_assignment(result, cmd)
if enable_virtual_node:
_add_virtual_node_role_assignment(cmd, result, vnet_subnet_id)
if need_post_creation_vnet_permission_granting:
if not _create_role_assignment(cmd.cli_ctx, 'Network Contributor',
result.identity.principal_id, scope=vnet_subnet_id,
resolve_assignee=False):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
return result
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
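    """Disable the given comma-separated addons and push the updated managed cluster."""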
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons,
workspace_resource_id=None,
subnet_name=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
no_wait=False):
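    """
    Enable the given comma-separated addons. When an addon (monitoring,
    ingress-appgw, virtual-node) needs a post-create role assignment, the
    update is awaited so the assignment can be made on the result.
    """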
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
workspace_resource_id=workspace_resource_id,
subnet_name=subnet_name,
appgw_name=appgw_name,
appgw_subnet_cidr=appgw_subnet_cidr,
appgw_id=appgw_id,
appgw_subnet_id=appgw_subnet_id,
appgw_watch_namespace=appgw_watch_namespace,
enable_sgxquotehelper=enable_sgxquotehelper,
no_wait=no_wait)
enable_monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles \
and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles \
and instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
os_type = 'Linux'
virtual_node_addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
enable_virtual_node = (virtual_node_addon_name in instance.addon_profiles and
instance.addon_profiles[virtual_node_addon_name].enabled)
need_pull_for_result = enable_monitoring or ingress_appgw_addon_enabled or enable_virtual_node
if need_pull_for_result:
if enable_monitoring:
_ensure_container_insights_for_monitoring(cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME])
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(resource_group_name, name, instance))
if enable_monitoring:
cloud_name = cmd.cli_ctx.cloud.name
# mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(result, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
_add_ingress_appgw_addon_role_assignment(result, cmd)
if enable_virtual_node:
            # All agent pools reside in the same vnet; a vnet-level Contributor
            # role is granted in the helper below, so any agent pool works here.
random_agent_pool = result.agent_pool_profiles[0]
if random_agent_pool.vnet_subnet_id != "":
_add_virtual_node_role_assignment(cmd, result, random_agent_pool.vnet_subnet_id)
            # Otherwise the cluster is not using a custom VNet; the permission is
            # already granted by the AKS RP, so there is nothing to do client-side.
else:
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name, name, instance)
return result
def aks_get_versions(cmd, client, location):
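    """List the orchestrator versions available for managed clusters in the given location."""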
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
overwrite_existing=False, context_name=None):
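    """Fetch admin or user credentials for the cluster and merge the returned kubeconfig into the file at path."""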
credentialResults = None
if admin:
credentialResults = client.list_cluster_admin_credentials(resource_group_name, name)
else:
credentialResults = client.list_cluster_user_credentials(resource_group_name, name)
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8')
_print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
def aks_list(cmd, client, resource_group_name=None):
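    """List managed clusters in the resource group, or across the subscription when no group is given."""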
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_nulls(list(managed_clusters))
def aks_show(cmd, client, resource_group_name, name):
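    """Return the managed cluster with null-valued properties removed for cleaner output."""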
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
reset_service_principal=False,
reset_aad=False,
service_principal=None,
client_secret=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_client_app_id=None,
aad_tenant_id=None,
no_wait=False):
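    """Reset either the cluster's service principal credentials or its (legacy) AAD profile."""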
if bool(reset_service_principal) == bool(reset_aad):
raise CLIError('usage error: --reset-service-principal | --reset-aad-profile')
if reset_service_principal:
if service_principal is None or client_secret is None:
raise CLIError('usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
return sdk_no_wait(no_wait,
client.reset_service_principal_profile,
resource_group_name,
name, service_principal, client_secret)
if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
'--aad-server-app-secret SECRET [--aad-tenant-id ID]')
parameters = {
'clientAppID': aad_client_app_id,
'serverAppID': aad_server_app_id,
'serverAppSecret': aad_server_app_secret,
'tenantID': aad_tenant_id
}
return sdk_no_wait(no_wait,
client.reset_aad_profile,
resource_group_name,
name, parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
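    """
    Scale the node count of a node pool. Fails if the cluster autoscaler is
    enabled on the targeted pool.

    Example (illustrative; resource names are placeholders):
        az aks scale -g MyResourceGroup -n MyCluster --node-count 5 --nodepool-name nodepool1
    """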
instance = client.get(resource_group_name, name)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There is more than one node pool in the cluster. '
                       'Please specify the node pool name, or use the "az aks nodepool" command to scale a node pool.')
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
if agent_profile.enable_auto_scaling:
raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
# pylint: disable=inconsistent-return-statements
def aks_update(cmd, client, resource_group_name, name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
min_count=None, max_count=None,
uptime_sla=False,
no_uptime_sla=False,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
attach_acr=None,
detach_acr=None,
api_server_authorized_ip_ranges=None,
enable_aad=False,
aad_tenant_id=None,
aad_admin_group_object_ids=None,
enable_ahub=False,
disable_ahub=False,
windows_admin_password=None,
no_wait=False):
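    """
    Update autoscaler, load balancer, ACR, AAD, AHUB, uptime SLA, or API
    server access settings on a managed cluster. At least one of the update
    flags listed in the error message below must be provided.

    Example (illustrative; resource names are placeholders):
        az aks update -g MyResourceGroup -n MyCluster --enable-cluster-autoscaler --min-count 1 --max-count 5
    """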
update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
update_aad_profile = not (aad_tenant_id is None and aad_admin_group_object_ids is None)
# pylint: disable=too-many-boolean-expressions
if (update_autoscaler != 1 and cluster_autoscaler_profile is None and
not update_lb_profile and
not attach_acr and
not detach_acr and
not uptime_sla and
not no_uptime_sla and
api_server_authorized_ip_ranges is None and
not enable_aad and
not update_aad_profile and
not enable_ahub and
not disable_ahub and
not windows_admin_password):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--cluster-autoscaler-profile" or '
'"--load-balancer-managed-outbound-ip-count" or'
'"--load-balancer-outbound-ips" or '
'"--load-balancer-outbound-ip-prefixes" or'
'"--load-balancer-outbound-ports" or'
'"--load-balancer-idle-timeout" or'
'"--attach-acr" or "--detach-acr" or'
'"--uptime-sla" or'
'"--no-uptime-sla" or '
'"--api-server-authorized-ip-ranges" or '
'"--enable-aad" or '
'"--aad-tenant-id" or '
'"--aad-admin-group-object-ids" or '
'"--enable-ahub" or '
'"--disable-ahub" or '
'"--windows-admin-password"')
instance = client.get(resource_group_name, name)
# For multi-agent pool, use the az aks nodepool command
if update_autoscaler > 0 and len(instance.agent_pool_profiles) > 1:
        raise CLIError('There is more than one node pool in the cluster. Please use the "az aks nodepool" command '
                       'to update per node pool auto scaler settings')
_validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.agent_pool_profiles[0].enable_auto_scaling:
            logger.warning('Cluster autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks update --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
return None
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
instance.agent_pool_profiles[0].enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
raise CLIError('Cluster autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
'to enable cluster with min-count and max-count.')
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already disabled for this node pool.')
return None
instance.agent_pool_profiles[0].enable_auto_scaling = False
instance.agent_pool_profiles[0].min_count = None
instance.agent_pool_profiles[0].max_count = None
# if intention is to clear autoscaler profile
if cluster_autoscaler_profile == {}:
instance.auto_scaler_profile = {}
# else profile is provided, update instance profile if it exists
elif cluster_autoscaler_profile:
instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__,
dict((key.replace("-", "_"), value)
for (key, value) in cluster_autoscaler_profile.items())) \
if instance.auto_scaler_profile else cluster_autoscaler_profile
subscription_id = get_subscription_id(cmd.cli_ctx)
client_id = ""
if _is_msi_cluster(instance):
if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
'Please do not set --attach-acr or --detach-acr. '
'You can manually grant or revoke permission to the identity named '
                           '<CLUSTER_NAME>-agentpool in MC_ resource group to access ACR.')
client_id = instance.identity_profile["kubeletidentity"].client_id
else:
client_id = instance.service_principal_profile.client_id
if not client_id:
raise CLIError('Cannot get the AKS cluster\'s service principal.')
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if detach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=detach_acr,
subscription_id=subscription_id,
detach=True)
if uptime_sla and no_uptime_sla:
raise CLIError('Cannot specify "--uptime-sla" and "--no-uptime-sla" at the same time.')
if uptime_sla:
instance.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
if no_uptime_sla:
instance.sku = ManagedClusterSKU(
name="Basic",
tier="Free"
)
if update_lb_profile:
instance.network_profile.load_balancer_profile = update_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout,
instance.network_profile.load_balancer_profile)
# empty string is valid as it disables ip whitelisting
if api_server_authorized_ip_ranges is not None:
instance.api_server_access_profile = \
_populate_api_server_access_profile(api_server_authorized_ip_ranges, instance=instance)
if enable_aad:
if instance.aad_profile is not None and instance.aad_profile.managed:
raise CLIError('Cannot specify "--enable-aad" if managed AAD is already enabled')
instance.aad_profile = ManagedClusterAADProfile(
managed=True
)
if update_aad_profile:
if instance.aad_profile is None or not instance.aad_profile.managed:
raise CLIError('Cannot specify "--aad-tenant-id/--aad-admin-group-object-ids"'
' if managed AAD is not enabled')
if aad_tenant_id is not None:
instance.aad_profile.tenant_id = aad_tenant_id
if aad_admin_group_object_ids is not None:
instance.aad_profile.admin_group_object_ids = _parse_comma_separated_list(aad_admin_group_object_ids)
if enable_ahub and disable_ahub:
raise CLIError('Cannot specify "--enable-ahub" and "--disable-ahub" at the same time')
if enable_ahub:
instance.windows_profile.license_type = 'Windows_Server'
if disable_ahub:
instance.windows_profile.license_type = 'None'
if windows_admin_password:
instance.windows_profile.admin_password = windows_admin_password
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
# pylint: disable=unused-argument,inconsistent-return-statements,too-many-return-statements
def aks_upgrade(cmd,
client,
resource_group_name, name,
kubernetes_version='',
control_plane_only=False,
node_image_only=False,
no_wait=False,
yes=False):
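    """
    Upgrade the cluster to a newer Kubernetes version, optionally upgrading
    the control plane only, or upgrade just the node image of every node
    pool with --node-image-only.
    """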
msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
if not yes and not prompt_y_n(msg, default="n"):
return None
instance = client.get(resource_group_name, name)
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade the node image version. '
                       'If you only want to upgrade the node image version, please use "--node-image-only".')
if node_image_only:
msg = "This node image upgrade operation will run across every node pool in the cluster" \
"and might take a while, do you wish to continue?"
if not yes and not prompt_y_n(msg, default="n"):
return None
        # This is client-side convenience so customers can run "az aks upgrade" to upgrade all
        # nodepools of a cluster. The SDK only supports upgrading a single nodepool at a time.
for agent_pool_profile in instance.agent_pool_profiles:
if vmas_cluster:
raise CLIError('This cluster is not using VirtualMachineScaleSets. Node image upgrade only operation '
'can only be applied on VirtualMachineScaleSets cluster.')
agent_pool_client = cf_agent_pools(cmd.cli_ctx)
_upgrade_single_nodepool_image_version(True, agent_pool_client,
resource_group_name, name, agent_pool_profile.name)
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
upgrade_all = False
instance.kubernetes_version = kubernetes_version
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name):
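    """Thin wrapper over the agent pool upgrade_node_image_version long-running operation."""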
return sdk_no_wait(no_wait, client.upgrade_node_image_version, resource_group_name, cluster_name, nodepool_name)
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None,
endpoint_type='Public', prompt=False):
"""
Use Azure Dev Spaces with a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
:param resource_group_name: Name of resource group. You can configure the default group. \
Using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param update: Update to the latest Azure Dev Spaces client components.
:type update: bool
:param space_name: Name of the new or existing dev space to select. Defaults to an \
interactive selection experience.
:type space_name: String
    :param endpoint_type: The endpoint type to be used for an Azure Dev Spaces controller. \
See https://aka.ms/azds-networking for more information.
:type endpoint_type: String
:param prompt: Do not prompt for confirmation. Requires --space.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, endpoint_type, prompt)
except TypeError:
raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.")
except AttributeError as ae:
raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
"""
Remove Azure Dev Spaces from a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
:param resource_group_name: Name of resource group. You can configure the default group. \
Using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param prompt: Do not prompt for confirmation.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt)
except AttributeError as ae:
raise CLIError(ae)
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
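    """Start rotation of the cluster's certificates (a long-running operation)."""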
return sdk_no_wait(no_wait, client.rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable,
workspace_resource_id=None,
subnet_name=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
no_wait=False):
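    """
    Enable or disable the requested addons on the managed cluster model in
    place, applying per-addon configuration and validation, and return the
    updated instance.
    """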
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
os_type = 'Linux'
# for each addons argument
for addon_arg in addon_args:
if addon_arg not in ADDONS:
raise CLIError("Invalid addon name: {}.".format(addon_arg))
addon = ADDONS[addon_arg]
if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# honor addon names defined in Azure CLI
for key in list(addon_profiles):
if key.lower() == addon.lower() and key != addon:
addon_profiles[addon] = addon_profiles.pop(key)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == CONST_MONITORING_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profile.config = {CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id}
elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
if addon_profile.enabled:
raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
'To change virtual-node configuration, run '
'"az aks disable-addons -a virtual-node -g {resource_group_name}" '
'before enabling it again.')
if not subnet_name:
raise CLIError('The aci-connector addon requires setting a subnet name.')
addon_profile.config = {CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
'To change ingress-appgw configuration, run '
f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
elif addon == CONST_CONFCOM_ADDON_NAME:
if addon_profile.enabled:
raise ValidationError('The confcom addon is already enabled for this managed cluster.',
recommendation='To change confcom configuration, run '
f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
addon_profiles[addon] = ManagedClusterAddonProfile(enabled=False)
else:
raise CLIError("The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def _get_azext_module(extension_name, module_name):
try:
# Adding the installed extension in the path
from azure.cli.core.extension.operations import add_extension_to_path
add_extension_to_path(extension_name)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
workspace_resource_id=None,
aci_subnet_name=None,
vnet_subnet_id=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False):
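    """
    Translate the --enable-addons argument into ManagedClusterAddonProfile
    entries, raising CLIError for any unrecognized addon name.
    """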
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True, config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id})
addons.remove('monitoring')
# error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
if 'azure-policy' in addons:
addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
addons.remove('azure-policy')
if 'virtual-node' in addons:
if not aci_subnet_name or not vnet_subnet_id:
raise CLIError('"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
# TODO: how about aciConnectorwindows, what is its addon name?
os_type = 'Linux'
addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
enabled=True,
config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
)
addons.remove('virtual-node')
if 'ingress-appgw' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
addons.remove('ingress-appgw')
if 'confcom' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
addons.remove('confcom')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
def _install_dev_spaces_extension(cmd, extension_name):
try:
from azure.cli.core.extension import operations
operations.add_extension(cmd=cmd, extension_name=extension_name)
    except Exception:  # noqa pylint: disable=broad-except
return False
return True
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
from azure.cli.core.extension import ExtensionNotInstalledException
try:
from azure.cli.core.extension import operations
operations.update_extension(cmd=cmd, extension_name=extension_name)
operations.reload_extension(extension_name=extension_name)
except CLIError as err:
logger.info(err)
except ExtensionNotInstalledException as err:
logger.debug(err)
return False
except ModuleNotFoundError as err:
logger.debug(err)
logger.error("Error occurred attempting to load the extension module. Use --debug for more information.")
return False
return True
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
from azure.cli.core.extension import (ExtensionNotInstalledException, get_extension)
try:
get_extension(extension_name)
if update:
return _update_dev_spaces_extension(cmd, extension_name, extension_module)
except ExtensionNotInstalledException:
return _install_dev_spaces_extension(cmd, extension_name)
return True
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
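    """
    Map the resource group's region to its default Log Analytics workspace
    (DefaultWorkspace-<subscription>-<regionCode>) and return the workspace
    resource ID, reusing the workspace when it already exists.
    """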
# mapping for azure public cloud
# log analytics workspaces cannot be created in WCUS region due to capacity limits
# so mapped to EUS per discussion with log analytics team
AzureCloudLocationToOmsRegionCodeMap = {
"australiasoutheast": "ASE",
"australiaeast": "EAU",
"australiacentral": "CAU",
"canadacentral": "CCA",
"centralindia": "CIN",
"centralus": "CUS",
"eastasia": "EA",
"eastus": "EUS",
"eastus2": "EUS2",
"eastus2euap": "EAP",
"francecentral": "PAR",
"japaneast": "EJP",
"koreacentral": "SE",
"northeurope": "NEU",
"southcentralus": "SCUS",
"southeastasia": "SEA",
"uksouth": "SUK",
"usgovvirginia": "USGV",
"westcentralus": "EUS",
"westeurope": "WEU",
"westus": "WUS",
"westus2": "WUS2",
"brazilsouth": "CQ",
"brazilsoutheast": "BRSE",
"norwayeast": "NOE",
"southafricanorth": "JNB",
"northcentralus": "NCUS",
"uaenorth": "DXB",
"germanywestcentral": "DEWC",
"ukwest": "WUK",
"switzerlandnorth": "CHN",
"switzerlandwest": "CHW",
"uaecentral": "AUH"
}
AzureCloudRegionToOmsRegionMap = {
"australiacentral": "australiacentral",
"australiacentral2": "australiacentral",
"australiaeast": "australiaeast",
"australiasoutheast": "australiasoutheast",
"brazilsouth": "brazilsouth",
"canadacentral": "canadacentral",
"canadaeast": "canadacentral",
"centralus": "centralus",
"centralindia": "centralindia",
"eastasia": "eastasia",
"eastus": "eastus",
"eastus2": "eastus2",
"francecentral": "francecentral",
"francesouth": "francecentral",
"japaneast": "japaneast",
"japanwest": "japaneast",
"koreacentral": "koreacentral",
"koreasouth": "koreacentral",
"northcentralus": "northcentralus",
"northeurope": "northeurope",
"southafricanorth": "southafricanorth",
"southafricawest": "southafricanorth",
"southcentralus": "southcentralus",
"southeastasia": "southeastasia",
"southindia": "centralindia",
"uksouth": "uksouth",
"ukwest": "ukwest",
"westcentralus": "eastus",
"westeurope": "westeurope",
"westindia": "centralindia",
"westus": "westus",
"westus2": "westus2",
"norwayeast": "norwayeast",
"norwaywest": "norwayeast",
"switzerlandnorth": "switzerlandnorth",
"switzerlandwest": "switzerlandwest",
"uaenorth": "uaenorth",
"germanywestcentral": "germanywestcentral",
"germanynorth": "germanywestcentral",
"uaecentral": "uaecentral",
"eastus2euap": "eastus2euap",
"brazilsoutheast": "brazilsoutheast"
}
# mapping for azure china cloud
# currently log analytics supported only China East 2 region
AzureChinaLocationToOmsRegionCodeMap = {
"chinaeast": "EAST2",
"chinaeast2": "EAST2",
"chinanorth": "EAST2",
"chinanorth2": "EAST2"
}
AzureChinaRegionToOmsRegionMap = {
"chinaeast": "chinaeast2",
"chinaeast2": "chinaeast2",
"chinanorth": "chinaeast2",
"chinanorth2": "chinaeast2"
}
    # mapping for azure us government cloud
AzureFairfaxLocationToOmsRegionCodeMap = {
"usgovvirginia": "USGV",
"usgovarizona": "PHX"
}
AzureFairfaxRegionToOmsRegionMap = {
"usgovvirginia": "usgovvirginia",
"usgovtexas": "usgovvirginia",
"usgovarizona": "usgovarizona"
}
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
cloud_name = cmd.cli_ctx.cloud.name
workspace_region = "eastus"
workspace_region_code = "EUS"
# sanity check that locations and clouds match.
if ((cloud_name.lower() == 'azurecloud' and AzureChinaRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurecloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurecloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azurechinacloud' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurechinacloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurechinacloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azureusgovernment' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azureusgovernment' and AzureChinaRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azureusgovernment) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if cloud_name.lower() == 'azurecloud':
workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, "eastus")
workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, "EUS")
elif cloud_name.lower() == 'azurechinacloud':
workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, "chinaeast2")
workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, "EAST2")
elif cloud_name.lower() == 'azureusgovernment':
workspace_region = AzureFairfaxRegionToOmsRegionMap.get(rg_location, "usgovvirginia")
workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(workspace_region, "USGV")
else:
workspace_region = rg_location
workspace_region_code = rg_location.upper()
default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
'/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
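    # For illustration (hypothetical subscription ID): a resource group in
    # eastus maps to workspace_region_code 'EUS', giving
    #   default_workspace_resource_group == 'DefaultResourceGroup-EUS'
    #   default_workspace_name == 'DefaultWorkspace-<subscription-id>-EUS'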
resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
resources = cf_resources(cmd.cli_ctx, subscription_id)
# check if default RG exists
if resource_groups.check_existence(default_workspace_resource_group):
try:
resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
return resource.id
except CloudError as ex:
if ex.status_code != 404:
raise ex
else:
resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})
default_workspace_params = {
'location': workspace_region,
'properties': {
'sku': {
'name': 'standalone'
}
}
}
async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
default_workspace_params)
ws_resource_id = ''
while True:
result = async_poller.result(15)
if async_poller.done():
ws_resource_id = result.id
break
return ws_resource_id
def _ensure_container_insights_for_monitoring(cmd, addon):
# Workaround for this addon key which has been seen lowercased in the wild.
for key in list(addon.config):
if (key.lower() == CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID.lower() and
key != CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID):
addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID] = addon.config.pop(key)
workspace_resource_id = addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID]
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
except IndexError:
raise CLIError('Could not locate resource group in workspace-resource-id URL.')
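    # A workspace resource ID has the shape
    # '/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.OperationalInsights/workspaces/<name>',
    # so split('/') yields the subscription ID at index 2 and the resource
    # group at index 4.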
# region of workspace can be different from region of RG so find the location of the workspace_resource_id
resources = cf_resources(cmd.cli_ctx, subscription_id)
try:
resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
location = resource.location
except CloudError as ex:
raise ex
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
# pylint: disable=line-too-long
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_acr(cli_ctx,
client_id,
acr_name_or_id,
subscription_id,
detach=False):
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
    # Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
kubernetes_version=None,
zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
node_vm_size=None,
node_osdisk_type=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
ppg=None,
max_pods=0,
os_type="Linux",
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
node_taints=None,
priority=CONST_SCALE_SET_PRIORITY_REGULAR,
eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
spot_max_price=float('nan'),
tags=None,
labels=None,
max_surge=None,
mode="User",
no_wait=False):
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
upgradeSettings = AgentPoolUpgradeSettings()
taints_array = []
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise CLIError('Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
if node_vm_size is None:
if os_type.lower() == "windows":
node_vm_size = "Standard_D2s_v3"
else:
node_vm_size = "Standard_DS2_v2"
if max_surge:
upgradeSettings.max_surge = max_surge
agent_pool = AgentPool(
name=nodepool_name,
tags=tags,
node_labels=labels,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
vnet_subnet_id=vnet_subnet_id,
proximity_placement_group_id=ppg,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=zones,
scale_set_priority=priority,
enable_node_public_ip=enable_node_public_ip,
node_public_ip_prefix_id=node_public_ip_prefix_id,
node_taints=taints_array,
upgrade_settings=upgradeSettings,
mode=mode
)
if priority == CONST_SCALE_SET_PRIORITY_SPOT:
agent_pool.scale_set_eviction_policy = eviction_policy
if isnan(spot_max_price):
spot_max_price = -1
agent_pool.spot_max_price = spot_max_price
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool.os_disk_type = node_osdisk_type
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool)
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if instance.enable_auto_scaling:
raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
if new_node_count == instance.count:
raise CLIError("The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
nodepool_name,
kubernetes_version='',
node_image_only=False,
max_surge=None,
no_wait=False):
    if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade the node image version. '
                       'If you only want to upgrade the node image version, please use "--node-image-only" only.')
if node_image_only:
return _upgrade_single_nodepool_image_version(no_wait,
client,
resource_group_name,
cluster_name,
nodepool_name)
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
tags=None,
max_surge=None,
mode=None,
no_wait=False):
update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
if update_autoscaler > 1:
raise CLIError('Please specify one of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler"')
if (update_autoscaler == 0 and not tags and not mode and not max_surge):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--tags" or "--mode" or "--max-surge"')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
_validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
raise CLIError('Autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
'to enable cluster with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning('Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
instance.tags = tags
if mode is not None:
instance.mode = mode
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
nodepool_name,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_get_upgrade_profile(cmd, client, resource_group_name, cluster_name, nodepool_name):
return client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
def _ensure_aks_acr_role_assignment(cli_ctx,
client_id,
registry_id,
detach=False):
if detach:
if not _delete_role_assignments(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not _add_role_assignment(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
fqdn_subdomain=None,
location=None,
name=None):
aad_session_key = None
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
if dns_name_prefix:
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
else:
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, fqdn_subdomain, location)
service_principal, aad_session_key = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this subscription?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
return {
'client_secret': client_secret,
'service_principal': service_principal,
'aad_session_key': aad_session_key,
}
def _ensure_osa_aad(cli_ctx,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
identifier=None,
name=None, create=False,
customer_admin_group_id=None):
rbac_client = get_graph_rbac_management_client(cli_ctx)
if create:
        # This reply_url is temporarily set since Azure needs one to create the AAD application.
app_id_name = 'https://{}'.format(name)
if not aad_client_app_secret:
aad_client_app_secret = _create_client_secret()
# Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
additional_properties=None, type="Scope")
# Read directory permissions on Windows Azure Active Directory API
directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04",
additional_properties=None, type="Role")
required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access],
additional_properties=None,
resource_app_id="00000002-0000-0000-c000-000000000000")
list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
.format(app_id_name)))
if list_aad_filtered:
aad_client_app_id = list_aad_filtered[0].app_id
# Updating reply_url with the correct FQDN information returned by the RP
reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(identifier)
update_application(client=rbac_client.applications,
object_id=list_aad_filtered[0].object_id,
display_name=name,
identifier_uris=[app_id_name],
reply_urls=[reply_url],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
logger.info('Updated AAD: %s', aad_client_app_id)
else:
result, _aad_session_key = create_application(client=rbac_client.applications,
display_name=name,
identifier_uris=[app_id_name],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
aad_client_app_id = result.app_id
logger.info('Created an AAD: %s', aad_client_app_id)
# Get the TenantID
if aad_tenant_id is None:
profile = Profile(cli_ctx=cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
return OpenShiftManagedClusterAADIdentityProvider(
client_id=aad_client_app_id,
secret=aad_client_app_secret,
tenant_id=aad_tenant_id,
kind='AADIdentityProvider',
customer_admin_group_id=customer_admin_group_id)
def _ensure_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal, _aad_session_key = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this subscription?')
logger.info('Created a service principal: %s', service_principal)
        # add the role assignment before saving the service principal
if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
logger.warning('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this subscription?')
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
return {
'client_secret': client_secret,
'service_principal': service_principal,
}
def _create_client_secret():
# Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char
return client_secret
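# Illustrative only: _create_client_secret() returns 20 random hex characters
# plus a trailing '$', e.g. '0f1e2d3c4b5a69788796$' (a made-up sample value).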
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
# Just do the get, we don't need the result, it will error out if the group doesn't exist.
rg = groups.get(resource_group_name)
return rg.location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler is enabled')
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError('node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
            raise CLIError('min-count and max-count are only allowed together with --enable-cluster-autoscaler, '
                           'please add the flag or remove the counts')
def _validate_autoscaler_update_counts(min_count, max_count, is_enable_or_update):
"""
Validates the min, max, and node count when performing an update
"""
if min_count is None or max_count is None:
if is_enable_or_update:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count.')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning('Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _remove_osa_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags', 'plan', 'type', 'id']
ap_master_attrs = ['name', 'os_type']
net_attrs = ['peer_vnet_id']
for managed_cluster in managed_clusters:
for attr in attrs:
if hasattr(managed_cluster, attr) and getattr(managed_cluster, attr) is None:
delattr(managed_cluster, attr)
for attr in ap_master_attrs:
if getattr(managed_cluster.master_pool_profile, attr, None) is None:
delattr(managed_cluster.master_pool_profile, attr)
for attr in net_attrs:
if getattr(managed_cluster.network_profile, attr, None) is None:
delattr(managed_cluster.network_profile, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"australiaeast",
"canadacentral",
"centralindia",
"centralus",
"eastasia",
"eastus",
"eastus2",
"eastus2euap",
"japaneast",
"northcentralus",
"northeurope",
"southcentralus",
"southeastasia",
"southindia",
"uksouth",
"westcentralus",
"westus",
"westus2",
"westeurope"
]
if norm_location not in aci_locations:
raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
' The available locations are "{}"'.format(','.join(aci_locations)))
def osa_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_osa_nulls(list(managed_clusters))
def _format_workspace_id(workspace_id):
workspace_id = workspace_id.strip()
if not workspace_id.startswith('/'):
workspace_id = '/' + workspace_id
if workspace_id.endswith('/'):
workspace_id = workspace_id.rstrip('/')
return workspace_id
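# Illustrative normalization (hypothetical ID): _format_workspace_id turns both
# 'subscriptions/<sub>/.../workspaces/myws' and
# '/subscriptions/<sub>/.../workspaces/myws/' into
# '/subscriptions/<sub>/.../workspaces/myws'.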
def openshift_create(cmd, client, resource_group_name, name, # pylint: disable=too-many-locals
location=None,
compute_vm_size="Standard_D4s_v3",
compute_count=3,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
vnet_prefix="10.0.0.0/8",
subnet_prefix="10.0.0.0/24",
vnet_peer=None,
tags=None,
no_wait=False,
workspace_id=None,
customer_admin_group_id=None):
logger.warning('Support for the creation of ARO 3.11 clusters ends 30 Nov 2020. Please see aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
if location is None:
location = _get_rg_location(cmd.cli_ctx, resource_group_name)
agent_pool_profiles = []
agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='compute', # Must be 12 chars or less before ACS RP adds to it
count=int(compute_count),
vm_size=compute_vm_size,
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.compute,
subnet_cidr=subnet_prefix
)
agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='infra', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.infra,
subnet_cidr=subnet_prefix
)
agent_pool_profiles.append(agent_node_pool_profile)
agent_pool_profiles.append(agent_infra_pool_profile)
agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='master', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
subnet_cidr=subnet_prefix
)
identity_providers = []
create_aad = False
    # Check that the cluster does not already exist, since AAD rotation on OSA is not supported for now
try:
client.get(resource_group_name, name)
except CloudError:
        # If aad_client_app_id, aad_client_app_secret and aad_tenant_id are all unset, create a new AAD application
if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None:
create_aad = True
osa_aad_identity = _ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=aad_client_app_id,
aad_client_app_secret=aad_client_app_secret,
aad_tenant_id=aad_tenant_id, identifier=None,
name=name, create=create_aad,
customer_admin_group_id=customer_admin_group_id)
identity_providers.append(
OpenShiftManagedClusterIdentityProvider(
name='Azure AD',
provider=osa_aad_identity
)
)
auth_profile = OpenShiftManagedClusterAuthProfile(identity_providers=identity_providers)
default_router_profile = OpenShiftRouterProfile(name='default')
if vnet_peer is not None:
from msrestazure.tools import is_valid_resource_id, resource_id
if not is_valid_resource_id(vnet_peer):
vnet_peer = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network', type='virtualNetwork',
name=vnet_peer
)
if workspace_id is not None:
workspace_id = _format_workspace_id(workspace_id)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long
else:
monitor_profile = None
network_profile = NetworkProfile(vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer)
osamc = OpenShiftManagedCluster(
location=location, tags=tags,
open_shift_version="v3.11",
network_profile=network_profile,
auth_profile=auth_profile,
agent_pool_profiles=agent_pool_profiles,
master_pool_profile=agent_master_pool_profile,
router_profiles=[default_router_profile],
monitor_profile=monitor_profile)
try:
# long_running_operation_timeout=300
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
result = LongRunningOperation(cmd.cli_ctx)(result)
instance = client.get(resource_group_name, name)
_ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=osa_aad_identity.client_id,
aad_client_app_secret=osa_aad_identity.secret,
aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname,
name=name, create=create_aad)
except CloudError as ex:
if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
if "No registered resource provider found for location" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
raise ex
def openshift_show(cmd, client, resource_group_name, name):
logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
mc = client.get(resource_group_name, name)
return _remove_osa_nulls([mc])[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
instance = client.get(resource_group_name, name)
# TODO: change this approach when we support multiple agent pools.
idx = 0
for i in range(len(instance.agent_pool_profiles)):
if instance.agent_pool_profiles[i].name.lower() == "compute":
idx = i
break
instance.agent_pool_profiles[idx].count = int(compute_count) # pylint: disable=no-member
    # Null out the AAD profile and manually set the master pool profile name, because validation complains otherwise
instance.master_pool_profile.name = "master"
instance.auth_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_monitor_enable(cmd, client, resource_group_name, name, workspace_id, no_wait=False):
logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
instance = client.get(resource_group_name, name)
workspace_id = _format_workspace_id(workspace_id)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long
instance.monitor_profile = monitor_profile
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_monitor_disable(cmd, client, resource_group_name, name, no_wait=False):
logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
instance = client.get(resource_group_name, name)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=False, workspace_resource_id=None) # pylint: disable=line-too-long
instance.monitor_profile = monitor_profile
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def _is_msi_cluster(managed_cluster):
return (managed_cluster and managed_cluster.identity and
(managed_cluster.identity.type.casefold() == "systemassigned" or
managed_cluster.identity.type.casefold() == "userassigned"))
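# Illustrative reading of _is_msi_cluster (hypothetical cluster object): a
# managed cluster whose identity.type is 'SystemAssigned' or 'UserAssigned'
# counts as MSI-based; casefold() makes the comparison case-insensitive.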
|
Untitled-1.py
|
import re
import os
import queue
import requests
from requests.auth import HTTPProxyAuth
import time
import threading
from PyQt4.QtCore import QThread, pyqtSignal, pyqtSlot
from PyQt4 import QtCore , QtGui
import CusLog
from pypac import PACSession
log = CusLog.CusLog(CusLog.LOG_INFO,"logs.txt")
class DClass(QThread):
complete_signal = pyqtSignal('PyQt_PyObject')
failed_signal = pyqtSignal('PyQt_PyObject')
progress_signal = pyqtSignal('PyQt_PyObject')
speed_signal = pyqtSignal('PyQt_PyObject')
label_signal = pyqtSignal('PyQt_PyObject')
eta_signal = pyqtSignal('PyQt_PyObject')
def __init__(self,
url,
dest,
chunk_size,
d_id,
complete_callback,
failed_callback,
progress_callback,
speed_callback,
label_callback,
time_callback,
proxy = None,
batch = False):
super(DClass, self).__init__()
self.url = url
self.dest = dest
self.chunk_size = chunk_size
self.id = d_id
self.complete_signal.connect(complete_callback)
self.failed_signal.connect(failed_callback)
self.progress_signal.connect(progress_callback)
self.speed_signal.connect(speed_callback)
self.label_signal.connect(label_callback)
self.eta_signal.connect(time_callback)
self.batch = batch
self.proxy = proxy
self.cancelled = False
self.paused = False
self.total_bytes = 0
self.written_bytes = 0
self._lock = threading.Lock()
def run(self):
        if self.batch:
with open(self.url, 'r') as file:
urls = file.readlines()
count = len(urls)
log("count:{}".format(count))
for url in urls:
if self.cancelled:
break
if is_valid_url(url):
url = url.strip('\n')
count -= 1
dest = os.path.join(self.dest, url.split('/')[-1])
dest = det_filename(dest)
log(dest)
self.label_signal.emit(dict(id=self.id, txt="{}/{} {}".format(len(urls) - count, len(urls), dest)))
self.handler = DHandler(url, dest, self.chunk_size,
self.complete,
self.failed,
self.progress,
self.speed,
self.proxy)
self.handler.start()
# time.sleep(2)
while self.handler.is_running():
time.sleep(.5)
self.batch = False
self.complete()
else:
with self._lock:
self.handler = DHandler( self.url, self.dest,self.chunk_size,
self.complete,
self.failed,
self.progress,
self.speed,
self.proxy)
self.handler.start()
def toggle_pause(self):
self.handler.toggle_pause()
self.paused = self.handler.is_paused()
def cancel(self):
self.cancelled = True
self.handler.cancel()
def stop(self):
pass
def complete(self):
if not self.batch:
self.progress((self.written_bytes,self.total_bytes))
log((self.written_bytes,self.total_bytes))
self.speed()
self.complete_signal.emit({'id':self.id})
def failed(self, val = 0):
log("download Failed")
if val == 1:
self.failed_signal.emit({'id':self.id,'error':'proxy'})
def progress(self, tup):
self.total_bytes = tup[1]
self.written_bytes = tup[0]
self.progress_signal.emit(int( (float(tup[0])/tup[1])*100 ))
def speed(self, dbyte = 0, dtime = 0):
if dbyte and dtime :
spd = float(dbyte) / dtime
if spd > 1024 * 512:
speed = "{:.2f} MB/s".format(float(spd) / (1024 * 1024))
elif spd > 512:
speed = "{:.2f} KB/s".format(float(spd) / 1024)
else:
speed = "{} B/s".format(spd)
else:
speed = "0 B/s"
if self.total_bytes > 1024 * 1024:
tb = "{:.1f}MB".format(float(self.total_bytes) / (1024 * 1024))
rb = "{:.1f}".format(float(self.written_bytes) / (1024 * 1024))
elif self.total_bytes > 1024:
tb = "{:.1f}KB".format(float(self.total_bytes) / (1024))
rb = "{:.1f}".format(float(self.written_bytes) / (1024))
else:
tb = "{}B".format(self.total_bytes)
rb = "{}".format(self.written_bytes)
signal_dic = {'id': self.id, 'progress': '{}/{}'.format(rb, tb), 'speed': '{}'.format(speed)}
self.speed_signal.emit(signal_dic)
if dbyte:
self.eta_signal.emit(dict(id=self.id, eta=int((self.total_bytes - self.written_bytes) / spd)))
else:
self.eta_signal.emit(dict(id=self.id, eta=0))
# self.speed_signal.emit(val)
class DHandler(object):
def __init__(self,
url,
dest,
chunk_size,
complete_callback,
failed_callback,
progress_callback,
speed_callback,
proxy=None):
self.url = url
self.dest = dest
self._chunk_size = chunk_size
self._total_bytes = 0
self._complete = complete_callback
self._failed = failed_callback
self._progress = progress_callback
self._speed = speed_callback
self._proxy = proxy
self._cancelled = False
self._paused = False
self._lock = threading.Lock()
self.thread = threading.Thread(target=self._download, daemon=True)
def start(self):
self.thread.start()
def toggle_pause(self):
log("toggle Pause")
if self.is_paused():
with self._lock:
self._paused = False
else:
with self._lock:
self._paused = True
def cancel(self):
self._cancelled = True
def is_cancelled(self):
return self._cancelled
def is_paused(self):
return self._paused
def is_running(self):
return self.thread.is_alive()
    def wait(self):
        while self.thread.is_alive():
            try:
                # In case of an exception here (like KeyboardInterrupt),
                # cancel the task and re-raise.
                self.thread.join(0.02)
            except BaseException:
                self.cancel()
                raise
def _download(self):
log('Download Handler thread started {}'.format(self.url))
ch = (2 ** 20) * self._chunk_size # in MBs
log("Chunk-Size:{} B".format(ch))
if self._chunk_size > 20:
stream_unit_size = (2 ** 20) * 20
else:
stream_unit_size = int( (2 ** 20) * self._chunk_size/2)
n_bytes_read = 0
# req = requests.session()
req = PACSession()
        # Guard against self._proxy being None before indexing into it.
        if self._proxy and self._proxy.get('user'):
            log(self._proxy['user'])
            req.proxy_auth = HTTPProxyAuth(self._proxy['user'], self._proxy['pwd'])
        if not self._proxy or not self._proxy.get('addr'):
            proxies = None
        else:
            log(self._proxy['addr'])
            proxies = {'http': 'http://{}:{}'.format(self._proxy['addr'], self._proxy['port']),
                       'https': 'http://{}:{}'.format(self._proxy['addr'], self._proxy['port'])}
try:
with req.get(self.url, stream = True, proxies= proxies) as r:
log("Response:{}".format(r.status_code))
                if (r.status_code < 200) or (r.status_code >= 300):
error_text = "Download Failed status code:{}".format(r.status_code)
log(error_text, CusLog.LOG_ERROR)
self._failed()
return 1
total_bytes = int(r.headers['Content-Length'])
except requests.exceptions.ProxyError:
log('proxy error')
self._failed(1)
return 1
except Exception as e:
log("Failed to connect :{}".format(e), CusLog.LOG_ERROR)
self._failed()
return 1
log("total-Size:{} B".format(total_bytes))
overall_start = time.perf_counter()
with open(self.dest, 'ab') as file:
st1 = time.perf_counter()
sb1 = n_bytes_read
time_out_count =0
while n_bytes_read < total_bytes:
self._progress((n_bytes_read, total_bytes))
while self.is_paused():
time.sleep(.25)
#speed = "0 B/s"
self._speed()
if self.is_cancelled():
return 1
if self.is_cancelled():
return 1
header = {"Range": "bytes={}-{}".format(n_bytes_read, n_bytes_read + ch - 1)}
try:
with req.get(self.url, headers=header,stream = True, timeout=(10,10)) as r:
chunk = bytes()
for chun in r.iter_content(chunk_size=stream_unit_size):
# for chun in r.iter_content():
if self.is_cancelled():
return 1
if chun:
chunk += chun
chunklen = len(chunk)
self._progress((n_bytes_read+chunklen, total_bytes))
db = n_bytes_read+chunklen-sb1
dt = time.perf_counter()-st1
#log("{}".format(dt))
if dt > 1:
self._speed(db, dt)
sb1 = n_bytes_read+chunklen
st1 = time.perf_counter()
if self.is_paused():
break
except Exception as e:
# if len(chunk) > 0: # send write portion of chunk data that has been downloaded
# n_bytes_read += len(chunk)
# file.write(chunk)
# log("writing chunk")
self._progress((n_bytes_read, total_bytes))
time_out_count += 1
log(e, CusLog.LOG_ERROR)
log("Timeout count: {}".format(time_out_count), CusLog.LOG_ERROR)
if time_out_count % 2 ==0: # TODO: put this in a thread and exit o
log("Sleep for {} secs".format(30), CusLog.LOG_ERROR)
time.sleep(30)
continue
else:
if len(chunk) > 0: # send write chunk data that has been downloaded
n_bytes_read += len(chunk)
file.write(chunk)
self._progress((n_bytes_read, total_bytes))
self._progress((n_bytes_read , total_bytes))
self._speed()
log("download complete---time taken:{}".format(time.perf_counter()-overall_start))
self._complete()
# TODO: COMPLETE THIS DOWNLOAD FUNCTION
def is_batch(url):
if os.path.isfile(url):
log(" ")
log("Batch Download")
return True
else:
log("not Batch Download")
return False
def det_filename(filename):
out_file_name = os.path.basename(filename)
temp = out_file_name
n = 0
try:
while out_file_name in os.listdir(os.path.dirname(filename)):
n += 1
spl = os.path.splitext(temp)
# print(spl)
out_file_name = '{}({}){}'.format(spl[0], n, spl[1])
    except OSError:
        # If the target directory cannot be listed, fall back to the
        # original filename unchanged.
        return filename
return os.path.join(os.path.dirname(filename), out_file_name)
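# Illustrative behaviour (hypothetical paths): if '/tmp/movie.mp4' already
# exists, det_filename('/tmp/movie.mp4') returns '/tmp/movie(1).mp4', and the
# counter keeps incrementing while the candidate name is taken.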
def is_valid_url(url, test = False):
#valid_url = re.findall("http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]| [! * \(\),] | (?: %[0-9a-fA-F][0-9a-fA-F]))+"
# , url)
if not test:
valid_url = re.findall("http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]| [! *,] |(?: %[0-9a-fA-F][0-9a-fA-F]))+"
, url)
if valid_url:
return True
else:
return False
else:
        try:
            with requests.get(url, timeout=1, stream=True) as r:
                # Any 2xx status counts as a reachable URL.
                return 200 <= r.status_code < 300
        except requests.RequestException:
            return False
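# Illustrative checks (hypothetical URLs):
#   is_valid_url('https://example.com/file.zip')   -> True (regex match only)
#   is_valid_url('not a url')                      -> False
#   is_valid_url('https://example.com', test=True) issues a short GET and
#   returns True only for a 2xx response.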
|
agent.py
|
import os
import dill
import pickle
import random
import numpy as np
import tensorflow as tf
from ilurl.utils.default_logger import make_default_logger
from ilurl.agents.worker import AgentWorker
from ilurl.interfaces.agents import AgentInterface
from ilurl.agents.ql.choice import choice_eps_greedy
from ilurl.agents.ql.define import dpq_tls
from ilurl.agents.ql.update import dpq_update
from ilurl.agents.ql.replay_buffer import ReplayBuffer
from ilurl.agents.ql.schedules import PowerSchedule
class QL(AgentWorker, AgentInterface):
"""
Q-learning agent.
"""
def __init__(self, *args, **kwargs):
super(QL, self).__init__(*args, **kwargs)
def init(self, ql_params):
"""Instantiate Q-Learning agent.
Parameters:
----------
* ql_params: ilurl.core.params.QLParams object
Q-learning agent parameters
References:
----------
        [1] Sutton and Barto, Reinforcement Learning, 2nd ed., 2018
"""
tf.config.set_visible_devices([], 'GPU')
if ql_params.seed:
agent_seed = ql_params.seed + sum([ord(c) for c in ql_params.name])
random.seed(agent_seed)
np.random.seed(agent_seed)
tf.random.set_seed(agent_seed)
self._name = ql_params.name
# Whether learning stopped.
self._stop = False
# Learning rate.
self.learning_rate = 0.05
# Exploration strategy.
self.choice_type = ql_params.choice_type
# Discount factor.
self.discount_factor = ql_params.discount_factor
# Q-table.
self.Q = dpq_tls(ql_params.states.rank, ql_params.states.depth,
ql_params.actions.rank, ql_params.actions.depth,
ql_params.initial_value)
# State-action counter (for learning rate decay).
self.state_action_counter = dpq_tls(ql_params.states.rank,
ql_params.states.depth,
ql_params.actions.rank,
ql_params.actions.depth,
0)
# Epsilon-greedy (exploration rate).
if self.choice_type in ('eps-greedy',):
self.exploration = PowerSchedule(
power_coef=ql_params.eps_decay_power_coef)
# UCB (extra-stuff).
if self.choice_type in ('ucb',):
raise NotImplementedError
# Replay buffer.
self.replay_buffer = ql_params.replay_buffer
if self.replay_buffer:
self.batch_size = ql_params.replay_buffer_batch_size
self.warm_up = ql_params.replay_buffer_warm_up
self.memory = ReplayBuffer(ql_params.replay_buffer_size)
# Logger.
dir_path = f'{ql_params.exp_path}/logs/{self._name}'
self._logger = make_default_logger(directory=dir_path, label=self._name)
self._learning_logger = make_default_logger(directory=dir_path, label=f'{self._name}-learning')
# Observations counter.
self._obs_counter = 0
def get_stop(self):
return self._stop
def set_stop(self, stop):
self._stop = stop
def act(self, s):
if self._stop:
# Argmax greedy choice.
actions, values = zip(*self.Q[s].items())
            chosen, _ = choice_eps_greedy(actions, values, 0)
else:
if self.choice_type in ('eps-greedy',):
actions, values = zip(*self.Q[s].items())
num_state_visits = sum(self.state_action_counter[s].values())
eps = self.exploration.value(num_state_visits)
                chosen, _ = choice_eps_greedy(actions, values, eps)
elif self.choice_type in ('optimistic',):
raise NotImplementedError
elif self.choice_type in ('ucb',):
raise NotImplementedError
else:
raise NotImplementedError
self._obs_counter += 1
        return int(chosen)
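    # Sketch of the rule act() applies, assuming choice_eps_greedy behaves as
    # the usual epsilon-greedy selector: with probability eps pick a uniformly
    # random action, otherwise pick argmax_a Q[s][a]; eps decays via
    # PowerSchedule as the state's visit count grows.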
def update(self, s, a, r, s1):
if not self._stop:
if self.replay_buffer:
self.memory.add(s, a, r, s1, 0.0)
# Update (state, action) counter.
self.state_action_counter[s][a] += 1
# Q_old = self.q_values
qvals0 = self.sumqs()
# Q-learning update.
dpq_update(self.discount_factor, self.learning_rate, self.Q, s, a, r, s1)
if self.replay_buffer and self._obs_counter > self.warm_up:
s_samples, a_samples, r_samples, s1_samples, _ = self.memory.sample(
self.batch_size)
for sample in range(self.batch_size):
s_ = tuple(s_samples[sample])
a_ = a_samples[sample]
r_ = r_samples[sample]
s1_ = tuple(s1_samples[sample])
# Q-learning update.
dpq_update(self.discount_factor, self.learning_rate, self.Q, s_, a_, r_, s1_)
# Calculate Q-table update distance.
qvals = self.sumqs()
dist = np.abs(qvals - qvals0)
# Log values.
q_abs = np.abs(qvals)
values = {
"step": self._obs_counter,
"lr": self.learning_rate,
"expl_eps": self.exploration.value(sum(
self.state_action_counter[s].values())-1),
"q_dist": dist,
"q_resid": dist if q_abs < 1e-3 else (dist / q_abs),
}
self._learning_logger.write(values)
# Log values.
values = {
"step": self._obs_counter,
"action": a,
"reward": r,
}
self._logger.write(values)
def terminate(self):
# Nothing to do here.
pass
def save_checkpoint(self, path):
os.makedirs(f"{path}/checkpoints/{self._obs_counter}", exist_ok=True)
checkpoint_file = "{0}/checkpoints/{1}/{2}.chkpt".format(
path, self._obs_counter, self._name)
        with open(checkpoint_file, 'wb') as f:
            # Dump synchronously so the file is fully written before the
            # context manager closes it.
            pickle.dump(self.Q, f)
        print(f'Saved chkpt: {checkpoint_file}')
def load_checkpoint(self, chkpts_dir_path, chkpt_num):
chkpt_path = '{0}/{1}/{2}.chkpt'.format(chkpts_dir_path,
chkpt_num,
self._name)
print(f'Loaded chkpt: {chkpt_path}')
with open(chkpt_path, 'rb') as f:
self.Q = dill.load(f)
def sumqs(self):
"""Sum of all q-values"""
ret = 0
for state, actions in self.Q.items():
ret += sum(saval for saval in actions.values())
return ret
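# Note: sumqs() is used in update() as a cheap convergence diagnostic: the
# absolute change of the summed Q-values across an update is logged as
# 'q_dist' (and, normalized by the new magnitude, as 'q_resid').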
|
wsgi.py
|
import base64
import logging
import multiprocessing
import os
import pickle
import re
import threading
import time
from datetime import datetime
from email.utils import formatdate
from io import BytesIO
import pylibmc
import requests
from c3nav.mapdata.utils.cache import CachePackage
from c3nav.mapdata.utils.tiles import (build_access_cache_key, build_base_cache_key, build_tile_etag, get_tile_bounds,
parse_tile_access_cookie)
loglevel = logging.DEBUG if os.environ.get('C3NAV_DEBUG') else os.environ.get('LOGLEVEL', 'INFO')
logging.basicConfig(level=loglevel,
format='[%(asctime)s] [%(process)s] [%(levelname)s] %(name)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S %z')
logger = logging.getLogger('c3nav')
if os.environ.get('C3NAV_LOGFILE'):
logging.basicConfig(filename=os.environ['C3NAV_LOGFILE'])
class TileServer:
def __init__(self):
self.path_regex = re.compile(r'^/(\d+)/(-?\d+)/(-?\d+)/(-?\d+).png$')
self.cookie_regex = re.compile(r'(^| )c3nav_tile_access="?([^;" ]+)"?')
try:
self.upstream_base = os.environ['C3NAV_UPSTREAM_BASE'].strip('/')
except KeyError:
raise Exception('C3NAV_UPSTREAM_BASE needs to be set.')
        # os.environ.get() with a default never raises KeyError, so no
        # try/except is needed here.
        self.data_dir = os.environ.get('C3NAV_DATA_DIR', 'data')
if not os.path.exists(self.data_dir):
os.mkdir(self.data_dir)
self.tile_secret = os.environ.get('C3NAV_TILE_SECRET', None)
if not self.tile_secret:
tile_secret_file = None
try:
                tile_secret_file = os.environ['C3NAV_TILE_SECRET_FILE']
                with open(tile_secret_file) as f:
                    self.tile_secret = f.read().strip()
except KeyError:
raise Exception('C3NAV_TILE_SECRET or C3NAV_TILE_SECRET_FILE need to be set.')
except FileNotFoundError:
raise Exception('The C3NAV_TILE_SECRET_FILE (%s) does not exist.' % tile_secret_file)
self.reload_interval = int(os.environ.get('C3NAV_RELOAD_INTERVAL', 60))
self.auth_headers = {'X-Tile-Secret': base64.b64encode(self.tile_secret.encode())}
self.cache_package = None
self.cache_package_etag = None
self.cache_package_filename = None
cache = self.get_cache_client()
wait = 1
while True:
success = self.load_cache_package(cache=cache)
if success:
logger.info('Cache package successfully loaded.')
break
logger.info('Retrying after %s seconds...' % wait)
time.sleep(wait)
wait = min(10, wait*2)
threading.Thread(target=self.update_cache_package_thread, daemon=True).start()
@staticmethod
def get_cache_client():
return pylibmc.Client(["127.0.0.1"], binary=True, behaviors={"tcp_nodelay": True, "ketama": True})
def update_cache_package_thread(self):
cache = self.get_cache_client() # different thread → different client!
while True:
time.sleep(self.reload_interval)
self.load_cache_package(cache=cache)
def get_date_header(self):
return 'Date', formatdate(timeval=time.time(), localtime=False, usegmt=True)
def load_cache_package(self, cache):
logger.debug('Downloading cache package from upstream...')
try:
headers = self.auth_headers.copy()
if self.cache_package_etag is not None:
headers['If-None-Match'] = self.cache_package_etag
r = requests.get(self.upstream_base+'/map/cache/package.tar.xz', headers=headers)
if r.status_code == 403:
logger.error('Rejected cache package download with Error 403. Tile secret is probably incorrect.')
return False
if r.status_code == 304:
if self.cache_package is not None:
logger.debug('Not modified.')
cache['cache_package_filename'] = self.cache_package_filename
return True
logger.error('Unexpected not modified.')
return False
r.raise_for_status()
except Exception as e:
logger.error('Cache package download failed: %s' % e)
return False
        logger.debug('Receiving and loading new cache package...')
try:
self.cache_package = CachePackage.read(BytesIO(r.content))
self.cache_package_etag = r.headers.get('ETag', None)
except Exception as e:
logger.error('Cache package parsing failed: %s' % e)
return False
try:
self.cache_package_filename = os.path.join(
self.data_dir,
datetime.now().strftime('%Y-%m-%d_%H-%M-%S-%f')+'.pickle'
)
with open(self.cache_package_filename, 'wb') as f:
pickle.dump(self.cache_package, f)
cache.set('cache_package_filename', self.cache_package_filename)
except Exception as e:
self.cache_package_etag = None
logger.error('Saving pickled package failed: %s' % e)
return False
return True
def not_found(self, start_response, text):
start_response('404 Not Found', [self.get_date_header(),
('Content-Type', 'text/plain'),
('Content-Length', str(len(text)))])
return [text]
def internal_server_error(self, start_response, text=b'internal server error'):
start_response('500 Internal Server Error', [self.get_date_header(),
('Content-Type', 'text/plain'),
('Content-Length', str(len(text)))])
return [text]
def deliver_tile(self, start_response, etag, data):
start_response('200 OK', [self.get_date_header(),
('Content-Type', 'image/png'),
('Content-Length', str(len(data))),
('Cache-Control', 'no-cache'),
('ETag', etag)])
return [data]
def get_cache_package(self):
try:
cache_package_filename = self.cache.get('cache_package_filename')
except pylibmc.Error as e:
logger.warning('pylibmc error in get_cache_package(): %s' % e)
cache_package_filename = None
if cache_package_filename is None:
logger.warning('cache_package_filename went missing.')
return self.cache_package
if self.cache_package_filename != cache_package_filename:
logger.debug('Loading new cache package in worker.')
self.cache_package_filename = cache_package_filename
with open(self.cache_package_filename, 'rb') as f:
self.cache_package = pickle.load(f)
return self.cache_package
cache_lock = multiprocessing.Lock()
    @property
    def cache(self):
        # The property is a data descriptor and would shadow a plain instance
        # attribute of the same name, so look the client up in __dict__
        # explicitly and create it lazily, once per worker process.
        cache = self.__dict__.get('cache')
        if cache is None:
            cache = self.get_cache_client()
            self.__dict__['cache'] = cache
        return cache
def __call__(self, env, start_response):
path_info = env['PATH_INFO']
match = self.path_regex.match(path_info)
if match is None:
return self.not_found(start_response, b'invalid tile path.')
level, zoom, x, y = match.groups()
zoom = int(zoom)
if not (-2 <= zoom <= 5):
return self.not_found(start_response, b'zoom out of bounds.')
# do this to be thread safe
try:
cache_package = self.get_cache_package()
except Exception as e:
logger.error('get_cache_package() failed: %s' % e)
return self.internal_server_error(start_response)
# check if bounds are valid
x = int(x)
y = int(y)
minx, miny, maxx, maxy = get_tile_bounds(zoom, x, y)
if not cache_package.bounds_valid(minx, miny, maxx, maxy):
return self.not_found(start_response, b'coordinates out of bounds.')
# get level
level = int(level)
level_data = cache_package.levels.get(level)
if level_data is None:
return self.not_found(start_response, b'invalid level.')
# build cache keys
last_update = level_data.history.last_update(minx, miny, maxx, maxy)
base_cache_key = build_base_cache_key(last_update)
# decode access permissions
access_permissions = set()
access_cache_key = '0'
cookie = env.get('HTTP_COOKIE', None)
if cookie:
cookie = self.cookie_regex.search(cookie)
if cookie:
cookie = cookie.group(2)
access_permissions = (parse_tile_access_cookie(cookie, self.tile_secret) &
set(level_data.restrictions[minx:maxx, miny:maxy]))
access_cache_key = build_access_cache_key(access_permissions)
# check browser cache
if_none_match = env.get('HTTP_IF_NONE_MATCH')
tile_etag = build_tile_etag(level, zoom, x, y, base_cache_key, access_cache_key, self.tile_secret)
if if_none_match == tile_etag:
start_response('304 Not Modified', [self.get_date_header(),
('Content-Length', '0'),
('ETag', tile_etag)])
return [b'']
cache_key = path_info+'_'+tile_etag
cached_result = self.cache.get(cache_key)
if cached_result is not None:
return self.deliver_tile(start_response, tile_etag, cached_result)
r = requests.get('%s/map/%d/%d/%d/%d/%s.png' % (self.upstream_base, level, zoom, x, y, access_cache_key),
headers=self.auth_headers)
if r.status_code == 200 and r.headers['Content-Type'] == 'image/png':
self.cache.set(cache_key, r.content)
return self.deliver_tile(start_response, tile_etag, r.content)
start_response('%d %s' % (r.status_code, r.reason), [
self.get_date_header(),
            ('Content-Length', str(len(r.content))),
('Content-Type', r.headers.get('Content-Type', 'text/plain'))
])
return [r.content]
application = TileServer()
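
def _smoke_test_tile_request(path='/0/0/0/0.png'):
    # Hedged sketch (added for illustration, not part of the original server):
    # drive the WSGI app directly with a synthetic environ. The tile path here
    # is made up and may simply yield a 404 unless it matches self.path_regex.
    from wsgiref.util import setup_testing_defaults
    environ = {}
    setup_testing_defaults(environ)
    environ['PATH_INFO'] = path
    captured = {}

    def start_response(status, headers):
        captured['status'] = status
        captured['headers'] = headers

    body = b''.join(application(environ, start_response))
    return captured.get('status'), body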
|
subproc_vec_env.py
|
from multiprocessing import Process, Pipe
import numpy as np
from stable_baselines.common.vec_env import VecEnv, CloudpickleWrapper
from stable_baselines.common.tile_images import tile_images
def _worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.var()
while True:
try:
cmd, data = remote.recv()
if cmd == 'step':
observation, reward, done, info = env.step(data)
if done:
observation = env.reset()
remote.send((observation, reward, done, info))
elif cmd == 'reset':
observation = env.reset()
remote.send(observation)
elif cmd == 'render':
remote.send(env.render(*data[0], **data[1]))
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
except EOFError:
break
class SubprocVecEnv(VecEnv):
"""
Creates a multiprocess vectorized wrapper for multiple environments
:param env_fns: ([Gym Environment]) Environments to run in subprocesses
"""
def __init__(self, env_fns):
self.waiting = False
self.closed = False
n_envs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(n_envs)])
self.processes = [Process(target=_worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for process in self.processes:
process.daemon = True # if the main process crashes, we should not cause things to hang
process.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for process in self.processes:
process.join()
self.closed = True
def render(self, mode='human', *args, **kwargs):
for pipe in self.remotes:
# gather images from subprocesses
# `mode` will be taken into account later
pipe.send(('render', (args, {'mode': 'rgb_array', **kwargs})))
imgs = [pipe.recv() for pipe in self.remotes]
# Create a big image by tiling images from subprocesses
bigimg = tile_images(imgs)
if mode == 'human':
import cv2
cv2.imshow('vecenv', bigimg[:, :, ::-1])
cv2.waitKey(1)
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
def get_images(self):
for pipe in self.remotes:
pipe.send(('render', {"mode": 'rgb_array'}))
imgs = [pipe.recv() for pipe in self.remotes]
return imgs
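
# Hedged usage sketch (added for illustration): how this wrapper is typically
# driven. Assumes `gym` is installed and 'CartPole-v1' is registered. The
# __main__ guard matters: on spawn-based platforms the worker processes
# re-import this module.
if __name__ == '__main__':
    import gym

    def make_env():
        # each worker process builds its own environment instance
        return gym.make('CartPole-v1')

    vec_env = SubprocVecEnv([make_env for _ in range(4)])
    obs = vec_env.reset()  # stacked observations, shape (4, obs_dim)
    for _ in range(10):
        actions = [vec_env.action_space.sample() for _ in range(4)]
        obs, rewards, dones, infos = vec_env.step(actions)
    vec_env.close()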
|
client.py
|
#!/usr/bin/env python3
# -*-encoding: utf-8-*-
# created: 02.12.2019
# by David Zashkolny
# 3 course, comp math
# Taras Shevchenko National University of Kyiv
# email: davendiy@gmail.com
from .constants import *
from .logger import logger
from .security import *
import threading
import socket
import pickle
import json
import time
from queue import Queue
TIMEOUT = 40
MAIN_SERVER_HOST = 'localhost'
MAIN_SERVER_PORT = 25000
DATA_SERVER_HOST = 'localhost'
DATA_SERVER_PORT = 25001
lock = threading.RLock()
background_queue = Queue()
class ServerError(Exception):
pass
def background_socket_receiving(client_aux_socket):
logger.info(f"[*] Starting of background receiving...")
logger.debug(f"[-->] Sending {READY_FOR_TRANSFERRING} to aux server...")
# client_aux_socket.sendall(READY_FOR_TRANSFERRING)
while True:
try:
logger.debug(f"[<--] Fetching {READY_FOR_TRANSFERRING} from aux server...")
resp = client_aux_socket.recv(ATOM_LENGTH)
            if not resp:
                # an empty recv() means the peer closed the connection;
                # sleeping and retrying would busy-loop on a dead socket
                logger.info("[*] Aux connection closed by the server.")
                break
elif resp != READY_FOR_TRANSFERRING:
logger.error(f'[*] Got {resp} from aux server.')
continue
client_aux_socket.sendall(READY_FOR_TRANSFERRING)
logger.debug(f"[<--] Fetching json from aux server...")
resp = client_aux_socket.recv(ATOM_LENGTH)
logger.debug(f"[*] Got {resp} from aux sever.")
try:
data = json.loads(str(resp, encoding='utf-8'))
except (ValueError, TypeError):
logger.error(BAD_JSON_FORMAT)
continue
content_type = data[CONTENT_TYPE]
content_size = data[CONTENT_SIZE]
if content_type != CHATS_LIST:
chat_name = data[CHAT_NAME]
else:
chat_name = ''
signature = data[SIGNATURE_OF_SERVER]
logger.debug(f'[-->] Sending {READY_FOR_TRANSFERRING} to the aux server...')
client_aux_socket.sendall(READY_FOR_TRANSFERRING)
            res_data = b''
            logger.debug(f"[<--] Fetching {content_size} bytes from aux server...")
            # recv() may return fewer bytes than requested, so loop until the
            # whole payload has arrived instead of trusting a fixed chunk count
            while len(res_data) < content_size:
                chunk = client_aux_socket.recv(min(CHUNK, content_size - len(res_data)))
                if not chunk:
                    raise socket.error("connection closed mid-transfer")
                res_data += chunk
verify_control_message(res_data, bytes.fromhex(signature))
tmp = pickle.loads(res_data)
background_queue.put((content_type, chat_name, tmp))
except socket.error:
client_aux_socket.close()
break
except Exception as e:
logger.exception(e)
continue
def background_printer():
while True:
tmp = background_queue.get()
with lock:
print(tmp)
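
def recv_exact(sock: socket.socket, n: int) -> bytes:
    """Hedged helper (added for illustration, not part of the original
    protocol code): read exactly n bytes from a socket. recv() may return
    fewer bytes than requested, so we loop until the payload is complete."""
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise socket.error("connection closed mid-transfer")
        buf += chunk
    return buf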
class ConnectionManager:
def __init__(self, main_socket: socket.socket, aux_socket: socket.socket):
self._main_socket = main_socket
self._aux_socket = aux_socket
self._logged_in = False
self._logged_name = ""
self._in_chat = None
self._connected = False
self._aux_connected = False
self._current_messages = []
self._current_chats = []
        self._current_chat_members = set()  # background_worker() calls .add() on this
self._user_chats = None
def background_worker(self):
while True:
content_type, chat_name, value = background_queue.get()
logger.debug(f"[*] Got from queue: {content_type}, {chat_name}, {value}.")
if content_type == NEW_MESSAGE:
with lock:
self._current_messages.append(value)
elif content_type == CHATS_LIST:
with lock:
self._current_chats = value
elif content_type == CHAT_MEMBERS:
with lock:
self._current_chat_members = value
elif content_type == CHAT_MESSAGES:
with lock:
self._current_messages = value
elif content_type == NEW_USER:
with lock:
self._current_chat_members.add(value)
else:
logger.error(f"Unknown type from queue: {content_type}.")
def register(self, user_name, password):
assert not self._logged_in
logger.info(f"[*] Starting process of registration...")
logger.debug(f"[-->] Sending {REGISTRATION}...")
self._main_socket.sendall(REGISTRATION)
logger.debug(f"[<--] Getting answer from server...")
resp = self._main_socket.recv(ATOM_LENGTH)
logger.debug(f"[*] Got: {resp}.")
if resp != READY_FOR_TRANSFERRING:
raise ServerError(f"Incorrect server response: {resp}")
self._confirm_server()
data = JSON_REGISTRATION_TEMPLATE.copy()
data[NAME] = user_name
data[PASSWORD] = password
logger.debug(f"[-->] Sending to the server {data}...")
self._main_socket.sendall(bytes(json.dumps(data), encoding='utf-8'))
logger.debug(f"[<--] Fetching response...")
resp = self._main_socket.recv(ATOM_LENGTH)
logger.debug(f"[*] Got {resp}.")
if resp == READY_FOR_TRANSFERRING:
self._aux_connection()
else:
raise ServerError(str(resp, encoding='utf-8'))
self._logged_in = True
self._logged_name = user_name
logger.info(f'[*] Process of registration done.')
threading.Thread(target=background_socket_receiving,
args=(self._aux_socket,), daemon=True).start()
def _aux_connection(self):
logger.info(f"[*] Starting the process of creating aux connection...")
logger.debug(f"[-->] Sending {READY_FOR_TRANSFERRING}...")
self._main_socket.sendall(READY_FOR_TRANSFERRING)
logger.debug(f"[<--] Receiving the control phrase from the server...")
check_phrase = self._main_socket.recv(ATOM_LENGTH)
logger.debug(f"[*] Got {check_phrase}.")
logger.debug(f"[*] Connecting to the server for data transferring...")
self._aux_socket.connect((DATA_SERVER_HOST, DATA_SERVER_PORT))
logger.debug(f"[-->] Sending {check_phrase} to the aux server...")
self._aux_socket.sendall(check_phrase)
logger.debug(f"[<--] Fetching the response from aux server...")
resp = self._aux_socket.recv(ATOM_LENGTH)
if resp == READY_FOR_TRANSFERRING:
self._aux_connected = True
else:
raise ServerError(str(resp, encoding='utf-8'))
logger.info(f"[*] Process of creating aux connection done.")
def _confirm_server(self):
logger.info(f"[*] Starting process of server confirmation...")
control_message = generate_control_message()
logger.debug(f"[-->] Sending control message: {control_message}")
self._main_socket.sendall(control_message)
logger.debug(f"[<--] Getting response from the server...")
resp = self._main_socket.recv(256)
logger.debug(f"[*] Got: {resp}")
verify_control_message(control_message, resp)
logger.info(f"[*] Process of server confirmation done.")
def log_in(self, user_name, password):
assert not self._logged_in
logger.info(f"[*] Starting process of logging in...")
logger.debug(f"[-->] Sending {SIGN_IN}...")
self._main_socket.sendall(SIGN_IN)
logger.debug(f"[<--] Getting answer from server...")
resp = self._main_socket.recv(ATOM_LENGTH)
logger.debug(f"[*] Got: {resp}.")
if resp != READY_FOR_TRANSFERRING:
raise ServerError(f"Incorrect server response: {resp}")
self._confirm_server()
data = JSON_SIGN_IN_TEMPLATE.copy()
data[NAME] = user_name
data[PASSWORD] = password
logger.debug(f"[-->] Sending to the server {data}...")
self._main_socket.sendall(bytes(json.dumps(data), encoding='utf-8'))
logger.debug(f"[<--] Fetching response...")
resp = self._main_socket.recv(ATOM_LENGTH)
logger.debug(f"[*] Got {resp}.")
if resp == READY_FOR_TRANSFERRING:
self._aux_connection()
else:
raise ServerError(str(resp, encoding='utf-8'))
self._logged_in = True
self._logged_name = user_name
logger.info(f'[*] Process of logging in done.')
threading.Thread(target=background_socket_receiving,
args=(self._aux_socket,), daemon=True).start()
def create_chat(self, chat_name, members):
assert self._logged_in
assert self._in_chat is None
logger.info(f"[*] Starting process of chat creating...")
logger.debug(f"[-->] Sending {CREATE_CHAT} to the main server.")
self._main_socket.sendall(CREATE_CHAT)
logger.debug(f"[<--] Fetching response from the main server...")
resp = self._main_socket.recv(ATOM_LENGTH)
logger.debug(f"[*] Got: {resp}.")
if resp != READY_FOR_TRANSFERRING:
raise ServerError(resp)
data = pickle.dumps(members)
metadata = JSON_CREATE_CHAT_FORMAT.copy()
metadata[NAME] = chat_name
metadata[CONTENT_TYPE] = CHAT_MEMBERS
metadata[CONTENT_SIZE] = len(data)
logger.debug(f"[-->] Sending {metadata} to the main server...")
self._main_socket.sendall(bytes(json.dumps(metadata), encoding='utf-8'))
logger.debug(f"[<--] Fetching response from the main server...")
resp = self._main_socket.recv(ATOM_LENGTH)
if resp != READY_FOR_TRANSFERRING:
raise ServerError(resp)
logger.debug(f"[-->] Sending all the members to the server...")
self._main_socket.sendall(data)
logger.info(f"[*] Process of chat creating is done")
self._in_chat = chat_name
def open_chat(self, chat: Chat):
assert self._logged_in
assert self._in_chat is None
logger.info(f"[*] Starting process of opening {chat}...")
logger.debug(f"[-->] Sending {OPEN_CHAT} to the main server.")
self._main_socket.sendall(OPEN_CHAT)
logger.debug(f"[<--] Fetching response from the main server...")
resp = self._main_socket.recv(ATOM_LENGTH)
logger.debug(f"[*] Got: {resp}.")
if resp != READY_FOR_TRANSFERRING:
raise ServerError(resp)
data = JSON_OPEN_CHAT_FORMAT.copy()
data[NAME] = chat.name
data[CONTENT_TYPE] = CHAT
logger.debug(f'[*] Sending {data} to the main server...')
self._main_socket.sendall(bytes(json.dumps(data), encoding='utf-8'))
resp = self._main_socket.recv(ATOM_LENGTH)
if resp != READY_FOR_TRANSFERRING:
raise ServerError(resp)
self._in_chat = chat.name
def exit_chat(self):
assert self._logged_in
assert self._in_chat
self._main_socket.sendall(EXIT_FROM_CHAT)
resp = self._main_socket.recv(ATOM_LENGTH)
if resp != READY_FOR_TRANSFERRING:
raise ServerError(resp)
self._in_chat = None
self._current_messages = []
self._current_chat_members = set()
def message(self, message: Message):
assert self._logged_in
assert self._in_chat
self._main_socket.sendall(MESSAGE)
resp = self._main_socket.recv(ATOM_LENGTH)
if resp != READY_FOR_TRANSFERRING:
raise ServerError(resp)
self._confirm_server()
content = message.content.encode('utf-8')
metadata = JSON_MESSAGE_TEMPLATE.copy()
metadata[CONTENT_TYPE] = message.content_type
metadata[CONTENT_SIZE] = len(content)
self._main_socket.sendall(bytes(json.dumps(metadata), encoding='utf-8'))
resp = self._main_socket.recv(ATOM_LENGTH)
if resp != READY_FOR_TRANSFERRING:
raise ServerError(resp)
self._main_socket.sendall(content)
resp = self._main_socket.recv(ATOM_LENGTH)
if resp != READY_FOR_TRANSFERRING:
raise ServerError(resp)
def find_chat(self, user_name):
pass
def start(self, username='test18', password='test_password', register=False):
logger.info("[*] Creating main connection...")
self._main_socket.connect((MAIN_SERVER_HOST, MAIN_SERVER_PORT))
if register:
self.register(username, password)
else:
self.log_in(username, password)
threading.Thread(target=self.background_worker, daemon=True).start()
time.sleep(2)
while True:
# time.sleep(2)
command = input("Please, enter the command:\n--> ")
if command == 'open chat':
with lock:
name = input("Please, enter the chat name:\n--> ")
self.open_chat(Chat(name, '', ''))
elif command == 'create chat':
with lock:
name = input("Please, enter the name of new chat:\n--> ")
self.create_chat(name, [])
elif command == 'members':
with lock:
for row in self._current_chat_members:
print(row)
elif command == 'exit':
if self._in_chat:
self.exit_chat()
else:
exit(0)
elif command == 'message':
with lock:
text = input("Text:\n")
self.message(Message(self._logged_name, '', '', TEXT, text))
elif command == "status":
with lock:
print(f"Logged as {self._logged_name}, \n"
f"Opened chat: {self._in_chat}, \n")
elif command == "chats":
with lock:
for row in self._current_chats:
print(row)
elif command == 'messages':
with lock:
for row in self._current_messages:
print(row)
else:
print("Bad command")
_main_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_aux_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client = ConnectionManager(_main_socket, _aux_socket)
|
navFunctions.py
|
'''
Author: Sam Schmidt
Purpose: navigation functions
Date: 02/06/15 - 03/01/15
Dependencies: numpy, scipy, matplotlib, pandas, serial, time, os
'''
###imports
from __future__ import division #enables default float division
import numpy as np #used for signal processing
import scipy.signal as signal #used for signal processing
import matplotlib.pyplot as plt #used for plotting, mainly for debugging
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd #has better CSV capabilities than numpy
import serial #used to communicate with the sensors
import time #used for timing sections of code, and will be used to meter sensor communications
import Queue #used to communicate between threads
import imu_comm_new as imu #used to communicate with the IMU
import threading #used to make threads
from otherFunctions import * #populate name space with otherFunctions code
from ins_sim import * #populate name space with ins_sim code
# import os #could be used for parallel threads
#################################################################################################################
######################## Entering the INS functions #############################################################
#################################################################################################################
##IIR filter class
#Filter used to remove high frequency (anything above 2 Hz) noise from the accelerometer
#currently configured for a Chebychev Type 2 IIR lowpass filter
class digitalFilter(object):
def __init__(self,fc = [2,3],fs = 100,passGain = 1, stopGain = 70, analog = False, ftype = 'cheby2'):
#filter coefficients
self.b,self.a = signal.iirdesign(2*fc[0]/fs,2*fc[1]/fs,passGain,stopGain,analog,ftype)
#filter memory
self.zi = np.zeros(max([len(self.b),len(self.a)]))
pass
    def iirFilt(self,x,reset = False):
        #reset is here in case something bad happens elsewhere
        if reset:
            self.zi = np.zeros(max([len(self.b),len(self.a)]))
        #implement direct form II: w[n] = x[n] - sum(a[1:]*w[past]), y[n] = sum(b*w)
        self.zi[0] = x - (self.a[1:]*self.zi[1:]).sum()
        y = (self.b*self.zi).sum()
        #shift the filter memory; the old dead `zi[0] = x` write was dropped
        #since zi[0] is always recomputed before it is read on the next call
        self.zi = np.roll(self.zi,1)
        return y
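
#Hedged example (added for illustration, not original code): stream a noisy
#sine through the filter one sample at a time. Assumes the 100 Hz sample
#rate matching the defaults above; the 20 Hz tone sits in the stopband.
def _demo_digital_filter():
    fs = 100.0
    t = np.arange(0, 2, 1/fs)
    noisy = np.sin(2*np.pi*1*t) + 0.5*np.sin(2*np.pi*20*t)
    filt = digitalFilter()
    return np.array([filt.iirFilt(x) for x in noisy])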
#class used to navigate
#handles talking to the sensors and implements all navigation equations, it also controls the Kalman filter
class kalmanControl(object):
def __init__(self,sim='lin',leverArm = np.zeros((3,1)),measNoise = np.zeros((15,15))):
#set up the accel filters
# self.axFilter = digitalFilter()
# self.ayFilter = digitalFilter()
# self.azFilter = digitalFilter()
# ###############################
# #test class for the imu
        self.sim = sim  #startup() branches on the sim mode, so keep this live
# self.imu_comm = imu_sim(convert2decimalDeg(3.852355371e+03)*np.pi/180.,
# convert2decimalDeg(1.044188916e+04)*np.pi/180.,convert2decimalDeg(3.952355371e+03)*np.pi/180.,\
# convert2decimalDeg(1.044188916e+04)*np.pi/180.,alti = 2004,sim = self.sim,simTime = 3600, \
# updateRate = 100,noise = True,bias = True)
# print self.imu_comm.biasA
# print self.imu_comm.biasG
# ###############################
################# Constants #########################
#sample rate
self.fs = 100.00
self.ts = 1/self.fs
#degrees to radian conversion
self.d2r = np.pi/180.
#Earth's radius
self.R0 = 6378137 #metres
#eccentricity of the WGS84 ellipsoid
self.eccenSquare = 6.69437999014e-3
#Earth's rotational velocity
self.omegaE = 7.2921159e-5
#flattening of the wgs84 ellipsoid
self.flat = 1/298.257223563
#Earth's gravitational constant
self.mu = 3.986004418e14
################# Kalman filter matrices ###############################
#helper matrices for the measurement noise matrix
A = np.vstack((np.hstack((.0000008*np.eye(3),np.zeros((3,3)))),np.hstack((np.zeros((3,3)),1e-3*np.eye(3))),np.zeros((3,6))))
D = np.vstack((np.hstack((np.zeros((3,3)),1e-14*np.eye(3),np.zeros((3,3)))),np.hstack((np.zeros((3,6)),5e-8*np.eye(3)))))
#measurement noise matrix: commonly called Q
self.measNoise = np.vstack((np.hstack((A,np.zeros((9,9)))),np.hstack((np.zeros((6,6)),D))))*self.ts
#noise covarience of the aiding system: commonly called R
self.noiseCovar = np.array([[2.5066e-10,0,0,0,0,0],
[0,2.9692e-10,0,0,0,0],
[0,0,.30849e-5,0,0,0],
[0,0,0,1e-3,0,0],
[0,0,0,0,1e-3,0],
[0,0,0,0,0,1e-3]])
#curvilinear position scaling matrix
self.cLinMat = np.array([[10e3,0,0],
[0,10e3,0],
[0,0,1]]) #100 on element 3,3 worked well
#lever arm from the IMU to the aiding system, default is a column of zeros to simplify stuff
self.leverArm = leverArm
#helper matrices for the measurement matrix
A = np.vstack((np.zeros((3,6)),np.hstack((np.zeros((3,3)),-1*np.eye(3)))))
B = np.vstack((-self.cLinMat,np.zeros((3,3))))
#measurement matrix commonly called: H
self.measMatrix = np.hstack((A,B,np.zeros((6,6))))
self.biasA = np.zeros((3,1))
self.biasG = np.zeros(3) #might need to add in earths rotation
self.biasM = np.zeros(3)
#--------------------------Setup sensor communications----------------#
        '''
        Sensor comms are handled through python threads; see information on
        python's global interpreter lock (GIL) if the kalman filter starts
        bottlenecking things
        '''
#init imu
check = imu.init_imu()
assert(check == 0)
#configure accel for default op
fullScaleA = "2G"
rate = "100HZ"
check = imu.configAccel(rate, fullScaleA)
assert(check == 0)
#configure Mag for default op
fullScaleM = "4GAUSS"
rate = "100HZ"
check = imu.configMag(rate, fullScaleM)
assert(check == 0)
#configure gyro for default op
fullScaleG = "245DPS"
check = imu.configGyro(fullScaleG)
assert(check == 0)
self.q = Queue.Queue()
t1 = threading.Thread(target=imu.returnIMU, args=(self.q,"2G","245DPS","4GAUSS"))
t2 = threading.Thread(target=imu.recordSerial, args=(self.q,'COM3'))
t2.start()
t1.start()
# for item in source():
# q.put(item)
pass
def getIMUGPS(self,gravConst = 0):
#this function will pull a value from the IMU, filter it and return stuff
#inputs:
        #-gravConst is the current estimate of the position's gravity
#grab the data, assume imu data is structured as (ax,ay,az,gx,gy,gz,mx,my,mz)
#big impressive routine
if not self.q.empty():
data = self.q.get()
#check GPS
if '$' in data:
try:
#GPS
data = data.split(',')
self.gpsPos = np.array([[float(data[11])],[float(data[13])],[float(data[18])]])
self.gpsVelNed = float(data[7])*np.array([[1],[1],[1]])*(1000/3600.)
except:
                    print 'Bad GPS data, skipping sample'
#Then look for new IMU
data = self.q.get()
while '$' in data:
data = self.q.get()
#unpack data
self.accelMeas = np.asarray([[data[0]],[data[1]],[data[2]]])
self.rotMeas = np.asarray([data[3],data[4],data[5]])
self.magMeas = np.asarray([data[6],data[7],data[8]])
#scale accel by g
self.accelMeas *= 9.8
#remove accel Bias
self.accelMeas -= self.biasA
#pass accel through the filter
# self.accelMeas[0] = self.axFilter.iirFilt(self.accelMeas[0])
# self.accelMeas[1] = self.ayFilter.iirFilt(self.accelMeas[1])
# self.accelMeas[2] = self.azFilter.iirFilt(self.accelMeas[2])
            #convert rotations from deg/s to rad/s
            self.rotMeas *= self.d2r
            #remove gyro bias
            self.rotMeas -= self.biasG
            #store the rotations as a skew-symmetric matrix
            self.rotMeasSkew = np.array([[0,-self.rotMeas[2],self.rotMeas[1]],[self.rotMeas[2],0,-self.rotMeas[0]],[-self.rotMeas[1],self.rotMeas[0],0]])
#get magnitude of gyro
self.magRotMeas = np.sqrt(np.dot(self.rotMeas,self.rotMeas))*self.ts
#-get GPS measurements
# self.gpsPos = np.array([[imu[9]],[imu[10]],[imu[11]]])
# self.gpsVelNed = imu[12]*np.array([[1],[1],[1]])#*np.array([[np.sin(self.state[2])*np.cos(self.state[1])],[np.cos(self.state[2])*np.cos(self.state[1])],[np.sin(self.state[1])]])
def startup(self):
#this function will determine the starting orientation, heading, and position
#it will also instansiate the kalman filter class
#init latitude, longitude, and altitude
self.lat = convert2decimalDeg(3.852355371e+03)*self.d2r
self.lon = convert2decimalDeg(1.044188916e+04)*self.d2r
self.alti = 2004
        #get the gravity constant
self.g0 = 9.7803253359*((1 + 0.001931853*(np.sin(self.lat))**2)/np.sqrt(1 - self.eccenSquare*(np.sin(self.lat))**2))
#init velocity
if self.sim == 'lin' or self.sim == 'stat':
self.velNed = np.array([[0],[0],[0]])
else:
self.velNed = np.array([[0],[500*(2*np.pi/3600.)],[0]])
#init state vector
self.state = 10e-18*np.ones(15)
#init state transition matrix
self.PHI = 10e-24*np.eye(15)
        #init covariance matrix
self.covar = 10e-24*np.eye(15)
        #get the Earth's rotation in NED coordinates
self.earthRotINNed = (self.omegaE)*np.array([[0,np.sin(self.lat),0],
[-np.sin(self.lat),0,-np.cos(self.lat)],
[0,np.cos(self.lat),0]])
#estimate the bias values, use 30 seconds of data to do the estimation
biasEstimatorA = np.zeros((3,1))
biasEstimatorG = np.zeros(3)
biasEstimatorM = np.zeros(3)
# print biasEstimator + self.imu_comm.sample(self.lat,self.alti)
sampleCount = 0
        while sampleCount < 3000:
self.getIMUGPS()
biasEstimatorA += self.accelMeas
biasEstimatorG += self.rotMeas
biasEstimatorM += self.magMeas
sampleCount += 1
        biasEstimatorA /= 3000.
        biasEstimatorG /= 3000.
        biasEstimatorM /= 3000.
        #init bias values (the old code divided and indexed an undefined
        #`biasEstimator`, which raised a NameError)
        self.biasG = biasEstimatorG
        self.biasA = biasEstimatorA
        self.biasM = biasEstimatorM
print 'estimated gyro bias: ',self.biasG
print 'estimated Accel bias: ',self.biasA
#init body to nav frame rotation matrix
self.rotB2N = self.earthRotINNed
        #calculate the Earth's radius
self.RE = self.R0/(np.sqrt(1-self.eccenSquare*(np.sin(self.lat))**2)) #transverse radius
self.RN = (self.R0*(1-self.eccenSquare))/((1 - self.eccenSquare*(np.sin(self.lat))**2)**(1.5))
        #get the rotation rate between an Earth-centered frame and the local NED frame
rotEarthandNed = np.array([[self.velNed[1]/(self.RE + self.alti)],[-self.velNed[0]/(self.RN + self.alti)],[-np.tan(self.lat)*self.velNed[1]/(self.RE + self.alti)]])
self.rotEarthandNedSkew = np.array([[0,-rotEarthandNed[2],rotEarthandNed[1]],[rotEarthandNed[2],0,-rotEarthandNed[0]],[-rotEarthandNed[1],rotEarthandNed[0],0]])
pass
def getRot(self):
#method to update the rotation matrix from the gyro readings
#this is the rotation from the body frame to the navigation frame
self.prevRotB2N = self.rotB2N
# print self.rotB2N
#updated gyro matrix
rotBP2BM = np.eye(3) + (np.sin(self.magRotMeas)/self.magRotMeas)*self.rotMeasSkew + ((1-np.cos(self.magRotMeas))/self.magRotMeas)*(np.dot(self.rotMeasSkew,self.rotMeasSkew))
# print self.magRotMeas
#matrix to account for the Earth's rotation in a local reference frame
self.earthRotINNed = (self.omegaE)*np.array([[0,np.sin(self.lat),0],[-np.sin(self.lat),0,-np.cos(self.lat)],[0,np.cos(self.lat),0]])
        #calculate the Earth's radius
self.RE = self.R0/(np.sqrt(1-self.eccenSquare*(np.sin(self.lat))**2)) #transverse radius
self.RN = (self.R0*(1-self.eccenSquare))/((1 - self.eccenSquare*(np.sin(self.lat))**2)**(1.5))
rotEarthandNed = np.array([[self.velNed[1]/(self.RE + self.alti)],[-self.velNed[0]/(self.RN + self.alti)],[-np.tan(self.lat)*self.velNed[1]/(self.RE + self.alti)]])
self.rotEarthandNedSkew = np.array([[0,-rotEarthandNed[2],rotEarthandNed[1]],[rotEarthandNed[2],0,-rotEarthandNed[0]],[-rotEarthandNed[1],rotEarthandNed[0],0]])
#update rotation matrix
self.rotB2N = np.dot(self.prevRotB2N,rotBP2BM) - np.dot((self.earthRotINNed + self.rotEarthandNedSkew),self.prevRotB2N*self.ts)
def getVel(self):
#method to get the velocity from the accels
self.prevVelNed = self.velNed
#update rotation matrix
rotBB2BM = np.eye(3) + ((1-np.cos(self.magRotMeas))/(self.magRotMeas)**2)*self.rotMeasSkew + (1/(self.magRotMeas)**2)*(1 - (np.sin(self.magRotMeas)/self.magRotMeas))*(np.dot(self.rotMeasSkew,self.rotMeasSkew))
rotB2NBar = np.dot(self.prevRotB2N,rotBB2BM) - 0.5*np.dot((self.earthRotINNed + self.rotEarthandNedSkew),self.prevRotB2N*self.ts)
rotAccel = np.dot(rotB2NBar,self.accelMeas) #NOTE this is commented out, since I don't simulate gyro stuff....
#rotAccel = self.accelMeas
        #get the gravity vector
self.g0 = 9.7803253359*((1 + 0.001931853*(np.sin(self.lat))**2)/np.sqrt(1 - self.eccenSquare*(np.sin(self.lat))**2))
gN = (-8.08e-9)*self.alti*np.sin(2*self.lat)
gD = self.g0*(1 - (1 + self.flat*(1-2*(np.sin(self.lat)**2)) + ((self.omegaE*self.R0)**2)*(6356752.3142)/self.mu)*(2*self.alti/self.R0) + (3/(self.R0**2))*(self.alti**2))
self.gVec = np.array([[gN],[0],[gD]])
#update velocity
self.velNed = self.prevVelNed + (rotAccel + self.gVec - np.dot((self.earthRotINNed + 2*self.rotEarthandNedSkew),self.prevVelNed))*self.ts
def getPos(self):
#this method will get the current position of the system resolved in geodetic coordinates
#get altitude
newAlti = self.alti - (self.ts/2.)*(self.prevVelNed[2] + self.velNed[2])
#get latitude
#-first calculate the meridian radius of curvature
RNPrevLat = (self.R0*(1-self.eccenSquare))/((1 - self.eccenSquare*(np.sin(self.lat))**2)**(1.5))
newLat = self.lat + (self.ts/2.)*((self.prevVelNed[0]/(RNPrevLat + self.alti)) + (self.velNed[0]/(RNPrevLat + newAlti)))
#get new longitude
#-first calculate the new and old transverse radius
REPrevLat = self.R0/(np.sqrt(1-self.eccenSquare*(np.sin(self.lat))**2))
RENewLat = self.R0/(np.sqrt(1-self.eccenSquare*(np.sin(newLat[0]))**2))
newLon = self.lon + (self.ts/2.)*((self.prevVelNed[1]/((REPrevLat + self.alti)*np.cos(self.lat))) + (self.velNed[1]/((RENewLat + newAlti[0])*np.cos(newLat[0]))))
#update position
self.alti = newAlti[0]
self.lat = newLat[0]
self.lon = newLon[0]
'''
------------------------Begin Kalman Filter---------------------------------
'''
################# calculate the transition matrix ##########################
def buildStateTranMatrix(self):
        #need to recalculate the RE and RN with the newest corrected latitude, longitude, and altitude
self.prevPHI = self.PHI
#get submatrices
self.f11 = -(self.earthRotINNed + self.rotEarthandNedSkew)
self.f12 = np.array([[0,-1/(self.RE + self.alti),0],
[1/(self.RE + self.alti),0,0],
[0,np.tan(self.lat)/(self.RE + self.alti),0]])
self.f13 = np.array([[self.omegaE*np.sin(self.lat),0,self.velNed[1]/(self.RE + self.alti)**2],
[0,0,-self.velNed[0][0]/(self.RN + self.alti)**2],
[self.omegaE*np.cos(self.lat) + self.velNed[1][0]/((self.RE + self.alti)*(np.cos(self.lat)**2)),0,-np.tan(self.lat)*self.velNed[1][0]/(self.RE + self.alti)**2]])
self.f21 = -1*np.array([[0,-np.dot(self.rotB2N[2,:],self.accelMeas),np.dot(self.rotB2N[1,:],self.accelMeas)],
[np.dot(self.rotB2N[2,:],self.accelMeas),0,-np.dot(self.rotB2N[0,:],self.accelMeas)],
[-np.dot(self.rotB2N[1,:],self.accelMeas),np.dot(self.rotB2N[0,:],self.accelMeas),0]])
self.f22 = np.array([[self.velNed[2][0]/(self.RN + self.alti),-2*np.tan(self.lat)*self.velNed[1][0]/(self.RE + self.alti) - 2*self.omegaE*np.sin(self.lat),self.velNed[0][0]/(self.RN + self.alti)],
[np.tan(self.lat)*self.velNed[1][0]/(self.RE + self.alti) + 2*self.omegaE*np.sin(self.lat),(self.velNed[0][0]*np.tan(self.lat) + self.velNed[2][0])/(self.RE + self.alti),self.velNed[1][0]/(self.RE + self.alti) - 2*self.omegaE*np.sin(self.lat)],
[-2*self.velNed[0][0]*(self.RN + self.alti),-2*self.velNed[1][0]*(self.RE + self.alti) - 2*self.omegaE*np.cos(self.lat),0]])
self.f23 = np.array([[-(((self.velNed[1][0]**2)*((1/np.cos(self.lat))**2))/(self.RE + self.alti)) - 2*self.velNed[1][0]*self.omegaE*np.cos(self.lat),0,((((self.velNed[1][0]**2)*((np.tan(self.lat))))/((self.RE + self.alti)**2))) - ((self.velNed[0][0]*self.velNed[2][0])/(((self.RN + self.alti)**2)))],
[(((self.velNed[1][0]*self.velNed[0][0])*((1/np.cos(self.lat))**2))/(self.RE + self.alti)) + 2*self.velNed[0][0]*self.omegaE*np.cos(self.lat) - 2*self.velNed[2][0]*self.omegaE*np.sin(self.lat),0,-(((self.velNed[0][0]*self.velNed[1][0])*((np.tan(self.lat))) + (self.velNed[1][0]*self.velNed[2][0])))/((self.RE + self.alti)**2)],
[2*self.velNed[1][0]*self.omegaE*np.sin(self.lat),0,(self.velNed[1][0]**2)/((self.RE + self.alti)**2) + (self.velNed[0][0]**2)/((self.RN + self.alti)**2) - (2*self.g0)/(self.RE*np.sqrt(1 + (-2*np.sqrt(self.eccenSquare) + self.eccenSquare)*(np.sin(self.lat))**2))]])
self.f32 = np.array([[1/(self.RN + self.alti),0,0],
[0,1/((self.RE + self.alti)*np.cos(self.lat)),0],
[0,0,-1]])
self.f33 = np.array([[0,0,self.velNed[0][0]/(self.RN + self.alti)**2],
[self.velNed[1][0]*np.sin(self.lat)/((self.RE + self.alti)*(np.cos(self.lat)**2)),0,-self.velNed[1][0]*np.sin(self.lat)/(((self.RE + self.alti)**2)*(np.cos(self.lat)))],
[0,0,0]])
#print self.f22
#I feel like there will be a lot of dimension problems here....
self.PHI = np.eye(15) + (np.vstack((np.hstack((self.f11,self.f12,self.f13,np.zeros((3,3)),self.rotB2N)),
np.hstack((self.f21,self.f22,self.f23,self.rotB2N,np.zeros((3,3)))),
np.hstack((np.zeros((3,3)),self.f32,self.f33,np.zeros((3,3)),np.zeros((3,3)))),
np.zeros((6,15)))))*self.ts
    ################# calculate covariance #####################################
def getCovar(self):
#self.prevCovar = self.covar
self.covar = np.dot(self.prevPHI,np.dot(self.covar,self.prevPHI.T)) + self.measNoise
################# Update states ############################################
def update(self):
        #map position perturbations from Cartesian to curvilinear coordinates
T = np.array([[1/(self.RN + self.alti),0,0],[0,1/((self.RE + self.alti)*np.cos(self.lat)),0],[0,0,-1]])
#update states
newState = np.dot(self.PHI,self.state)
#need to think about the implementation of this, might need a least squares solution
self.kalmanGain = np.dot(self.covar,np.dot(self.measMatrix.T,np.linalg.inv(np.dot(self.measMatrix,np.dot(self.covar,self.measMatrix.T)) + self.noiseCovar)))
#form the innovation
#print self.gpsPos
self.measInnov = np.vstack((np.dot(self.cLinMat,(self.gpsPos - np.array([[self.lat],[self.lon],[self.alti]]) - np.dot(T,np.dot(self.rotB2N,self.leverArm))))
,self.gpsVelNed-self.velNed - np.dot(self.rotB2N,np.dot(self.rotMeasSkew,self.leverArm)) + np.dot(self.earthRotINNed,np.dot(self.rotB2N,self.leverArm)))).flatten()
#update the states
#self.state = self.state + np.dot(self.kalmanGain,self.measInnov)
self.state = newState + np.dot(self.kalmanGain,self.measInnov)
        #update the covariance
self.covar = np.dot((np.eye(15) - np.dot(self.kalmanGain,self.measMatrix)),self.covar)
'''
-------------------------End Kalman Filter---------------------------------
'''
#this method is called by navigate to estimate the errors
def estimateError(self):
self.buildStateTranMatrix()
self.getCovar()
self.update()
"""
''Public'' methods, these are called to generate the navigation solution
"""
#this method brings everything together and navigates
def navigate(self):
#get imu and gps data
self.getIMUGPS()
#advance the kalman filter
self.estimateError()
#print self.state
#get new rotation
self.getRot()
#get new velocity
self.getVel()
#get new position
self.getPos()
#correct the measurements
self.lat -= self.state[6]
self.lon -= self.state[7]
self.alti -= self.state[8]
self.velNed[0] -= self.state[3]
self.velNed[1] -= self.state[4]
self.velNed[2] -= self.state[5]
self.rotB2N = np.dot((np.eye(3) - np.array([[0,-self.state[2],self.state[1]],
[self.state[2],0,-self.state[0]],
[-self.state[1],self.state[0],0]])),self.rotB2N)
self.biasA -= np.asarray([[self.state[9]],[self.state[10]],[self.state[11]]])
self.biasG -= np.asarray([self.state[12],self.state[13],self.state[14]])
#zero the states
self.prevState = self.state
self.state = np.zeros_like(self.state)
        #recalculate the Earth's radius
self.RE = self.R0/(np.sqrt(1-self.eccenSquare*(np.sin(self.lat))**2)) #transverse radius
self.RN = (self.R0*(1-self.eccenSquare))/((1 - self.eccenSquare*(np.sin(self.lat))**2)**(1.5))
#return
return np.hstack((self.lat,self.lon,self.alti,self.velNed.T.flatten(),self.prevState.flatten())).flatten()
##end kalmanControl class
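
#Hedged driver sketch (added for illustration, not original code): run the
#navigator at the IMU rate and collect the solution history. Assumes the IMU
#threads spun up in __init__ are actually feeding the queue.
def _run_navigator(duration_s=60):
    nav = kalmanControl()
    nav.startup()
    solutions = []
    for _ in range(int(duration_s*nav.fs)):
        solutions.append(nav.navigate())
    return np.asarray(solutions)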
|
utils.py
|
from os import path, remove
import subprocess
from threading import Thread
from urllib.parse import urlparse
import re
from win32com.shell import shell, shellcon
from PIL import Image, ImageTk
import win32api
import win32con
import win32ui
import win32gui
from pytube import YouTube
try:
from constants import *
except ImportError:
from .constants import *
def threaded(fn):
"""To use as decorator to make a function call threaded.
Needs import
from threading import Thread"""
def wrapper(*args, **kwargs):
thread = Thread(target=fn, args=args, kwargs=kwargs)
thread.start()
return thread
return wrapper
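
# Hedged usage sketch (added for illustration): a decorated function returns
# its Thread, so the caller can join() it when completion timing matters.
@threaded
def _demo_task(n):
    print(f"working on {n} in a background thread")

# thread = _demo_task(42)
# thread.join()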
def get_icon(ext, size='large'):
file = open(f"temp{ext}", "w")
file.close()
PATH = f'temp{ext}'
SHGFI_ICON = 0x000000100
SHGFI_ICONLOCATION = 0x000001000
if size == "small":
SHIL_SIZE= 0x00001
elif size == "large":
SHIL_SIZE= 0x00002
else:
raise TypeError("Invalid argument for 'size'. Must be equal to 'small' or 'large'")
ret, info = shell.SHGetFileInfo(PATH, 0, SHGFI_ICONLOCATION | SHGFI_ICON | SHIL_SIZE)
hIcon, iIcon, dwAttr, name, typeName = info
ico_x = win32api.GetSystemMetrics(win32con.SM_CXICON)
hdc = win32ui.CreateDCFromHandle(win32gui.GetDC(0))
hbmp = win32ui.CreateBitmap()
hbmp.CreateCompatibleBitmap(hdc, ico_x, ico_x)
hdc = hdc.CreateCompatibleDC()
hdc.SelectObject(hbmp)
hdc.DrawIcon((0, 0), hIcon)
win32gui.DestroyIcon(hIcon)
bmpinfo = hbmp.GetInfo()
bmpstr = hbmp.GetBitmapBits(True)
img = Image.frombuffer(
"RGBA",
(bmpinfo["bmWidth"], bmpinfo["bmHeight"]),
bmpstr, "raw", "BGRA", 0, 1
)
remove(PATH)
if size == "small":
img = img.resize((16, 16), Image.ANTIALIAS)
img = ImageTk.PhotoImage(img)
return img
def is_downloaded(url, dst):
head, tail = path.split(url)
return path.exists(dst+tail)
def get_filename_from_url(url):
    # https://stackoverflow.com/a/18727481
    a = urlparse(url)
    return path.basename(a.path)  # only `path` is imported from os above
def open_path(url, dst):
head, tail = path.split(url)
# print(dst, tail)
subprocess.Popen(r'explorer /select,"{}"'.format(dst+ '\\' +tail))
def is_url(url):
# https://stackoverflow.com/a/7160778
regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
return re.match(regex, url) is not None
def is_youtube_link(url):
# https://stackoverflow.com/a/19161373
return ('youtube.com' in url) or ('youtu.be' in url)
# regex = (
# r'(https?://)?(www\.)?'
# '(youtube|youtu|youtube-nocookie)\.(com|be)/'
# '(watch\?v=|embed/|v/|.+\?v=)?([^&=%\?]{11})')
# return re.match(regex, url) is not None
def validator(url, dst=DEST):
if not url:
return '* no URL given !!'
if not is_url(url):
        return '* Enter a valid URL please'
if is_downloaded(url, dst):
        return '* This file already exists, delete it if you want to redownload it..'
return 0
def youtube_direct_link(url):
yt = YouTube(url)
url = yt.streams.first().url
title = yt.streams[0].title
    # str slicing never raises, so the old try/except here was dead code
    title = title[:20].replace(' ', '_') + '.mp4'
    dst = path.join(DEST, title)  # `os` itself is not imported, only `path`
return url, dst, title
if __name__ == '__main__':
get_icon('zip')
|
http_server.py
|
#!/usr/bin/env python
# Many tests expect there to be an http server on port 4545 serving the deno
# root directory.
import os
import sys
from threading import Thread
import SimpleHTTPServer
import SocketServer
from util import root_path
from time import sleep
PORT = 4545
REDIRECT_PORT = 4546
def server():
os.chdir(root_path) # Hopefully the main thread doesn't also chdir.
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
SocketServer.TCPServer.allow_reuse_address = True
s = SocketServer.TCPServer(("", PORT), Handler)
print "Deno test server http://localhost:%d/" % PORT
return s
def redirect_server():
os.chdir(root_path)
target_host = "http://localhost:%d" % PORT
class RedirectHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
self.send_response(301)
self.send_header('Location', target_host + self.path)
self.end_headers()
Handler = RedirectHandler
SocketServer.TCPServer.allow_reuse_address = True
s = SocketServer.TCPServer(("", REDIRECT_PORT), Handler)
print "Deno redirect server http://localhost:%d/ -> http://localhost:%d/" % (
REDIRECT_PORT, PORT)
return s
def spawn():
# Main http server
s = server()
thread = Thread(target=s.serve_forever)
thread.daemon = True
thread.start()
# Redirect server
rs = redirect_server()
r_thread = Thread(target=rs.serve_forever)
r_thread.daemon = True
r_thread.start()
sleep(1) # TODO I'm too lazy to figure out how to do this properly.
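
def check_redirect():
    # Hedged helper (added for illustration, not original code): confirm the
    # redirect server answers 301 with a Location on the main server.
    # Assumes spawn() has already been called.
    import httplib
    conn = httplib.HTTPConnection("localhost", REDIRECT_PORT)
    conn.request("GET", "/")
    resp = conn.getresponse()
    assert resp.status == 301
    assert resp.getheader("Location") == "http://localhost:%d/" % PORT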
if __name__ == '__main__':
try:
spawn()
while True:
sleep(100)
except KeyboardInterrupt:
sys.exit()
|
plugin.py
|
import requests.exceptions
import time
from aqt import mw
from aqt.utils import showInfo, askUser, showWarning
from aqt.qt import *
from anki.utils import splitFields, ids2str
from .duolingo_dialog import duolingo_dialog
from .duolingo import Duolingo, LoginFailedException
from .duolingo_model import get_duolingo_model
from .duolingo_thread import DuolingoThread
def sync_duolingo():
model = get_duolingo_model(mw)
if not model:
showWarning("Could not find or create Duolingo Sync note type.")
return
note_ids = mw.col.findNotes('tag:duolingo_sync')
notes = mw.col.db.list("select flds from notes where id in {}".format(ids2str(note_ids)))
gids_to_notes = {splitFields(note)[0]: note for note in notes}
try:
username, password = duolingo_dialog(mw)
except TypeError:
return
if username and password:
try:
mw.progress.start(immediate=True, label="Logging in...")
login_thread = DuolingoThread(target=Duolingo, args=(username, password))
login_thread.start()
while login_thread.is_alive():
time.sleep(.02)
mw.progress.update()
lingo = login_thread.join()
vocabulary_thread = DuolingoThread(target=lingo.get_vocabulary)
vocabulary_thread.start()
mw.progress.update(label="Retrieving vocabulary...")
while vocabulary_thread.is_alive():
time.sleep(.02)
mw.progress.update()
vocabulary_response = vocabulary_thread.join()
except LoginFailedException:
showWarning(
"""
<p>Logging in to Duolingo failed. Please check your Duolingo credentials.</p>
<p>Having trouble logging in? You must use your <i>Duolingo</i> username and password.
You <i>can't</i> use your Google or Facebook credentials, even if that's what you use to
sign in to Duolingo.</p>
<p>You can find your Duolingo username at
<a href="https://www.duolingo.com/settings">https://www.duolingo.com/settings</a> and you
can create or set your Duolingo password at
<a href="https://www.duolingo.com/settings/password">https://www.duolingo.com/settings/password</a>.</p>
"""
)
return
except requests.exceptions.ConnectionError:
showWarning("Could not connect to Duolingo. Please check your internet connection.")
return
finally:
mw.progress.finish()
language_string = vocabulary_response['language_string']
vocabs = vocabulary_response['vocab_overview']
did = mw.col.decks.id("Default")
mw.col.decks.select(did)
deck = mw.col.decks.get(did)
deck['mid'] = model['id']
mw.col.decks.save(deck)
words_to_add = [vocab for vocab in vocabs if vocab['id'] not in gids_to_notes]
if not words_to_add:
showInfo("Successfully logged in to Duolingo, but no new words found in {} language.".format(language_string))
elif askUser("Add {} notes from {} language?".format(len(words_to_add), language_string)):
word_chunks = [words_to_add[x:x + 50] for x in range(0, len(words_to_add), 50)]
mw.progress.start(immediate=True, label="Importing from Duolingo...", max=len(words_to_add))
notes_added = 0
for word_chunk in word_chunks:
translations = lingo.get_translations([vocab['word_string'] for vocab in word_chunk])
for vocab in word_chunk:
n = mw.col.newNote()
n['Gid'] = vocab['id']
n['Gender'] = vocab['gender'] if vocab['gender'] else ''
n['Source'] = '; '.join(translations[vocab['word_string']])
n['Target'] = vocab['word_string']
n['Target Language'] = language_string
n.addTag(language_string)
n.addTag('duolingo_sync')
if vocab['pos']:
n.addTag(vocab['pos'])
if vocab['skill']:
n.addTag(vocab['skill'].replace(" ", "-"))
mw.col.addNote(n)
notes_added += 1
mw.progress.update(value=notes_added)
showInfo("{} notes added".format(notes_added))
mw.moveToState("deckBrowser")
mw.progress.finish()
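
def _run_in_thread_with_progress(target, *args):
    """Hedged helper (added for illustration, not part of the add-on): run a
    blocking call on a DuolingoThread while keeping Anki's progress dialog
    responsive, mirroring the pattern used in sync_duolingo() above."""
    thread = DuolingoThread(target=target, args=args)
    thread.start()
    while thread.is_alive():
        time.sleep(.02)
        mw.progress.update()
    return thread.join()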
action = QAction("Pull from Duolingo", mw)
action.triggered.connect(sync_duolingo)
mw.form.menuTools.addAction(action)
|
ftp.py
|
import os
import threading
import logging
from app.classes.helpers import helper
from app.classes.models import Ftp_Srv, MC_settings
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import TLS_FTPHandler
from pyftpdlib.servers import ThreadedFTPServer
logger = logging.getLogger(__name__)
class ftp_server():
def __init__(self):
self.root_dir = None
self.user = None
self.password = None
self.port = None
self.server = None
self.ftp_server_thread = None
self.running = False
self.last_error = None
def get_root_dir(self):
return self.root_dir
def set_root_dir(self, full_path):
self.root_dir = full_path
def setup_ftp(self, server_id=1):
ftp_settings = None
mc_settings = None
try:
ftp_settings = Ftp_Srv.get_by_id(1)
mc_settings = MC_settings.get_by_id(server_id)
except Exception as e:
logging.exception("Error Loading FTP. Traceback:")
self.last_error = e
return False
pemfile = os.path.join(helper.crafty_root, "app", 'web', 'certs', 'crafty.pem')
if not helper.check_file_exists(pemfile):
helper.create_ftp_pem()
if ftp_settings is not None and mc_settings is not None:
self.user = ftp_settings.user
self.password = ftp_settings.password
self.port = ftp_settings.port
self.root_dir = mc_settings.server_path
logger.info("FTP server is now setup - Port: {}, Dir: {}".format(self.port, self.root_dir))
def _ftp_serve(self, server_id=1):
self.setup_ftp(server_id)
authorizer = DummyAuthorizer()
authorizer.add_user(self.user, self.password, self.root_dir, perm='elradfmwMT')
handler = TLS_FTPHandler
crafty_root = os.path.abspath(helper.crafty_root)
certfile = os.path.join(crafty_root, 'app', 'web', 'certs', 'crafty.pem')
handler.certfile = certfile
handler.authorizer = authorizer
self.server = ThreadedFTPServer(('0.0.0.0', self.port), handler)
self.running = True
self.server.serve_forever()
def run_threaded_ftp_server(self, server_id=1):
self.running = True
logger.info("Ftp Server Started for server ID: {}".format(server_id))
self.ftp_server_thread = threading.Thread(target=self._ftp_serve, args=[server_id], daemon=True)
self.ftp_server_thread.start()
def stop_threaded_ftp_server(self):
self.running = False
self.server.close_all()
def check_running(self):
return self.running
ftp_svr_object = ftp_server()
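
def _demo_ftp_lifecycle(server_id=1):
    # Hedged sketch (added for illustration, not original code): exercise the
    # start/stop API. Assumes the Ftp_Srv and MC_settings rows for this
    # server exist in the database.
    ftp_svr_object.run_threaded_ftp_server(server_id=server_id)
    assert ftp_svr_object.check_running()
    ftp_svr_object.stop_threaded_ftp_server()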
|
BatchBiogeochemicalReactionModelServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from BatchBiogeochemicalReactionModel.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'BatchBiogeochemicalReactionModel'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from BatchBiogeochemicalReactionModel.BatchBiogeochemicalReactionModelImpl import BatchBiogeochemicalReactionModel # noqa @IgnorePep8
impl_BatchBiogeochemicalReactionModel = BatchBiogeochemicalReactionModel(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
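
def _demo_ip_resolution():
    # Hedged sketch (added for illustration, not original code): with x-ip
    # headers trusted (the default when dont_trust_x_ip_headers is unset),
    # the first X-Forwarded-For entry wins over REMOTE_ADDR.
    env = {'HTTP_X_FORWARDED_FOR': '10.0.0.1, 10.0.0.2',
           'REMOTE_ADDR': '127.0.0.1'}
    assert getIPAddress(env) == '10.0.0.1'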
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'BatchBiogeochemicalReactionModel'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_BatchBiogeochemicalReactionModel.run_BatchBiogeochemicalReactionModel,
name='BatchBiogeochemicalReactionModel.run_BatchBiogeochemicalReactionModel',
types=[dict])
self.method_authentication['BatchBiogeochemicalReactionModel.run_BatchBiogeochemicalReactionModel'] = 'required' # noqa
self.rpc_service.add(impl_BatchBiogeochemicalReactionModel.run_cstr,
name='BatchBiogeochemicalReactionModel.run_cstr',
types=[dict])
self.method_authentication['BatchBiogeochemicalReactionModel.run_cstr'] = 'required' # noqa
self.rpc_service.add(impl_BatchBiogeochemicalReactionModel.status,
name='BatchBiogeochemicalReactionModel.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'BatchBiogeochemicalReactionModel ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
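    # For reference, the handler above expects a JSON-RPC 1.1 style payload
    # shaped roughly like the following (a hypothetical illustration, not
    # taken from a service spec; the params dict depends on the method):
    #
    #   {"version": "1.1",
    #    "id": "12345",
    #    "method": "BatchBiogeochemicalReactionModel.run_cstr",
    #    "params": [{}]}
    #
    # 'method' is split on '.' into ctx['module'] and ctx['method'], and
    # 'params' is recorded in the provenance action.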
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP server,
# listening on port 9999 by default, execute this file directly
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, the server starts on localhost on a system-assigned port
    in the main thread. Execution of the main thread will stay in the
    server main loop until interrupted. To run the server in a separate
    process, and thus allow the stop_server method to be called, set
    newprocess=True. This also allows the assigned port number to be
    returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
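# A minimal usage sketch (hypothetical helper, not invoked anywhere):
# start the server in a child process so the caller keeps control and can
# shut it down later via stop_server().
def _background_server_sketch():
    port = start_server(newprocess=True)  # system-assigned port
    print("server is up on port %s" % port)
    stop_server()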
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
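# For reference, a minimal input file for process_async_cli() could look
# like this (hypothetical values; 'version' and 'id' are filled in above
# when missing):
#
#   {"method": "BatchBiogeochemicalReactionModel.status",
#    "params": [{}],
#    "context": {}}
#
# The JSON-RPC response (or error object) is written to output_file_path.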
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
capture_trajectory.py
|
import argparse
import gym
from kuka.env import KukaPoseEnv
import numpy as np
from pybullet_envs.bullet import KukaGymEnv
from util import write_video, ensure_folder
from PIL import Image
import multiprocessing
from queue import Empty  # Queue.get() raises queue.Empty, not multiprocessing.Queue.Empty
def get_cli_args():
parser = argparse.ArgumentParser()
parser.add_argument('--out', default='tmp/')
parser.add_argument('-n', default=10, type=int)
parser.add_argument('--no-write', action='store_true')
parser.add_argument('--num-processes', type=int, default=4)
args = parser.parse_args()
args.write = not args.no_write
return args
REWARD_CUTOFF = 1.0
def generate_trajectory(i, env, args):
done = False
env.reset()
action = env.action_space.sample()
frames = []
while not done:
_, reward, done, frame = env.step(env.env.goal)
frames.append(np.array(frame))
#img = Image.fromarray(frame)
#img.show()
    # reward check to make sure the endpoint is reachable.
if reward < REWARD_CUTOFF and args.write:
print("writing: ", i)
write_video("{}.mp4".format(i), args.out, frames)
elif reward < REWARD_CUTOFF:
generate_trajectory(i, env, args)
class TrajectoryMachine(object):
def __init__(self):
self.env = gym.make('KukaTrajectoryEnv-v1')
def __call__(self, queue, args):
        while True:
            try:
                i = queue.get(timeout=1)
                generate_trajectory(i, self.env, args)
            # mp.Queue.get(timeout=...) signals an empty queue with
            # queue.Empty; multiprocessing.Queue has no Empty attribute
            except (multiprocessing.TimeoutError, Empty):
                break
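# Design note on TrajectoryMachine: the instance (and its gym environment)
# is created in the parent process and handed to each multiprocessing
# worker as a callable target, so with the default fork start method the
# children inherit the parent's environment. A hypothetical variant that
# builds the environment inside each child instead would look like:
#
#   def worker(q, args):
#       machine = TrajectoryMachine()  # constructed in the child process
#       machine(q, args)
#
#   multiprocessing.Process(target=worker, args=(queue, args)).start()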
def main(args):
ensure_folder(args.out)
queue = multiprocessing.Queue(args.n)
for i in range(args.n):
queue.put(i)
processes = []
for i in range(args.num_processes):
process = multiprocessing.Process(target=TrajectoryMachine(), args=(queue, args))
process.start()
processes.append(process)
for p in processes:
p.join()
if __name__ == '__main__':
path = "./tmp/video/"
main(get_cli_args())
|
data_buffer_test.py
|
# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing as mp
from collections import namedtuple
import os
import tempfile
from time import sleep
import torch
from absl.testing import parameterized
import alf
from alf.tensor_specs import TensorSpec
from alf.utils.data_buffer import RingBuffer, DataBuffer
from alf.utils.checkpoint_utils import Checkpointer
DataItem = namedtuple("DataItem", ["env_id", "x", "t", "o", "reward"])
# Using CPU tensors is needed for running on CUDA-enabled devices,
# as we are not using the spawn method to start subprocesses.
def get_batch(env_ids, dim, t, x):
batch_size = len(env_ids)
x = torch.as_tensor(x, dtype=torch.float32, device="cpu")
t = torch.as_tensor(t, dtype=torch.int32, device="cpu")
ox = (x * torch.arange(
batch_size, dtype=torch.float32, requires_grad=True,
device="cpu").unsqueeze(1) * torch.arange(
dim, dtype=torch.float32, requires_grad=True,
device="cpu").unsqueeze(0))
a = x * torch.ones(batch_size, dtype=torch.float32, device="cpu")
g = torch.zeros(batch_size, dtype=torch.float32, device="cpu")
# reward function adapted from ReplayBuffer: default_reward_fn
r = torch.where(
torch.abs(a - g) < .05,
torch.zeros(batch_size, dtype=torch.float32, device="cpu"),
-torch.ones(batch_size, dtype=torch.float32, device="cpu"))
return DataItem(
env_id=torch.tensor(env_ids, dtype=torch.int64, device="cpu"),
x=ox,
t=t * torch.ones(batch_size, dtype=torch.int32, device="cpu"),
o=dict({
"a": a,
"g": g
}),
reward=r)
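# The reward rule above in isolation: 0 where the action is within 0.05 of
# the goal, -1 otherwise. A small illustrative check (not part of the
# tests), mirroring the torch.where() call in get_batch:
#
#   a = torch.tensor([0.0, 0.04, 0.5])
#   g = torch.zeros(3)
#   r = torch.where(torch.abs(a - g) < .05, torch.zeros(3), -torch.ones(3))
#   # r -> tensor([0., 0., -1.])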
class RingBufferTest(parameterized.TestCase, alf.test.TestCase):
dim = 20
max_length = 4
num_envs = 8
def __init__(self, *args):
super().__init__(*args)
        alf.set_default_device("cpu")  # the spawn start method would be required to use cuda.
self.data_spec = DataItem(
env_id=alf.TensorSpec(shape=(), dtype=torch.int64),
x=alf.TensorSpec(shape=(self.dim, ), dtype=torch.float32),
t=alf.TensorSpec(shape=(), dtype=torch.int32),
o=dict({
"a": alf.TensorSpec(shape=(), dtype=torch.float32),
"g": alf.TensorSpec(shape=(), dtype=torch.float32)
}),
reward=alf.TensorSpec(shape=(), dtype=torch.float32))
@parameterized.named_parameters([
('test_sync', False),
('test_async', True),
])
def test_ring_buffer(self, allow_multiprocess):
ring_buffer = RingBuffer(
data_spec=self.data_spec,
num_environments=self.num_envs,
max_length=self.max_length,
allow_multiprocess=allow_multiprocess)
batch1 = get_batch([1, 2, 3, 5, 6], self.dim, t=1, x=0.4)
if not allow_multiprocess:
# enqueue: blocking mode only available under allow_multiprocess
self.assertRaises(
AssertionError,
ring_buffer.enqueue,
batch1,
env_ids=batch1.env_id,
blocking=True)
        # Test dequeue()
for t in range(2, 10):
batch1 = get_batch([1, 2, 3, 5, 6], self.dim, t=t, x=0.4)
# test that the created batch has gradients
self.assertTrue(batch1.x.requires_grad)
ring_buffer.enqueue(batch1, batch1.env_id)
if not allow_multiprocess:
# dequeue: blocking mode only available under allow_multiprocess
self.assertRaises(
AssertionError,
ring_buffer.dequeue,
env_ids=batch1.env_id,
blocking=True)
# Exception because some environments do not have data
self.assertRaises(AssertionError, ring_buffer.dequeue)
batch = ring_buffer.dequeue(env_ids=batch1.env_id)
self.assertEqual(batch.t, torch.tensor([6] * 5))
# test that RingBuffer detaches gradients of inputs
self.assertFalse(batch.x.requires_grad)
batch = ring_buffer.dequeue(env_ids=batch1.env_id)
self.assertEqual(batch.t, torch.tensor([7] * 5))
batch = ring_buffer.dequeue(env_ids=torch.tensor([1, 2]))
self.assertEqual(batch.t, torch.tensor([8] * 2))
batch = ring_buffer.dequeue(env_ids=batch1.env_id)
self.assertEqual(batch.t, torch.tensor([[9], [9], [8], [8], [8]]))
# Exception because some environments do not have data
self.assertRaises(
AssertionError, ring_buffer.dequeue, env_ids=batch1.env_id)
# Test dequeue multiple
ring_buffer.clear()
for t in range(5, 10):
batch1 = get_batch([1, 2, 3, 5, 6], self.dim, t=t, x=0.4)
# test that the created batch has gradients
ring_buffer.enqueue(batch1, batch1.env_id)
# Normal dequeue in the middle of the ring buffer
batch = ring_buffer.dequeue(env_ids=batch1.env_id, n=2)
self.assertEqual(batch.t, torch.tensor([[6, 7]] * 5))
# This dequeue crosses the end of the ring buffer
batch = ring_buffer.dequeue(env_ids=batch1.env_id, n=2)
self.assertEqual(batch.t, torch.tensor([[8, 9]] * 5))
# Test remove_up_to
ring_buffer.remove_up_to(4)
for t in range(6, 10):
batch2 = get_batch(range(0, 8), self.dim, t=t, x=0.4)
ring_buffer.enqueue(batch2)
prev_size = ring_buffer._current_size.clone()
prev_pos = ring_buffer._current_pos.clone()
ring_buffer.remove_up_to(2)
self.assertEqual(prev_size - 2, ring_buffer._current_size)
# shouldn't change last data pos
self.assertEqual(prev_pos, ring_buffer._current_pos)
# remove_up_to more than there are elements shouldn't raise error
ring_buffer.remove_up_to(3)
self.assertEqual(ring_buffer._current_size, torch.tensor([0] * 8))
if allow_multiprocess:
# Test block on dequeue without enough data
def delayed_enqueue(ring_buffer, batch):
alf.set_default_device("cpu")
sleep(0.04)
ring_buffer.enqueue(batch, batch.env_id)
p = mp.Process(
target=delayed_enqueue,
args=(ring_buffer,
alf.nest.map_structure(lambda x: x.cpu(), batch1)))
p.start()
batch = ring_buffer.dequeue(env_ids=batch1.env_id, blocking=True)
self.assertEqual(batch.t, torch.tensor([9] * 2))
# Test block on enqueue without free space
ring_buffer.clear()
for t in range(6, 10):
batch2 = get_batch(range(0, 8), self.dim, t=t, x=0.4)
ring_buffer.enqueue(batch2)
def delayed_dequeue():
                # cpu tensors in the subprocess; otherwise the spawn start
                # method is needed.
alf.set_default_device("cpu")
sleep(0.04)
ring_buffer.dequeue() # 6(deleted), 7, 8, 9
sleep(0.04) # 10, 7, 8, 9
ring_buffer.dequeue() # 10, 7(deleted), 8, 9
p = mp.Process(target=delayed_dequeue)
p.start()
batch2 = get_batch(range(0, 8), self.dim, t=10, x=0.4)
ring_buffer.enqueue(batch2, blocking=True)
p.join()
self.assertEqual(ring_buffer._current_size[0], torch.tensor([3]))
# Test stop queue event
def blocking_dequeue(ring_buffer):
ring_buffer.dequeue(blocking=True)
p = mp.Process(target=blocking_dequeue, args=(ring_buffer, ))
ring_buffer.clear()
p.start()
sleep(0.02) # for subprocess to enter while loop
ring_buffer.stop()
p.join()
self.assertEqual(
ring_buffer.dequeue(env_ids=batch1.env_id, blocking=True),
None)
ring_buffer.revive()
for t in range(6, 10):
batch2 = get_batch(range(0, 8), self.dim, t=t, x=0.4)
self.assertEqual(
ring_buffer.enqueue(batch2, blocking=True), True)
ring_buffer.stop()
self.assertEqual(ring_buffer.enqueue(batch2, blocking=True), False)
class DataBufferTest(alf.test.TestCase):
def test_data_buffer(self):
dim = 20
capacity = 256
data_spec = (TensorSpec(shape=()), TensorSpec(shape=(dim // 3 - 1, )),
TensorSpec(shape=(dim - dim // 3, )))
data_buffer = DataBuffer(data_spec=data_spec, capacity=capacity)
def _get_batch(batch_size):
x = torch.randn(batch_size, dim, requires_grad=True)
x = (x[:, 0], x[:, 1:dim // 3], x[..., dim // 3:])
return x
data_buffer.add_batch(_get_batch(100))
self.assertEqual(int(data_buffer.current_size), 100)
batch = _get_batch(1000)
# test that the created batch has gradients
self.assertTrue(batch[0].requires_grad)
data_buffer.add_batch(batch)
ret = data_buffer.get_batch(2)
# test that DataBuffer detaches gradients of inputs
self.assertFalse(ret[0].requires_grad)
self.assertEqual(int(data_buffer.current_size), capacity)
ret = data_buffer.get_batch_by_indices(torch.arange(capacity))
self.assertEqual(ret[0], batch[0][-capacity:])
self.assertEqual(ret[1], batch[1][-capacity:])
self.assertEqual(ret[2], batch[2][-capacity:])
batch = _get_batch(100)
data_buffer.add_batch(batch)
ret = data_buffer.get_batch_by_indices(
torch.arange(data_buffer.current_size - 100,
data_buffer.current_size))
self.assertEqual(ret[0], batch[0])
self.assertEqual(ret[1], batch[1])
self.assertEqual(ret[2], batch[2][-capacity:])
# Test checkpoint working
with tempfile.TemporaryDirectory() as checkpoint_directory:
checkpoint = Checkpointer(
checkpoint_directory, data_buffer=data_buffer)
checkpoint.save(10)
data_buffer = DataBuffer(data_spec=data_spec, capacity=capacity)
checkpoint = Checkpointer(
checkpoint_directory, data_buffer=data_buffer)
global_step = checkpoint.load()
self.assertEqual(global_step, 10)
ret = data_buffer.get_batch_by_indices(
torch.arange(data_buffer.current_size - 100,
data_buffer.current_size))
self.assertEqual(ret[0], batch[0])
self.assertEqual(ret[1], batch[1])
self.assertEqual(ret[2], batch[2][-capacity:])
data_buffer.clear()
self.assertEqual(int(data_buffer.current_size), 0)
if __name__ == '__main__':
alf.test.main()
|
Waiters.py
|
import time
import consts
import random
import logging
import requests
import threading
logger = logging.getLogger(__name__)
class Waiters:
def __init__(self, dh, i):
self.name = f'{dh.name} Waiter-{i}'
self.dh = dh
self.id = i
self.TIME_UNIT = 1
self.initial = True
def serve_order(self, order_to_serve):
# calculate total order time
order_total_preparing_time = int(time.time() - order_to_serve['time_start'])
        # check that the order matches what was requested
        req_order = next((order for order in self.dh.orders if 'id' in order and order['id'] == order_to_serve['order_id']), None)
        if req_order is not None and sorted(req_order['items']) == sorted(order_to_serve['items']):
# update table state
table_idx = next((i for i, table in enumerate(self.dh.tables) if table.id == order_to_serve['table_id']), None)
self.dh.tables[table_idx].status = 'ORDER_SERVED'
            # calculate the number of rating stars
stars = self.rating_stars(order_to_serve['max_wait'], order_total_preparing_time)
self.dh.update_rating({'order_id': order_to_serve['order_id'], 'stars': stars})
served_order = {**order_to_serve, 'total_preparing_time': order_total_preparing_time}
self.dh.done_orders.append(served_order)
logger.info(f'{self.name}-$ SERVED orderId: {served_order["order_id"]} | '
f'table_id: {served_order["table_id"]} | '
f'max_wait: {served_order["max_wait"]} | '
f'total_preparing_time: {served_order["total_preparing_time"]} sec. | '
                        f'stars: {stars}')
threading.Thread(target=self.dh.tables[table_idx].serve).start()
else:
raise Exception(f'{self.name} The order is not the same as was requested. Original: {req_order}, given: {order_to_serve}')
def search_order(self, free_tables):
while True:
try:
if not free_tables.empty():
# pick some free table
table = free_tables.get_nowait()
if table:
order = table.generate_order()
self.dh.orders.append(order)
time.sleep(random.randint(2, 4) * self.dh.TIME_UNIT)
logger.info(f'{self.name}-$ PICKED UP orderId: {order["id"]} | priority: {order["priority"]} | items: {order["items"]}')
req = {
'order_id': order['id'],
'table_id': order['table_id'],
'waiter_id': self.id,
'items': order['items'],
'priority': order['priority'],
'max_wait': order['max_wait'],
'time_start': time.time()
}
requests.post(f'http://{consts.KH_HOST}:{self.dh.config["kitchen_port"]}/order', json=req, timeout=0.0000000001)
            except Exception:
                # the near-zero timeout above makes the POST fire-and-forget;
                # the resulting timeout error is expected and ignored
                pass
@staticmethod
def rating_stars(max_wait, total):
stars = 0
if max_wait >= total:
stars = 5
elif max_wait * 1.1 >= total:
stars = 4
elif max_wait * 1.2 >= total:
stars = 3
elif max_wait * 1.3 >= total:
stars = 2
elif max_wait * 1.4 >= total:
stars = 1
return stars
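# A quick sketch of the thresholds implemented above (illustrative numbers
# only, assuming max_wait=30 time units):
#
#   Waiters.rating_stars(30, 30)  # -> 5 (served within max_wait)
#   Waiters.rating_stars(30, 33)  # -> 4 (within 110% of max_wait)
#   Waiters.rating_stars(30, 36)  # -> 3 (within 120%)
#   Waiters.rating_stars(30, 39)  # -> 2 (within 130%)
#   Waiters.rating_stars(30, 42)  # -> 1 (within 140%)
#   Waiters.rating_stars(30, 43)  # -> 0 (slower than 140%)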
|
test_debug.py
|
import importlib
import inspect
import os
import re
import sys
import tempfile
import threading
from io import StringIO
from pathlib import Path
from unittest import mock
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import DatabaseError, connection
from django.http import Http404
from django.shortcuts import render
from django.template import TemplateDoesNotExist
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import LoggingCaptureMixin
from django.urls import path, reverse
from django.urls.converters import IntConverter
from django.utils.functional import SimpleLazyObject
from django.utils.regex_helper import _lazy_re_compile
from django.utils.safestring import mark_safe
from django.views.debug import (
CallableSettingWrapper, ExceptionCycleWarning, ExceptionReporter,
Path as DebugPath, SafeExceptionReporterFilter, default_urlconf,
get_default_exception_reporter_filter, technical_404_response,
technical_500_response,
)
from django.views.decorators.debug import (
sensitive_post_parameters, sensitive_variables,
)
from ..views import (
custom_exception_reporter_filter_view, index_page,
multivalue_dict_key_error, non_sensitive_view, paranoid_view,
sensitive_args_function_caller, sensitive_kwargs_function_caller,
sensitive_method_view, sensitive_view,
)
class User:
def __str__(self):
return 'jacob'
class WithoutEmptyPathUrls:
urlpatterns = [path('url/', index_page, name='url')]
class CallableSettingWrapperTests(SimpleTestCase):
""" Unittests for CallableSettingWrapper
"""
def test_repr(self):
class WrappedCallable:
def __repr__(self):
return "repr from the wrapped callable"
def __call__(self):
pass
actual = repr(CallableSettingWrapper(WrappedCallable()))
self.assertEqual(actual, "repr from the wrapped callable")
@override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
class DebugViewTests(SimpleTestCase):
def test_files(self):
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get('/raises/')
self.assertEqual(response.status_code, 500)
data = {
'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'),
}
with self.assertLogs('django.request', 'ERROR'):
response = self.client.post('/raises/', data)
self.assertContains(response, 'file_data.txt', status_code=500)
self.assertNotContains(response, 'haha', status_code=500)
def test_400(self):
# When DEBUG=True, technical_500_template() is called.
with self.assertLogs('django.security', 'WARNING'):
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
# Ensure no 403.html template exists to test the default case.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
}])
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
# Set up a test 403.html template.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'loaders': [
('django.template.loaders.locmem.Loader', {
'403.html': 'This is a test template for a 403 error ({{ exception }}).',
}),
],
},
}])
def test_403_template(self):
response = self.client.get('/raises403/')
self.assertContains(response, 'test template', status_code=403)
self.assertContains(response, '(Insufficient Permissions).', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
self.assertContains(response, '<code>not-in-urls</code>, didn’t match', status_code=404)
def test_404_not_in_urls(self):
response = self.client.get('/not-in-urls')
self.assertNotContains(response, "Raised by:", status_code=404)
self.assertContains(response, "Django tried these URL patterns", status_code=404)
self.assertContains(response, '<code>not-in-urls</code>, didn’t match', status_code=404)
# Pattern and view name of a RegexURLPattern appear.
self.assertContains(response, r"^regex-post/(?P<pk>[0-9]+)/$", status_code=404)
self.assertContains(response, "[name='regex-post']", status_code=404)
# Pattern and view name of a RoutePattern appear.
self.assertContains(response, r"path-post/<int:pk>/", status_code=404)
self.assertContains(response, "[name='path-post']", status_code=404)
@override_settings(ROOT_URLCONF=WithoutEmptyPathUrls)
def test_404_empty_path_not_in_urls(self):
response = self.client.get('/')
self.assertContains(response, 'The empty path didn’t match any of these.', status_code=404)
def test_technical_404(self):
response = self.client.get('/technical404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.technical404", status_code=404)
def test_classbased_technical_404(self):
response = self.client.get('/classbased404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.Http404View", status_code=404)
def test_non_l10ned_numeric_ids(self):
"""
Numeric IDs and fancy traceback context blocks line numbers shouldn't be localized.
"""
with self.settings(DEBUG=True, USE_L10N=True):
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get('/raises500/')
# We look for a HTML fragment of the form
# '<div class="context" id="c38123208">', not '<div class="context" id="c38,123,208"'
self.assertContains(response, '<div class="context" id="', status_code=500)
match = re.search(b'<div class="context" id="(?P<id>[^"]+)">', response.content)
self.assertIsNotNone(match)
id_repr = match['id']
self.assertFalse(
re.search(b'[^c0-9]', id_repr),
"Numeric IDs in debug response HTML page shouldn't be localized (value: %s)." % id_repr.decode()
)
def test_template_exceptions(self):
with self.assertLogs('django.request', 'ERROR'):
try:
self.client.get(reverse('template_exception'))
except Exception:
raising_loc = inspect.trace()[-1][-2][0].strip()
self.assertNotEqual(
raising_loc.find("raise Exception('boom')"), -1,
"Failed to find 'raise Exception' in last frame of "
"traceback, instead found: %s" % raising_loc
)
def test_template_loader_postmortem(self):
"""Tests for not existing file"""
template_name = "notfound.html"
with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile:
tempdir = os.path.dirname(tmpfile.name)
template_path = os.path.join(tempdir, template_name)
with override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [tempdir],
}]), self.assertLogs('django.request', 'ERROR'):
response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
self.assertContains(response, "%s (Source does not exist)" % template_path, status_code=500, count=2)
# Assert as HTML.
self.assertContains(
response,
'<li><code>django.template.loaders.filesystem.Loader</code>: '
'%s (Source does not exist)</li>' % os.path.join(tempdir, 'notfound.html'),
status_code=500,
html=True,
)
def test_no_template_source_loaders(self):
"""
Make sure if you don't specify a template, the debug view doesn't blow up.
"""
with self.assertLogs('django.request', 'ERROR'):
with self.assertRaises(TemplateDoesNotExist):
self.client.get('/render_no_template/')
@override_settings(ROOT_URLCONF='view_tests.default_urls')
def test_default_urlconf_template(self):
"""
        Make sure that the default URLconf template is shown instead of
        the technical 404 page, if the user has not altered their
        URLconf yet.
"""
response = self.client.get('/')
self.assertContains(
response,
"<h2>The install worked successfully! Congratulations!</h2>"
)
@override_settings(ROOT_URLCONF='view_tests.regression_21530_urls')
def test_regression_21530(self):
"""
Regression test for bug #21530.
If the admin app include is replaced with exactly one url
pattern, then the technical 404 template should be displayed.
The bug here was that an AttributeError caused a 500 response.
"""
response = self.client.get('/')
self.assertContains(
response,
"Page not found <span>(404)</span>",
status_code=404
)
def test_template_encoding(self):
"""
The templates are loaded directly, not via a template loader, and
should be opened as utf-8 charset as is the default specified on
template engines.
"""
with mock.patch.object(DebugPath, 'open') as m:
default_urlconf(None)
m.assert_called_once_with(encoding='utf-8')
m.reset_mock()
technical_404_response(mock.MagicMock(), mock.Mock())
m.assert_called_once_with(encoding='utf-8')
def test_technical_404_converter_raise_404(self):
with mock.patch.object(IntConverter, 'to_python', side_effect=Http404):
response = self.client.get('/path-post/1/')
self.assertContains(response, 'Page not found', status_code=404)
def test_exception_reporter_from_request(self):
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get('/custom_reporter_class_view/')
self.assertContains(response, 'custom traceback text', status_code=500)
@override_settings(DEFAULT_EXCEPTION_REPORTER='view_tests.views.CustomExceptionReporter')
def test_exception_reporter_from_settings(self):
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get('/raises500/')
self.assertContains(response, 'custom traceback text', status_code=500)
class DebugViewQueriesAllowedTests(SimpleTestCase):
# May need a query to initialize MySQL connection
databases = {'default'}
def test_handle_db_exception(self):
"""
Ensure the debug view works when a database exception is raised by
performing an invalid query and passing the exception to the debug view.
"""
with connection.cursor() as cursor:
try:
cursor.execute('INVALID SQL')
except DatabaseError:
exc_info = sys.exc_info()
rf = RequestFactory()
response = technical_500_response(rf.get('/'), *exc_info)
self.assertContains(response, 'OperationalError at /', status_code=500)
@override_settings(
DEBUG=True,
ROOT_URLCONF='view_tests.urls',
# No template directories are configured, so no templates will be found.
TEMPLATES=[{
'BACKEND': 'django.template.backends.dummy.TemplateStrings',
}],
)
class NonDjangoTemplatesDebugViewTests(SimpleTestCase):
def test_400(self):
# When DEBUG=True, technical_500_template() is called.
with self.assertLogs('django.security', 'WARNING'):
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
def test_template_not_found_error(self):
# Raises a TemplateDoesNotExist exception and shows the debug view.
url = reverse('raises_template_does_not_exist', kwargs={"path": "notfound.html"})
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get(url)
self.assertContains(response, '<div class="context" id="', status_code=500)
class ExceptionReporterTests(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
request.user = User()
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<p>jacob</p>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
self.assertIn('<p>No POST data</p>', html)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_eol_support(self):
"""The ExceptionReporter supports Unix, Windows and Macintosh EOL markers"""
LINES = ['print %d' % i for i in range(1, 6)]
reporter = ExceptionReporter(None, None, None, None)
for newline in ['\n', '\r\n', '\r']:
fd, filename = tempfile.mkstemp(text=False)
os.write(fd, (newline.join(LINES) + newline).encode())
os.close(fd)
try:
self.assertEqual(
reporter._get_lines_from_file(filename, 3, 2),
(1, LINES[1:3], LINES[3], LINES[4:])
)
finally:
os.unlink(filename)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">No exception message supplied</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_suppressed_context(self):
try:
try:
raise RuntimeError("Can't find my keys")
except RuntimeError:
raise ValueError("Can't find my keys") from None
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
self.assertNotIn('During handling of the above exception', html)
def test_reporting_of_nested_exceptions(self):
request = self.rf.get('/test_view/')
try:
try:
raise AttributeError(mark_safe('<p>Top level</p>'))
except AttributeError as explicit:
try:
raise ValueError(mark_safe('<p>Second exception</p>')) from explicit
except ValueError:
raise IndexError(mark_safe('<p>Final exception</p>'))
except Exception:
# Custom exception handler, just pass it into ExceptionReporter
exc_type, exc_value, tb = sys.exc_info()
explicit_exc = 'The above exception ({0}) was the direct cause of the following exception:'
implicit_exc = 'During handling of the above exception ({0}), another exception occurred:'
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
            # Both messages appear twice on the page: once rendered as HTML,
            # once as plain text (for the pastebin)
self.assertEqual(2, html.count(explicit_exc.format('<p>Top level</p>')))
self.assertEqual(2, html.count(implicit_exc.format('<p>Second exception</p>')))
self.assertEqual(10, html.count('<p>Final exception</p>'))
text = reporter.get_traceback_text()
self.assertIn(explicit_exc.format('<p>Top level</p>'), text)
self.assertIn(implicit_exc.format('<p>Second exception</p>'), text)
self.assertEqual(3, text.count('<p>Final exception</p>'))
def test_reporting_frames_without_source(self):
try:
source = "def funcName():\n raise Error('Whoops')\nfuncName()"
namespace = {}
code = compile(source, 'generated', 'exec')
exec(code, namespace)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
frames = reporter.get_traceback_frames()
last_frame = frames[-1]
self.assertEqual(last_frame['context_line'], '<source code not available>')
self.assertEqual(last_frame['filename'], 'generated')
self.assertEqual(last_frame['function'], 'funcName')
self.assertEqual(last_frame['lineno'], 2)
html = reporter.get_traceback_html()
self.assertIn(
'<span class="fname">generated</span>, line 2, in funcName',
html,
)
self.assertIn(
'<code class="fname">generated</code>, line 2, in funcName',
html,
)
self.assertIn(
'"generated", line 2, in funcName\n'
' <source code not available>',
html,
)
text = reporter.get_traceback_text()
self.assertIn(
'"generated", line 2, in funcName\n'
' <source code not available>',
text,
)
def test_reporting_frames_source_not_match(self):
try:
source = "def funcName():\n raise Error('Whoops')\nfuncName()"
namespace = {}
code = compile(source, 'generated', 'exec')
exec(code, namespace)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
with mock.patch(
'django.views.debug.ExceptionReporter._get_source',
return_value=['wrong source'],
):
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
frames = reporter.get_traceback_frames()
last_frame = frames[-1]
self.assertEqual(last_frame['context_line'], '<source code not available>')
self.assertEqual(last_frame['filename'], 'generated')
self.assertEqual(last_frame['function'], 'funcName')
self.assertEqual(last_frame['lineno'], 2)
html = reporter.get_traceback_html()
self.assertIn(
'<span class="fname">generated</span>, line 2, in funcName',
html,
)
self.assertIn(
'<code class="fname">generated</code>, line 2, in funcName',
html,
)
self.assertIn(
'"generated", line 2, in funcName\n'
' <source code not available>',
html,
)
text = reporter.get_traceback_text()
self.assertIn(
'"generated", line 2, in funcName\n'
' <source code not available>',
text,
)
def test_reporting_frames_for_cyclic_reference(self):
try:
def test_func():
try:
raise RuntimeError('outer') from RuntimeError('inner')
except RuntimeError as exc:
raise exc.__cause__
test_func()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
def generate_traceback_frames(*args, **kwargs):
nonlocal tb_frames
tb_frames = reporter.get_traceback_frames()
tb_frames = None
tb_generator = threading.Thread(target=generate_traceback_frames, daemon=True)
msg = (
"Cycle in the exception chain detected: exception 'inner' "
"encountered again."
)
with self.assertWarnsMessage(ExceptionCycleWarning, msg):
tb_generator.start()
tb_generator.join(timeout=5)
if tb_generator.is_alive():
# tb_generator is a daemon that runs until the main thread/process
# exits. This is resource heavy when running the full test suite.
# Setting the following values to None makes
# reporter.get_traceback_frames() exit early.
exc_value.__traceback__ = exc_value.__context__ = exc_value.__cause__ = None
tb_generator.join()
self.fail('Cyclic reference in Exception Reporter.get_traceback_frames()')
if tb_frames is None:
            # can happen if the thread generating the traceback was killed
            # or an exception occurred while generating the traceback
self.fail('Traceback generation failed')
last_frame = tb_frames[-1]
self.assertIn('raise exc.__cause__', last_frame['context_line'])
self.assertEqual(last_frame['filename'], __file__)
self.assertEqual(last_frame['function'], 'test_func')
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_non_utf8_values_handling(self):
"Non-UTF-8 exceptions/values should not make the output generation choke."
try:
class NonUtf8Output(Exception):
def __repr__(self):
return b'EXC\xe9EXC'
somevar = b'VAL\xe9VAL' # NOQA
raise NonUtf8Output()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('VAL\\xe9VAL', html)
self.assertIn('EXC\\xe9EXC', html)
def test_local_variable_escaping(self):
"""Safe strings in local variables are escaped."""
try:
local = mark_safe('<p>Local variable</p>')
raise ValueError(local)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
html = ExceptionReporter(None, exc_type, exc_value, tb).get_traceback_html()
self.assertIn('<td class="code"><pre>'<p>Local variable</p>'</pre></td>', html)
def test_unprintable_values_handling(self):
"Unprintable values should not make the output generation choke."
try:
class OomOutput:
def __repr__(self):
raise MemoryError('OOM')
oomvalue = OomOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<td class="code"><pre>Error in formatting', html)
def test_too_large_values_handling(self):
"Large values should not create a large HTML."
large = 256 * 1024
repr_of_str_adds = len(repr(''))
try:
class LargeOutput:
def __repr__(self):
return repr('A' * large)
largevalue = LargeOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertEqual(len(html) // 1024 // 128, 0) # still fit in 128Kb
self.assertIn('<trimmed %d bytes string>' % (large + repr_of_str_adds,), html)
def test_encoding_error(self):
"""
A UnicodeError displays a portion of the problematic string. HTML in
safe strings is escaped.
"""
try:
mark_safe('abcdefghijkl<p>mnὀp</p>qrstuwxyz').encode('ascii')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h2>Unicode error hint</h2>', html)
self.assertIn('The string that could not be encoded/decoded was: ', html)
self.assertIn('<strong><p>mnὀp</p></strong>', html)
def test_unfrozen_importlib(self):
"""
importlib is not a frozen app, but its loader thinks it's frozen which
results in an ImportError. Refs #21443.
"""
try:
request = self.rf.get('/test_view/')
importlib.import_module('abc.def.invalid.name')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ModuleNotFoundError at /test_view/</h1>', html)
def test_ignore_traceback_evaluation_exceptions(self):
"""
Don't trip over exceptions generated by crafted objects when
evaluating them while cleansing (#24455).
"""
class BrokenEvaluation(Exception):
pass
def broken_setup():
raise BrokenEvaluation
request = self.rf.get('/test_view/')
broken_lazy = SimpleLazyObject(broken_setup)
try:
bool(broken_lazy)
except BrokenEvaluation:
exc_type, exc_value, tb = sys.exc_info()
self.assertIn(
"BrokenEvaluation",
ExceptionReporter(request, exc_type, exc_value, tb).get_traceback_html(),
"Evaluation exception reason not mentioned in traceback"
)
@override_settings(ALLOWED_HOSTS='example.com')
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get('/', HTTP_HOST='evil.com')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertIn("http://evil.com/", html)
def test_request_with_items_key(self):
"""
An exception report can be generated for requests with 'items' in
request GET, POST, FILES, or COOKIES QueryDicts.
"""
value = '<td>items</td><td class="code"><pre>'Oops'</pre></td>'
# GET
request = self.rf.get('/test_view/?items=Oops')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(value, html)
# POST
request = self.rf.post('/test_view/', data={'items': 'Oops'})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(value, html)
# FILES
fp = StringIO('filecontent')
request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(
'<td>items</td><td class="code"><pre><InMemoryUploadedFile: '
'items (application/octet-stream)></pre></td>',
html
)
# COOKIES
rf = RequestFactory()
rf.cookies['items'] = 'Oops'
request = rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML('<td>items</td><td class="code"><pre>'Oops'</pre></td>', html)
def test_exception_fetching_user(self):
"""
The error page can be rendered if the current user can't be retrieved
(such as when the database is unavailable).
"""
class ExceptionUser:
def __str__(self):
raise Exception()
request = self.rf.get('/test_view/')
request.user = ExceptionUser()
try:
raise ValueError('Oops')
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">Oops</pre>', html)
self.assertIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<p>[unable to retrieve the current user]</p>', html)
text = reporter.get_traceback_text()
self.assertIn('USER: [unable to retrieve the current user]', text)
def test_template_encoding(self):
"""
The templates are loaded directly, not via a template loader, and
should be opened as utf-8 charset as is the default specified on
template engines.
"""
reporter = ExceptionReporter(None, None, None, None)
with mock.patch.object(DebugPath, 'open') as m:
reporter.get_traceback_html()
m.assert_called_once_with(encoding='utf-8')
m.reset_mock()
reporter.get_traceback_text()
m.assert_called_once_with(encoding='utf-8')
class PlainTextReportTests(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
request.user = User()
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError at /test_view/', text)
self.assertIn("Can't find my keys", text)
self.assertIn('Request Method:', text)
self.assertIn('Request URL:', text)
self.assertIn('USER: jacob', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback (most recent call last):', text)
self.assertIn('Request information:', text)
self.assertNotIn('Request data not supplied', text)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError', text)
self.assertIn("Can't find my keys", text)
self.assertNotIn('Request Method:', text)
self.assertNotIn('Request URL:', text)
self.assertNotIn('USER:', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback (most recent call last):', text)
self.assertIn('Request data not supplied', text)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
reporter.get_traceback_text()
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
reporter.get_traceback_text()
@override_settings(DEBUG=True)
def test_template_exception(self):
request = self.rf.get('/test_view/')
try:
render(request, 'debug/template_error.html')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
templ_path = Path(Path(__file__).parents[1], 'templates', 'debug', 'template_error.html')
self.assertIn(
'Template error:\n'
'In template %(path)s, error at line 2\n'
' \'cycle\' tag requires at least two arguments\n'
' 1 : Template with error:\n'
' 2 : {%% cycle %%} \n'
' 3 : ' % {'path': templ_path},
text
)
def test_request_with_items_key(self):
"""
An exception report can be generated for requests with 'items' in
request GET, POST, FILES, or COOKIES QueryDicts.
"""
# GET
request = self.rf.get('/test_view/?items=Oops')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
# POST
request = self.rf.post('/test_view/', data={'items': 'Oops'})
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
# FILES
fp = StringIO('filecontent')
request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp})
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn('items = <InMemoryUploadedFile:', text)
# COOKIES
rf = RequestFactory()
rf.cookies['items'] = 'Oops'
request = rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
reporter.get_traceback_text()
@override_settings(ALLOWED_HOSTS='example.com')
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get('/', HTTP_HOST='evil.com')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("http://evil.com/", text)
class ExceptionReportTestMixin:
# Mixin used in the ExceptionReporterFilterTests and
# AjaxResponseExceptionReporterFilter tests below
breakfast_data = {
'sausage-key': 'sausage-value',
'baked-beans-key': 'baked-beans-value',
'hash-brown-key': 'hash-brown-value',
'bacon-key': 'bacon-value',
}
def verify_unsafe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
Asserts that potentially sensitive info are displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# All variables are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertContains(response, k, status_code=500)
self.assertContains(response, v, status_code=500)
def verify_safe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
Asserts that certain sensitive info are not displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Non-sensitive variable's name and value are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
# Sensitive variable's name is shown but not its value.
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k in self.breakfast_data:
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# Non-sensitive POST parameters' values are shown.
self.assertContains(response, 'baked-beans-value', status_code=500)
self.assertContains(response, 'hash-brown-value', status_code=500)
# Sensitive POST parameters' values are not shown.
self.assertNotContains(response, 'sausage-value', status_code=500)
self.assertNotContains(response, 'bacon-value', status_code=500)
def verify_paranoid_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
Asserts that no variables or POST parameters are displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Show variable names but not their values.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertNotContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# No POST parameters' values are shown.
self.assertNotContains(response, v, status_code=500)
def verify_unsafe_email(self, view, check_for_POST_params=True):
"""
Asserts that potentially sensitive info are displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body_plain = str(email.body)
self.assertNotIn('cooked_eggs', body_plain)
self.assertNotIn('scrambled', body_plain)
self.assertNotIn('sauce', body_plain)
self.assertNotIn('worcestershire', body_plain)
# Frames vars are shown in html email reports.
body_html = str(email.alternatives[0][0])
self.assertIn('cooked_eggs', body_html)
self.assertIn('scrambled', body_html)
self.assertIn('sauce', body_html)
self.assertIn('worcestershire', body_html)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertIn(k, body_plain)
self.assertIn(v, body_plain)
self.assertIn(k, body_html)
self.assertIn(v, body_html)
def verify_safe_email(self, view, check_for_POST_params=True):
"""
Asserts that certain sensitive info are not displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body_plain = str(email.body)
self.assertNotIn('cooked_eggs', body_plain)
self.assertNotIn('scrambled', body_plain)
self.assertNotIn('sauce', body_plain)
self.assertNotIn('worcestershire', body_plain)
# Frames vars are shown in html email reports.
body_html = str(email.alternatives[0][0])
self.assertIn('cooked_eggs', body_html)
self.assertIn('scrambled', body_html)
self.assertIn('sauce', body_html)
self.assertNotIn('worcestershire', body_html)
if check_for_POST_params:
for k in self.breakfast_data:
# All POST parameters' names are shown.
self.assertIn(k, body_plain)
# Non-sensitive POST parameters' values are shown.
self.assertIn('baked-beans-value', body_plain)
self.assertIn('hash-brown-value', body_plain)
self.assertIn('baked-beans-value', body_html)
self.assertIn('hash-brown-value', body_html)
# Sensitive POST parameters' values are not shown.
self.assertNotIn('sausage-value', body_plain)
self.assertNotIn('bacon-value', body_plain)
self.assertNotIn('sausage-value', body_html)
self.assertNotIn('bacon-value', body_html)
def verify_paranoid_email(self, view):
"""
Asserts that no variables or POST parameters are displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body = str(email.body)
self.assertNotIn('cooked_eggs', body)
self.assertNotIn('scrambled', body)
self.assertNotIn('sauce', body)
self.assertNotIn('worcestershire', body)
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertIn(k, body)
# No POST parameters' values are shown.
self.assertNotIn(v, body)
@override_settings(ROOT_URLCONF='view_tests.urls')
class ExceptionReporterFilterTests(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
"""
Sensitive information can be filtered out of error reports (#14614).
"""
rf = RequestFactory()
def test_non_sensitive_request(self):
"""
Everything (request info and frame variables) can be seen
in the default error reports for non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
def test_sensitive_request(self):
"""
Sensitive POST parameters and frame variables cannot be
seen in the default error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view)
self.verify_unsafe_email(sensitive_view)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view)
self.verify_safe_email(sensitive_view)
def test_paranoid_request(self):
"""
No POST parameters and frame variables can be seen in the
default error reports for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view)
self.verify_unsafe_email(paranoid_view)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view)
self.verify_paranoid_email(paranoid_view)
def test_multivalue_dict_key_error(self):
"""
#21098 -- Sensitive POST parameters cannot be seen in the
error reports if request.POST['nonexistent_key'] throws an error.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(multivalue_dict_key_error)
self.verify_unsafe_email(multivalue_dict_key_error)
with self.settings(DEBUG=False):
self.verify_safe_response(multivalue_dict_key_error)
self.verify_safe_email(multivalue_dict_key_error)
def test_custom_exception_reporter_filter(self):
"""
It's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
def test_sensitive_method(self):
"""
The sensitive_variables decorator works with object methods.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_method_view, check_for_POST_params=False)
self.verify_unsafe_email(sensitive_method_view, check_for_POST_params=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_method_view, check_for_POST_params=False)
self.verify_safe_email(sensitive_method_view, check_for_POST_params=False)
def test_sensitive_function_arguments(self):
"""
Sensitive variables don't leak in the sensitive_variables decorator's
frame, when those variables are passed as arguments to the decorated
function.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_args_function_caller)
self.verify_unsafe_email(sensitive_args_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_args_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_args_function_caller, check_for_POST_params=False)
def test_sensitive_function_keyword_arguments(self):
"""
Sensitive variables don't leak in the sensitive_variables decorator's
frame, when those variables are passed as keyword arguments to the
decorated function.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_kwargs_function_caller)
self.verify_unsafe_email(sensitive_kwargs_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_kwargs_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_kwargs_function_caller, check_for_POST_params=False)
def test_callable_settings(self):
"""
Callable settings should not be evaluated in the debug page (#21345).
"""
def callable_setting():
return "This should not be displayed"
with self.settings(DEBUG=True, FOOBAR=callable_setting):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_callable_settings_forbidding_to_set_attributes(self):
"""
Callable settings that forbid setting attributes should not break
the debug page (#23070).
"""
class CallableSettingWithSlots:
__slots__ = []
def __call__(self):
return "This should not be displayed"
with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_dict_setting_with_non_str_key(self):
"""
A dict setting containing a non-string key should not break the
debug page (#12744).
"""
with self.settings(DEBUG=True, FOOBAR={42: None}):
response = self.client.get('/raises500/')
self.assertContains(response, 'FOOBAR', status_code=500)
def test_sensitive_settings(self):
"""
The debug page should not show some sensitive settings
(password, secret key, ...).
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
with self.settings(DEBUG=True, **{setting: "should not be displayed"}):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
def test_settings_with_sensitive_keys(self):
"""
The debug page should filter out some sensitive information found in
dict settings.
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
FOOBAR = {
setting: "should not be displayed",
'recursive': {setting: "should not be displayed"},
}
with self.settings(DEBUG=True, FOOBAR=FOOBAR):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
def test_cleanse_setting_basic(self):
reporter_filter = SafeExceptionReporterFilter()
self.assertEqual(reporter_filter.cleanse_setting('TEST', 'TEST'), 'TEST')
self.assertEqual(
reporter_filter.cleanse_setting('PASSWORD', 'super_secret'),
reporter_filter.cleansed_substitute,
)
def test_cleanse_setting_ignore_case(self):
reporter_filter = SafeExceptionReporterFilter()
self.assertEqual(
reporter_filter.cleanse_setting('password', 'super_secret'),
reporter_filter.cleansed_substitute,
)
def test_cleanse_setting_recurses_in_dictionary(self):
reporter_filter = SafeExceptionReporterFilter()
initial = {'login': 'cooper', 'password': 'secret'}
self.assertEqual(
reporter_filter.cleanse_setting('SETTING_NAME', initial),
{'login': 'cooper', 'password': reporter_filter.cleansed_substitute},
)
def test_cleanse_setting_recurses_in_dictionary_with_non_string_key(self):
reporter_filter = SafeExceptionReporterFilter()
initial = {('localhost', 8000): {'login': 'cooper', 'password': 'secret'}}
self.assertEqual(
reporter_filter.cleanse_setting('SETTING_NAME', initial),
{
('localhost', 8000): {
'login': 'cooper',
'password': reporter_filter.cleansed_substitute,
},
},
)
def test_cleanse_setting_recurses_in_list_tuples(self):
reporter_filter = SafeExceptionReporterFilter()
initial = [
{
'login': 'cooper',
'password': 'secret',
'apps': (
{'name': 'app1', 'api_key': 'a06b-c462cffae87a'},
{'name': 'app2', 'api_key': 'a9f4-f152e97ad808'},
),
'tokens': ['98b37c57-ec62-4e39', '8690ef7d-8004-4916'],
},
{'SECRET_KEY': 'c4d77c62-6196-4f17-a06b-c462cffae87a'},
]
cleansed = [
{
'login': 'cooper',
'password': reporter_filter.cleansed_substitute,
'apps': (
{'name': 'app1', 'api_key': reporter_filter.cleansed_substitute},
{'name': 'app2', 'api_key': reporter_filter.cleansed_substitute},
),
'tokens': reporter_filter.cleansed_substitute,
},
{'SECRET_KEY': reporter_filter.cleansed_substitute},
]
self.assertEqual(
reporter_filter.cleanse_setting('SETTING_NAME', initial),
cleansed,
)
self.assertEqual(
reporter_filter.cleanse_setting('SETTING_NAME', tuple(initial)),
tuple(cleansed),
)
def test_request_meta_filtering(self):
request = self.rf.get('/', HTTP_SECRET_HEADER='super_secret')
reporter_filter = SafeExceptionReporterFilter()
self.assertEqual(
reporter_filter.get_safe_request_meta(request)['HTTP_SECRET_HEADER'],
reporter_filter.cleansed_substitute,
)
def test_exception_report_uses_meta_filtering(self):
response = self.client.get('/raises500/', HTTP_SECRET_HEADER='super_secret')
self.assertNotIn(b'super_secret', response.content)
response = self.client.get(
'/raises500/',
HTTP_SECRET_HEADER='super_secret',
HTTP_ACCEPT='application/json',
)
self.assertNotIn(b'super_secret', response.content)
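# Note: the HTTP_SECRET_HEADER kwarg above surfaces in request.META under that
# same key (i.e. a "Secret-Header" request header), so the default filter's
# hidden-settings pattern matches it via "SECRET" and cleanses the value.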
class CustomExceptionReporterFilter(SafeExceptionReporterFilter):
cleansed_substitute = 'XXXXXXXXXXXXXXXXXXXX'
hidden_settings = _lazy_re_compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE|DATABASE_URL', flags=re.I)
@override_settings(
ROOT_URLCONF='view_tests.urls',
DEFAULT_EXCEPTION_REPORTER_FILTER='%s.CustomExceptionReporterFilter' % __name__,
)
class CustomExceptionReporterFilterTests(SimpleTestCase):
def setUp(self):
get_default_exception_reporter_filter.cache_clear()
def tearDown(self):
get_default_exception_reporter_filter.cache_clear()
def test_setting_allows_custom_subclass(self):
self.assertIsInstance(
get_default_exception_reporter_filter(),
CustomExceptionReporterFilter,
)
def test_cleansed_substitute_override(self):
reporter_filter = get_default_exception_reporter_filter()
self.assertEqual(
reporter_filter.cleanse_setting('password', 'super_secret'),
reporter_filter.cleansed_substitute,
)
def test_hidden_settings_override(self):
reporter_filter = get_default_exception_reporter_filter()
self.assertEqual(
reporter_filter.cleanse_setting('database_url', 'super_secret'),
reporter_filter.cleansed_substitute,
)
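# Rough standalone sketch of the recursive cleansing behavior the
# cleanse_setting tests above rely on (an illustration, not Django's actual
# implementation): keys matching the hidden-settings pattern are substituted
# wholesale, and dicts/lists/tuples are walked recursively.
_SKETCH_HIDDEN = re.compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE', flags=re.I)

def _sketch_cleanse(key, value, substitute='********************'):
    if isinstance(key, str) and _SKETCH_HIDDEN.search(key):
        return substitute
    if isinstance(value, dict):
        return {k: _sketch_cleanse(k, v, substitute) for k, v in value.items()}
    if isinstance(value, (list, tuple)):
        return type(value)(_sketch_cleanse('', item, substitute) for item in value)
    return value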
class NonHTMLResponseExceptionReporterFilter(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
"""
Sensitive information can be filtered out of error reports.
The plain text, debug-only 500 error page is served when the request
is detected as not accepting HTML content. Don't check for the
(non)existence of frame vars in the traceback section of the response
content, because they're not included in these error pages.
Refs #14614.
"""
rf = RequestFactory(HTTP_ACCEPT='application/json')
def test_non_sensitive_request(self):
"""
Request info can be seen in the default error reports for
non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
def test_sensitive_request(self):
"""
Sensitive POST parameters cannot be seen in the default
error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view, check_for_vars=False)
def test_paranoid_request(self):
"""
No POST parameters can be seen in the default error reports
for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view, check_for_vars=False)
def test_custom_exception_reporter_filter(self):
"""
It's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)
@override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
def test_non_html_response_encoding(self):
response = self.client.get('/raises500/', HTTP_ACCEPT='application/json')
self.assertEqual(response['Content-Type'], 'text/plain; charset=utf-8')
class DecoratorsTests(SimpleTestCase):
def test_sensitive_variables_not_called(self):
msg = (
'sensitive_variables() must be called to use it as a decorator, '
'e.g., use @sensitive_variables(), not @sensitive_variables.'
)
with self.assertRaisesMessage(TypeError, msg):
@sensitive_variables
def test_func(password):
pass
def test_sensitive_post_parameters_not_called(self):
msg = (
'sensitive_post_parameters() must be called to use it as a '
'decorator, e.g., use @sensitive_post_parameters(), not '
'@sensitive_post_parameters.'
)
with self.assertRaisesMessage(TypeError, msg):
@sensitive_post_parameters
def test_func(request):
return index_page(request)
|
test_signalDelay.py
|
import pathlib
import sys
sys.path.insert(0, str(pathlib.Path(__file__).parent.parent))
import logging
import multiprocessing as mp
from jobmanager import signalDelay
import os
import signal
import time
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(logging.Formatter(fmt="%(asctime)s|%(name)s|%(levelname)s|%(msg)s"))
signalDelay.log.setLevel(logging.DEBUG)
signalDelay.log.addHandler(ch)
v = mp.Value('I')
sleep_time = 0.1
def no_output():
    return  # early return keeps output visible; remove it to silence children
    f = open('/dev/null', 'w')
    sys.stdout = f
    sys.stderr = f
def test_sd():
def _important_func(v):
no_output()
try:
for i in range(10):
v.value = i
time.sleep(sleep_time)
except KeyboardInterrupt:
# this prevents the traceback
pass
@signalDelay.sig_delay([signal.SIGINT])
def _important_func_with_dec(v):
_important_func(v)
# call _important_func in a subprocess and send SIGINT after 2*sleep_time;
# without the delay decorator the subprocess terminates immediately,
# so v should still be smaller than 5
p = mp.Process(target=_important_func, args=(v,))
p.start()
time.sleep(2*sleep_time)
os.kill(p.pid, signal.SIGINT)
p.join()
assert v.value < 5
assert p.exitcode == 0 # since the KeyboardInterrupt Error is caught and ignored
# call _important_func_with_dec in a subprocess and send SIGINT after
# 2*sleep_time; the decorator delays the signal until the function
# returns, so the loop completes and v reaches 9
p = mp.Process(target=_important_func_with_dec, args=(v,))
p.start()
time.sleep(2*sleep_time)
os.kill(p.pid, signal.SIGINT)
p.join()
assert v.value == 9
assert p.exitcode == 1  # the SIGINT is re-emitted after the scope of
# _important_func_with_dec exits, so the KeyboardInterrupt cannot be caught there
def test_sd_ctx():
def _important_func(v):
no_output()
try:
for i in range(10):
v.value = i
time.sleep(sleep_time)
except KeyboardInterrupt:
# this prevents the traceback
pass
def _important_func_with_dec(v):
with signalDelay.sig_delay([signal.SIGINT]):
_important_func(v)
# call _important_func in a subprocess and send SIGINT after 2*sleep_time;
# without the sig_delay context the subprocess terminates immediately,
# so v should still be smaller than 5
p = mp.Process(target=_important_func, args=(v,))
p.start()
time.sleep(2*sleep_time)
os.kill(p.pid, signal.SIGINT)
p.join()
assert v.value < 5
assert p.exitcode == 0 # since the KeyboardInterrupt Error is caught and ignored
# call _important_func_with_dec in a subprocess and send SIGINT after
# 2*sleep_time; the sig_delay context delays the signal until the block
# exits, so the loop completes and v reaches 9
p = mp.Process(target=_important_func_with_dec, args=(v,))
p.start()
time.sleep(2*sleep_time)
os.kill(p.pid, signal.SIGINT)
p.join()
assert v.value == 9
assert p.exitcode == 1  # the SIGINT is re-emitted after the sig_delay block
# exits, so the KeyboardInterrupt cannot be caught inside _important_func
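# Minimal sketch of the delay mechanism exercised above (an assumption about
# the idea, not jobmanager's actual implementation): while the scope is
# active, the listed signals are caught and recorded; on exit the original
# handlers are restored and any recorded signal is re-sent to the process.
from contextlib import contextmanager

@contextmanager
def _sketch_sig_delay(signals):
    received = []
    old_handlers = {}
    def _record(signum, frame):
        received.append(signum)
    for s in signals:
        old_handlers[s] = signal.signal(s, _record)
    try:
        yield
    finally:
        for s, handler in old_handlers.items():
            signal.signal(s, handler)
        for signum in received:
            os.kill(os.getpid(), signum)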
if __name__ == "__main__":
test_sd()
test_sd_ctx()
|
runner.py
|
import argparse
import json
import logging
import os
import threading
import time
import traceback
import colors
import docker
import numpy
import psutil
from ann_benchmarks.algorithms.definitions import (Definition,
instantiate_algorithm)
from ann_benchmarks.datasets import get_dataset, DATASETS
from ann_benchmarks.distance import metrics, dataset_transform
from ann_benchmarks.results import store_results
def run_individual_query(algo, X_train, X_test, distance, count, run_count,
batch):
prepared_queries = \
(batch and hasattr(algo, "prepare_batch_query")) or \
((not batch) and hasattr(algo, "prepare_query"))
best_search_time = float('inf')
for i in range(run_count):
print('Run %d/%d...' % (i + 1, run_count))
# a bit dumb, but this can't be a scalar because of Python's scoping rules
n_items_processed = [0]
def single_query(v):
if prepared_queries:
algo.prepare_query(v, count)
start = time.time()
algo.run_prepared_query()
total = (time.time() - start)
candidates = algo.get_prepared_query_results()
else:
start = time.time()
candidates = algo.query(v, count)
total = (time.time() - start)
candidates = [(int(idx), float(metrics[distance]['distance'](v, X_train[idx]))) # noqa
for idx in candidates]
n_items_processed[0] += 1
if n_items_processed[0] % 1000 == 0:
print('Processed %d/%d queries...' % (n_items_processed[0], len(X_test)))
if len(candidates) > count:
print('warning: algorithm %s returned %d results, but count'
      ' is only %d' % (algo, len(candidates), count))
return (total, candidates)
def batch_query(X):
if prepared_queries:
algo.prepare_batch_query(X, count)
start = time.time()
algo.run_batch_query()
total = (time.time() - start)
else:
start = time.time()
algo.batch_query(X, count)
total = (time.time() - start)
results = algo.get_batch_results()
candidates = [[(int(idx), float(metrics[distance]['distance'](v, X_train[idx]))) # noqa
for idx in single_results]
for v, single_results in zip(X, results)]
return [(total / float(len(X)), v) for v in candidates]
if batch:
results = batch_query(X_test)
else:
results = [single_query(x) for x in X_test]
total_time = sum(t for t, _ in results)  # avoid shadowing the time module
total_candidates = sum(len(candidates) for _, candidates in results)
search_time = total_time / len(X_test)
avg_candidates = total_candidates / len(X_test)
best_search_time = min(best_search_time, search_time)
verbose = hasattr(algo, "query_verbose")
attrs = {
"batch_mode": batch,
"best_search_time": best_search_time,
"candidates": avg_candidates,
"expect_extra": verbose,
"name": str(algo),
"run_count": run_count,
"distance": distance,
"count": int(count)
}
additional = algo.get_additional()
for k in additional:
attrs[k] = additional[k]
return (attrs, results)
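# Sketch of the per-algorithm interface run_individual_query and run() rely on
# (an assumed minimal shape inferred from the calls above; the real wrappers
# live in ann_benchmarks.algorithms):
#
#   class BaseANN:
#       def fit(self, X): ...                  # build the index
#       def query(self, v, n): ...             # ids of the n nearest neighbors
#       def batch_query(self, X, n): ...       # optional batch entry point
#       def get_batch_results(self): ...
#       def get_memory_usage(self): ...        # resident size, for index_size
#       def get_additional(self): return {}    # extra attrs merged into `attrs`
#       def done(self): ...                    # release resources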
def run(definition, dataset, count, run_count, batch):
print(definition.algorithm)
algo = instantiate_algorithm(definition)
assert not definition.query_argument_groups \
or hasattr(algo, "set_query_arguments"), """\
error: query argument groups have been specified for %s.%s(%s), but the \
algorithm instantiated from it does not implement the set_query_arguments \
function""" % (definition.module, definition.constructor, definition.arguments)
size = 300000
D, dimension = get_dataset(dataset)
X_train = numpy.array(D['train'])
X_test = numpy.array(D['test'])
# D['train'] = X_train.tolist()
# D['test'] = X_test.tolist()
distance = D.attrs['distance']
print("extracted distance: ", distance)
print('got a train set of size (%d * %d)' % (X_train.shape[0], dimension))
print('got %d queries' % len(X_test))
X_train, X_test = dataset_transform(D)
# print(X_train.shape, X_test.shape)
X_train, X_test = X_train[:size, :], X_test[:size, :]
X_train = numpy.vstack([X_train, X_test])
print(X_train.shape)
try:
prepared_queries = False
if hasattr(algo, "supports_prepared_queries"):
prepared_queries = algo.supports_prepared_queries()
t0 = time.time()
memory_usage_before = algo.get_memory_usage()
print("doing first fit")
print(X_train.shape)
algo.fit(X_train)
print("done fit")
build_time = time.time() - t0
index_size = algo.get_memory_usage() - memory_usage_before
print('Built index in', build_time)
print('Index size: ', index_size)
query_argument_groups = definition.query_argument_groups
# Make sure that algorithms with no query argument groups still get run
# once by providing them with a single, empty, harmless group
if not query_argument_groups:
query_argument_groups = [[]]
for pos, query_arguments in enumerate(query_argument_groups, 1):
print("Running query argument group %d of %d..." %
(pos, len(query_argument_groups)))
if query_arguments:
algo.set_query_arguments(*query_arguments)
descriptor, results = run_individual_query(
algo, X_train, X_test, distance, count, run_count, batch)
descriptor["build_time"] = build_time
descriptor["index_size"] = index_size
# print(definition)
descriptor["algo"] = definition.algorithm
descriptor["dataset"] = dataset
store_results(dataset, count, definition,
query_arguments, descriptor, results, batch)
finally:
algo.done()
def run_from_cmdline():
parser = argparse.ArgumentParser('''
NOTICE: You probably want run.py rather than this script.
''')
parser.add_argument(
'--dataset',
choices=DATASETS.keys(),
help='Dataset to benchmark on.',
required=True)
parser.add_argument(
'--algorithm',
help='Name of algorithm for saving the results.',
required=True)
parser.add_argument(
'--module',
help='Python module containing algorithm. E.g. "ann_benchmarks.algorithms.annoy"',
required=True)
parser.add_argument(
'--constructor',
help='Constructor to load from the module. E.g. "Annoy"',
required=True)
parser.add_argument(
'--count',
help='K: Number of nearest neighbours for the algorithm to return.',
required=True,
type=int)
parser.add_argument(
'--runs',
help='Number of times to run the algorithm. The fastest run time across runs is reported.',
required=True,
type=int)
parser.add_argument(
'--batch',
help='If flag included, algorithms will be run in batch mode, rather than "individual query" mode.',
action='store_true')
parser.add_argument(
'build',
help='JSON of arguments to pass to the constructor. E.g. ["angular", 100]'
)
parser.add_argument(
'queries',
help='JSON of arguments to pass to the queries. E.g. [100]',
nargs='*',
default=[])
args = parser.parse_args()
algo_args = json.loads(args.build)
print(algo_args)
query_args = [json.loads(q) for q in args.queries]
definition = Definition(
algorithm=args.algorithm,
docker_tag=None, # not needed
module=args.module,
constructor=args.constructor,
arguments=algo_args,
query_argument_groups=query_args,
disabled=False
)
run(definition, args.dataset, args.count, args.runs, args.batch)
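# Example invocation (hypothetical dataset/algorithm values, for illustration
# only; the module/constructor names follow the help text above):
#   python runner.py --dataset glove-100-angular --algorithm annoy \
#       --module ann_benchmarks.algorithms.annoy --constructor Annoy \
#       --count 10 --runs 3 '["angular", 100]' '[100]'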
def run_docker(definition, dataset, count, runs, timeout, batch, cpu_limit,
mem_limit=None):
cmd = ['--dataset', dataset,
'--algorithm', definition.algorithm,
'--module', definition.module,
'--constructor', definition.constructor,
'--runs', str(runs),
'--count', str(count)]
if batch:
cmd += ['--batch']
cmd.append(json.dumps(definition.arguments))
cmd += [json.dumps(qag) for qag in definition.query_argument_groups]
client = docker.from_env()
if mem_limit is None:
mem_limit = psutil.virtual_memory().available
print(mem_limit, cpu_limit)
print("DEF:", definition)
container = client.containers.run(
definition.docker_tag,
cmd,
volumes={
os.path.abspath('ann_benchmarks'):
{'bind': '/home/app/ann_benchmarks', 'mode': 'ro'},
os.path.abspath('data'):
{'bind': '/home/app/data', 'mode': 'ro'},
os.path.abspath('results'):
{'bind': '/home/app/results', 'mode': 'rw'},
},
# cpuset_cpus=cpu_limit,
cpu_shares=1024,
mem_limit=mem_limit,
detach=True)
logger = logging.getLogger(f"annb.{container.short_id}")
logger.info('Created container %s: CPU limit %s, mem limit %s, timeout %d, command %s' % \
(container.short_id, cpu_limit, mem_limit, timeout, cmd))
def stream_logs():
for line in container.logs(stream=True):
logger.info(colors.color(line.decode().rstrip(), fg='blue'))
t = threading.Thread(target=stream_logs, daemon=True)
t.start()
try:
exit_code = container.wait(timeout=timeout)
# docker-py >= 3 returns a dict like {'StatusCode': 0} from wait();
# older versions return the bare status code.
if isinstance(exit_code, dict):
    exit_code = exit_code.get('StatusCode')
if exit_code not in [0, None]:
    logger.error(colors.color(container.logs().decode(), fg='red'))
    logger.error('Child process for container %s exited with code %s' % (container.short_id, exit_code))
except Exception:
logger.error('Container.wait for container %s failed with exception' % container.short_id)
traceback.print_exc()
finally:
container.remove(force=True)
|
test_advanced.py
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from concurrent.futures import ThreadPoolExecutor
import glob
import json
import logging
import os
import random
import setproctitle
import shutil
import six
import sys
import socket
import subprocess
import tempfile
import threading
import time
import numpy as np
import pickle
import pytest
import ray
from ray import signature
import ray.ray_constants as ray_constants
import ray.cluster_utils
import ray.test_utils
from ray.test_utils import RayTestTimeoutException
logger = logging.getLogger(__name__)
def test_wait_iterables(ray_start_regular):
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = (f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5))
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
objectids = np.array(
[f.remote(1.0),
f.remote(0.5),
f.remote(0.5),
f.remote(0.5)])
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
def test_multiple_waits_and_gets(shutdown_only):
# It is important to use three workers here, so that the three tasks
# launched in this experiment can run at the same time.
ray.init(num_cpus=3)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
@ray.remote
def g(l):
# The argument l should be a list containing one object ID.
ray.wait([l[0]])
@ray.remote
def h(l):
# The argument l should be a list containing one object ID.
ray.get(l[0])
# Make sure that multiple wait requests involving the same object ID
# all return.
x = f.remote(1)
ray.get([g.remote([x]), g.remote([x])])
# Make sure that multiple get requests involving the same object ID all
# return.
x = f.remote(1)
ray.get([h.remote([x]), h.remote([x])])
def test_caching_functions_to_run(shutdown_only):
# Test that we export functions to run on all workers before the driver
# is connected.
def f(worker_info):
sys.path.append(1)
ray.worker.global_worker.run_function_on_all_workers(f)
def f(worker_info):
sys.path.append(2)
ray.worker.global_worker.run_function_on_all_workers(f)
def g(worker_info):
sys.path.append(3)
ray.worker.global_worker.run_function_on_all_workers(g)
def f(worker_info):
sys.path.append(4)
ray.worker.global_worker.run_function_on_all_workers(f)
ray.init(num_cpus=1)
@ray.remote
def get_state():
time.sleep(1)
return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]
res1 = get_state.remote()
res2 = get_state.remote()
assert ray.get(res1) == (1, 2, 3, 4)
assert ray.get(res2) == (1, 2, 3, 4)
# Clean up the path on the workers.
def f(worker_info):
sys.path.pop()
sys.path.pop()
sys.path.pop()
sys.path.pop()
ray.worker.global_worker.run_function_on_all_workers(f)
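# Note: functions registered via run_function_on_all_workers before ray.init()
# are cached and replayed, in registration order, on every worker that starts
# later -- which is what the (1, 2, 3, 4) assertions above depend on.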
def test_running_function_on_all_workers(ray_start_regular):
def f(worker_info):
sys.path.append("fake_directory")
ray.worker.global_worker.run_function_on_all_workers(f)
@ray.remote
def get_path1():
return sys.path
assert "fake_directory" == ray.get(get_path1.remote())[-1]
def f(worker_info):
sys.path.pop(-1)
ray.worker.global_worker.run_function_on_all_workers(f)
# Create a second remote function to guarantee that when we call
# get_path2.remote(), the second function to run will have been run on
# the worker.
@ray.remote
def get_path2():
return sys.path
assert "fake_directory" not in ray.get(get_path2.remote())
def test_profiling_api(ray_start_2_cpus):
@ray.remote
def f():
with ray.profile("custom_event", extra_data={"name": "custom name"}):
pass
ray.put(1)
object_id = f.remote()
ray.wait([object_id])
ray.get(object_id)
# Wait until all of the profiling information appears in the profile
# table.
timeout_seconds = 20
start_time = time.time()
while True:
profile_data = ray.timeline()
event_types = {event["cat"] for event in profile_data}
expected_types = [
"task",
"task:deserialize_arguments",
"task:execute",
"task:store_outputs",
"wait_for_function",
"ray.get",
"ray.put",
"ray.wait",
"submit_task",
"fetch_and_run_function",
"register_remote_function",
"custom_event", # This is the custom one from ray.profile.
]
if all(expected_type in event_types
for expected_type in expected_types):
break
if time.time() - start_time > timeout_seconds:
raise RayTestTimeoutException(
"Timed out while waiting for information in "
"profile table. Missing events: {}.".format(
set(expected_types) - set(event_types)))
# The profiling information only flushes once every second.
time.sleep(1.1)
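# The events gathered above can also be dumped for chrome://tracing
# (assuming this Ray version's optional filename argument is available):
#     ray.timeline(filename="/tmp/ray_timeline.json")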
def test_wait_cluster(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
ray.init(address=cluster.address)
@ray.remote(resources={"RemoteResource": 1})
def f():
return
# Make sure we have enough workers on the remote nodes to execute some
# tasks.
tasks = [f.remote() for _ in range(10)]
start = time.time()
ray.get(tasks)
end = time.time()
# Submit some more tasks that can only be executed on the remote nodes.
tasks = [f.remote() for _ in range(10)]
# Sleep for a bit to let the tasks finish.
time.sleep((end - start) * 2)
_, unready = ray.wait(tasks, num_returns=len(tasks), timeout=0)
# All remote tasks should have finished.
assert len(unready) == 0
def test_object_transfer_dump(ray_start_cluster):
cluster = ray_start_cluster
num_nodes = 3
for i in range(num_nodes):
cluster.add_node(resources={str(i): 1}, object_store_memory=10**9)
ray.init(address=cluster.address)
@ray.remote
def f(x):
return
# These objects will live on different nodes.
object_ids = [
f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)
]
# Broadcast each object from each machine to each other machine.
for object_id in object_ids:
ray.get([
f._remote(args=[object_id], resources={str(i): 1})
for i in range(num_nodes)
])
# The profiling information only flushes once every second.
time.sleep(1.1)
transfer_dump = ray.object_transfer_timeline()
# Make sure the transfer dump can be serialized with JSON.
json.loads(json.dumps(transfer_dump))
assert len(transfer_dump) >= num_nodes**2
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_receive"
}) == num_nodes
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_send"
}) == num_nodes
def test_identical_function_names(ray_start_regular):
# Define a bunch of remote functions and make sure that we don't
# accidentally call an older version.
num_calls = 200
@ray.remote
def f():
return 1
results1 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 2
results2 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 3
results3 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 4
results4 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 5
results5 = [f.remote() for _ in range(num_calls)]
assert ray.get(results1) == num_calls * [1]
assert ray.get(results2) == num_calls * [2]
assert ray.get(results3) == num_calls * [3]
assert ray.get(results4) == num_calls * [4]
assert ray.get(results5) == num_calls * [5]
@ray.remote
def g():
return 1
@ray.remote # noqa: F811
def g():
return 2
@ray.remote # noqa: F811
def g():
return 3
@ray.remote # noqa: F811
def g():
return 4
@ray.remote # noqa: F811
def g():
return 5
result_values = ray.get([g.remote() for _ in range(num_calls)])
assert result_values == num_calls * [5]
def test_illegal_api_calls(ray_start_regular):
# Verify that we cannot call put on an ObjectID.
x = ray.put(1)
with pytest.raises(Exception):
ray.put(x)
# Verify that we cannot call get on a regular value.
with pytest.raises(Exception):
ray.get(3)
# TODO(hchen): This test currently doesn't work in Python 2. This is likely
# because plasma client isn't thread-safe. This needs to be fixed from the
# Arrow side. See #4107 for relevant discussions.
@pytest.mark.skipif(six.PY2, reason="Doesn't work in Python 2.")
def test_multithreading(ray_start_2_cpus):
# This test requires at least 2 CPUs to finish since the worker does not
# release resources when joining the threads.
def run_test_in_multi_threads(test_case, num_threads=10, num_repeats=25):
"""A helper function that runs test cases in multiple threads."""
def wrapper():
for _ in range(num_repeats):
test_case()
time.sleep(random.randint(0, 10) / 1000.0)
return "ok"
executor = ThreadPoolExecutor(max_workers=num_threads)
futures = [executor.submit(wrapper) for _ in range(num_threads)]
for future in futures:
assert future.result() == "ok"
@ray.remote
def echo(value, delay_ms=0):
if delay_ms > 0:
time.sleep(delay_ms / 1000.0)
return value
def test_api_in_multi_threads():
"""Test using Ray api in multiple threads."""
@ray.remote
class Echo(object):
def echo(self, value):
return value
# Test calling remote functions in multiple threads.
def test_remote_call():
value = random.randint(0, 1000000)
result = ray.get(echo.remote(value))
assert value == result
run_test_in_multi_threads(test_remote_call)
# Test multiple threads calling one actor.
actor = Echo.remote()
def test_call_actor():
value = random.randint(0, 1000000)
result = ray.get(actor.echo.remote(value))
assert value == result
run_test_in_multi_threads(test_call_actor)
# Test put and get.
def test_put_and_get():
value = random.randint(0, 1000000)
result = ray.get(ray.put(value))
assert value == result
run_test_in_multi_threads(test_put_and_get)
# Test multiple threads waiting for objects.
num_wait_objects = 10
objects = [
echo.remote(i, delay_ms=10) for i in range(num_wait_objects)
]
def test_wait():
ready, _ = ray.wait(
objects,
num_returns=len(objects),
timeout=1000.0,
)
assert len(ready) == num_wait_objects
assert ray.get(ready) == list(range(num_wait_objects))
run_test_in_multi_threads(test_wait, num_repeats=1)
# Run tests in a driver.
test_api_in_multi_threads()
# Run tests in a worker.
@ray.remote
def run_tests_in_worker():
test_api_in_multi_threads()
return "ok"
assert ray.get(run_tests_in_worker.remote()) == "ok"
# Test actor that runs background threads.
@ray.remote
class MultithreadedActor(object):
def __init__(self):
self.lock = threading.Lock()
self.thread_results = []
def background_thread(self, wait_objects):
try:
# Test wait
ready, _ = ray.wait(
wait_objects,
num_returns=len(wait_objects),
timeout=1000.0,
)
assert len(ready) == len(wait_objects)
for _ in range(20):
num = 10
# Test remote call
results = [echo.remote(i) for i in range(num)]
assert ray.get(results) == list(range(num))
# Test put and get
objects = [ray.put(i) for i in range(num)]
assert ray.get(objects) == list(range(num))
time.sleep(random.randint(0, 10) / 1000.0)
except Exception as e:
with self.lock:
self.thread_results.append(e)
else:
with self.lock:
self.thread_results.append("ok")
def spawn(self):
wait_objects = [echo.remote(i, delay_ms=10) for i in range(10)]
self.threads = [
threading.Thread(
target=self.background_thread, args=(wait_objects, ))
for _ in range(20)
]
[thread.start() for thread in self.threads]
def join(self):
[thread.join() for thread in self.threads]
assert self.thread_results == ["ok"] * len(self.threads)
return "ok"
actor = MultithreadedActor.remote()
actor.spawn.remote()
assert ray.get(actor.join.remote()) == "ok"
def test_free_objects_multi_node(ray_start_cluster):
# This test will do following:
# 1. Create 3 raylets that each hold an actor.
# 2. Each actor creates an object which is the deletion target.
# 3. Wait 0.1 second for the objects to be deleted.
# 4. Check that the deletion targets have been deleted.
# Caution: if remote functions are used instead of actor methods,
# one raylet may create more than one worker to execute the
# tasks, so the flushing operations may be executed in different
# workers and the plasma client holding the deletion target
# may not be flushed.
cluster = ray_start_cluster
config = json.dumps({"object_manager_repeated_push_delay_ms": 1000})
for i in range(3):
cluster.add_node(
num_cpus=1,
resources={"Custom{}".format(i): 1},
_internal_config=config)
ray.init(address=cluster.address)
class RawActor(object):
def get(self):
return ray.worker.global_worker.node.unique_id
ActorOnNode0 = ray.remote(resources={"Custom0": 1})(RawActor)
ActorOnNode1 = ray.remote(resources={"Custom1": 1})(RawActor)
ActorOnNode2 = ray.remote(resources={"Custom2": 1})(RawActor)
def create(actors):
a = actors[0].get.remote()
b = actors[1].get.remote()
c = actors[2].get.remote()
(l1, l2) = ray.wait([a, b, c], num_returns=3)
assert len(l1) == 3
assert len(l2) == 0
return (a, b, c)
def run_one_test(actors, local_only, delete_creating_tasks):
(a, b, c) = create(actors)
# The three objects should be generated on different object stores.
assert ray.get(a) != ray.get(b)
assert ray.get(a) != ray.get(c)
assert ray.get(c) != ray.get(b)
ray.internal.free(
[a, b, c],
local_only=local_only,
delete_creating_tasks=delete_creating_tasks)
# Wait for the objects to be deleted.
time.sleep(0.1)
return (a, b, c)
actors = [
ActorOnNode0.remote(),
ActorOnNode1.remote(),
ActorOnNode2.remote()
]
# Case 1: run with local_only=False. All 3 objects will be deleted.
(a, b, c) = run_one_test(actors, False, False)
(l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=1)
# All the objects are deleted.
assert len(l1) == 0
assert len(l2) == 3
# Case 2: run with local_only=True. Only 1 object will be deleted.
(a, b, c) = run_one_test(actors, True, False)
(l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=3)
# One object is deleted and 2 objects are not.
assert len(l1) == 2
assert len(l2) == 1
# The deleted object lived in the same object store as the driver.
local_return = ray.worker.global_worker.node.unique_id
for object_id in l1:
assert ray.get(object_id) != local_return
# Case 3: test deletion of the tasks that created the objects.
(a, b, c) = run_one_test(actors, False, False)
task_table = ray.tasks()
for obj in [a, b, c]:
assert ray._raylet.compute_task_id(obj).hex() in task_table
(a, b, c) = run_one_test(actors, False, True)
task_table = ray.tasks()
for obj in [a, b, c]:
assert ray._raylet.compute_task_id(obj).hex() not in task_table
def test_local_mode(shutdown_only):
@ray.remote
def local_mode_f():
return np.array([0, 0])
@ray.remote
def local_mode_g(x):
x[0] = 1
return x
ray.init(local_mode=True)
@ray.remote
def f():
return np.ones([3, 4, 5])
xref = f.remote()
# Remote functions should return ObjectIDs.
assert isinstance(xref, ray.ObjectID)
assert np.alltrue(ray.get(xref) == np.ones([3, 4, 5]))
y = np.random.normal(size=[11, 12])
# Check that ray.get(ray.put) is the identity.
assert np.alltrue(y == ray.get(ray.put(y)))
# Make sure objects are immutable; this example is why we need to copy
# arguments before passing them into remote functions in local mode
aref = local_mode_f.remote()
assert np.alltrue(ray.get(aref) == np.array([0, 0]))
bref = local_mode_g.remote(ray.get(aref))
# Make sure local_mode_g does not mutate aref.
assert np.alltrue(ray.get(aref) == np.array([0, 0]))
assert np.alltrue(ray.get(bref) == np.array([1, 0]))
# wait should return the first num_returns values passed in as the
# first list and the remaining values as the second list
num_returns = 5
object_ids = [ray.put(i) for i in range(20)]
ready, remaining = ray.wait(
object_ids, num_returns=num_returns, timeout=None)
assert ready == object_ids[:num_returns]
assert remaining == object_ids[num_returns:]
# Check that ray.put() and ray.internal.free() work in local mode.
v1 = np.ones(10)
v2 = np.zeros(10)
k1 = ray.put(v1)
assert np.alltrue(v1 == ray.get(k1))
k2 = ray.put(v2)
assert np.alltrue(v2 == ray.get(k2))
ray.internal.free([k1, k2])
with pytest.raises(Exception):
ray.get(k1)
with pytest.raises(Exception):
ray.get(k2)
# Should fail silently.
ray.internal.free([k1, k2])
# Test actors in LOCAL_MODE.
@ray.remote
class LocalModeTestClass(object):
def __init__(self, array):
self.array = array
def set_array(self, array):
self.array = array
def get_array(self):
return self.array
def modify_and_set_array(self, array):
array[0] = -1
self.array = array
@ray.method(num_return_vals=3)
def returns_multiple(self):
return 1, 2, 3
test_actor = LocalModeTestClass.remote(np.arange(10))
obj = test_actor.get_array.remote()
assert isinstance(obj, ray.ObjectID)
assert np.alltrue(ray.get(obj) == np.arange(10))
test_array = np.arange(10)
# Remote actor functions should not mutate arguments
test_actor.modify_and_set_array.remote(test_array)
assert np.alltrue(test_array == np.arange(10))
# Remote actor functions should keep state
test_array[0] = -1
assert np.alltrue(test_array == ray.get(test_actor.get_array.remote()))
# Check that actor handles work in local mode.
@ray.remote
def use_actor_handle(handle):
array = np.ones(10)
handle.set_array.remote(array)
assert np.alltrue(array == ray.get(handle.get_array.remote()))
ray.get(use_actor_handle.remote(test_actor))
# Check that exceptions are deferred until ray.get().
exception_str = "test_advanced remote task exception"
@ray.remote
def throws():
raise Exception(exception_str)
obj = throws.remote()
with pytest.raises(Exception, match=exception_str):
ray.get(obj)
# Check that multiple return values are handled properly.
@ray.remote(num_return_vals=3)
def returns_multiple():
return 1, 2, 3
obj1, obj2, obj3 = returns_multiple.remote()
assert ray.get(obj1) == 1
assert ray.get(obj2) == 2
assert ray.get(obj3) == 3
assert ray.get([obj1, obj2, obj3]) == [1, 2, 3]
obj1, obj2, obj3 = test_actor.returns_multiple.remote()
assert ray.get(obj1) == 1
assert ray.get(obj2) == 2
assert ray.get(obj3) == 3
assert ray.get([obj1, obj2, obj3]) == [1, 2, 3]
@ray.remote(num_return_vals=2)
def returns_multiple_throws():
raise Exception(exception_str)
obj1, obj2 = returns_multiple_throws.remote()
with pytest.raises(Exception, match=exception_str):
ray.get(obj1)
with pytest.raises(Exception, match=exception_str):
ray.get(obj2)
# Check that Actors are not overwritten by remote calls from different
# classes.
@ray.remote
class RemoteActor1(object):
def __init__(self):
pass
def function1(self):
return 0
@ray.remote
class RemoteActor2(object):
def __init__(self):
pass
def function2(self):
return 1
actor1 = RemoteActor1.remote()
_ = RemoteActor2.remote()
assert ray.get(actor1.function1.remote()) == 0
# Test passing ObjectIDs.
@ray.remote
def direct_dep(input):
return input
@ray.remote
def indirect_dep(input):
return ray.get(direct_dep.remote(input[0]))
assert ray.get(indirect_dep.remote(["hello"])) == "hello"
def test_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=2)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
time_buffer = 2
# At most 10 copies of this can run at once.
@ray.remote(num_cpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(10)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(11)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_cpus=3)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_gpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(2)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_multi_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=10)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
@ray.remote(num_cpus=1, num_gpus=9)
def f(n):
time.sleep(n)
@ray.remote(num_cpus=9, num_gpus=1)
def g(n):
time.sleep(n)
time_buffer = 2
start_time = time.time()
ray.get([f.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5), g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_gpu_ids(shutdown_only):
num_gpus = 10
ray.init(num_cpus=10, num_gpus=num_gpus)
def get_gpu_ids(num_gpus_per_worker):
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == num_gpus_per_worker
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
f0 = ray.remote(num_gpus=0)(lambda: get_gpu_ids(0))
f1 = ray.remote(num_gpus=1)(lambda: get_gpu_ids(1))
f2 = ray.remote(num_gpus=2)(lambda: get_gpu_ids(2))
f4 = ray.remote(num_gpus=4)(lambda: get_gpu_ids(4))
f5 = ray.remote(num_gpus=5)(lambda: get_gpu_ids(5))
# Wait for all workers to start up.
@ray.remote
def f():
time.sleep(0.1)
return os.getpid()
start_time = time.time()
while True:
if len(set(ray.get([f.remote() for _ in range(10)]))) == 10:
break
if time.time() > start_time + 10:
raise RayTestTimeoutException(
"Timed out while waiting for workers to start "
"up.")
list_of_ids = ray.get([f0.remote() for _ in range(10)])
assert list_of_ids == 10 * [[]]
list_of_ids = ray.get([f1.remote() for _ in range(10)])
set_of_ids = {tuple(gpu_ids) for gpu_ids in list_of_ids}
assert set_of_ids == {(i, ) for i in range(10)}
list_of_ids = ray.get([f2.remote(), f4.remote(), f4.remote()])
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
assert set(all_ids) == set(range(10))
# There are only 10 GPUs, and each task uses 5 GPUs, so there should only
# be 2 tasks scheduled at a given time.
t1 = time.time()
ray.get([f5.remote() for _ in range(20)])
assert time.time() - t1 >= 10 * 0.1
# Test that actors have CUDA_VISIBLE_DEVICES set properly.
@ray.remote
class Actor0(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
@ray.remote(num_gpus=1)
class Actor1(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
a0 = Actor0.remote()
ray.get(a0.test.remote())
a1 = Actor1.remote()
ray.get(a1.test.remote())
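# Ray rewrites each worker's CUDA_VISIBLE_DEVICES to exactly the GPU ids it
# assigned, so a task or actor only sees its own devices; the actors above
# assert that invariant for the zero-GPU and one-GPU cases.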
def test_zero_cpus(shutdown_only):
ray.init(num_cpus=0)
# We should be able to execute a task that requires 0 CPU resources.
@ray.remote(num_cpus=0)
def f():
return 1
ray.get(f.remote())
# We should be able to create an actor that requires 0 CPU resources.
@ray.remote(num_cpus=0)
class Actor(object):
def method(self):
pass
a = Actor.remote()
x = a.method.remote()
ray.get(x)
def test_zero_cpus_actor(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
cluster.add_node(num_cpus=2)
ray.init(address=cluster.address)
node_id = ray.worker.global_worker.node.unique_id
@ray.remote
class Foo(object):
def method(self):
return ray.worker.global_worker.node.unique_id
# Make sure tasks and actors run on the remote raylet.
a = Foo.remote()
assert ray.get(a.method.remote()) != node_id
def test_fractional_resources(shutdown_only):
ray.init(num_cpus=6, num_gpus=3, resources={"Custom": 1})
@ray.remote(num_gpus=0.5)
class Foo1(object):
def method(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
return gpu_ids[0]
foos = [Foo1.remote() for _ in range(6)]
gpu_ids = ray.get([f.method.remote() for f in foos])
for i in range(3):
assert gpu_ids.count(i) == 2
del foos
@ray.remote
class Foo2(object):
def method(self):
pass
# Create an actor that requires 0.7 of the custom resource.
f1 = Foo2._remote([], {}, resources={"Custom": 0.7})
ray.get(f1.method.remote())
# Make sure that we cannot schedule a second actor that requires 0.7 of
# the custom resource: only 0.3 remains while f1 is alive, so its method
# never becomes ready.
f2 = Foo2._remote([], {}, resources={"Custom": 0.7})
ready, _ = ray.wait([f2.method.remote()], timeout=0.5)
assert len(ready) == 0
# Make sure we can start an actor that requires only 0.3 of the custom
# resource.
f3 = Foo2._remote([], {}, resources={"Custom": 0.3})
ray.get(f3.method.remote())
del f1, f3
# Make sure that we get exceptions if we submit tasks that require a
# fractional number of resources greater than 1.
@ray.remote(num_cpus=1.5)
def test():
pass
with pytest.raises(ValueError):
test.remote()
with pytest.raises(ValueError):
Foo2._remote([], {}, resources={"Custom": 1.5})
def test_multiple_raylets(ray_start_cluster):
# This test will define a bunch of tasks that can only be assigned to
# specific raylets, and we will check that they are assigned
# to the correct raylets.
cluster = ray_start_cluster
cluster.add_node(num_cpus=11, num_gpus=0)
cluster.add_node(num_cpus=5, num_gpus=5)
cluster.add_node(num_cpus=10, num_gpus=1)
ray.init(address=cluster.address)
cluster.wait_for_nodes()
# Define a bunch of remote functions that all return the socket name of
# the plasma store. Since there is a one-to-one correspondence between
# plasma stores and raylets (at least right now), this can be
# used to identify which raylet the task was assigned to.
# This must be run on the zeroth raylet.
@ray.remote(num_cpus=11)
def run_on_0():
return ray.worker.global_worker.node.plasma_store_socket_name
# This must be run on the first raylet.
@ray.remote(num_gpus=2)
def run_on_1():
return ray.worker.global_worker.node.plasma_store_socket_name
# This must be run on the second raylet.
@ray.remote(num_cpus=6, num_gpus=1)
def run_on_2():
return ray.worker.global_worker.node.plasma_store_socket_name
# This can be run anywhere.
@ray.remote(num_cpus=0, num_gpus=0)
def run_on_0_1_2():
return ray.worker.global_worker.node.plasma_store_socket_name
# This must be run on the first or second raylet.
@ray.remote(num_gpus=1)
def run_on_1_2():
return ray.worker.global_worker.node.plasma_store_socket_name
# This must be run on the zeroth or second raylet.
@ray.remote(num_cpus=8)
def run_on_0_2():
return ray.worker.global_worker.node.plasma_store_socket_name
def run_lots_of_tasks():
names = []
results = []
for i in range(100):
index = np.random.randint(6)
if index == 0:
names.append("run_on_0")
results.append(run_on_0.remote())
elif index == 1:
names.append("run_on_1")
results.append(run_on_1.remote())
elif index == 2:
names.append("run_on_2")
results.append(run_on_2.remote())
elif index == 3:
names.append("run_on_0_1_2")
results.append(run_on_0_1_2.remote())
elif index == 4:
names.append("run_on_1_2")
results.append(run_on_1_2.remote())
elif index == 5:
names.append("run_on_0_2")
results.append(run_on_0_2.remote())
return names, results
client_table = ray.nodes()
store_names = []
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"].get("GPU", 0) == 0
]
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"].get("GPU", 0) == 5
]
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"].get("GPU", 0) == 1
]
assert len(store_names) == 3
def validate_names_and_results(names, results):
for name, result in zip(names, ray.get(results)):
if name == "run_on_0":
assert result in [store_names[0]]
elif name == "run_on_1":
assert result in [store_names[1]]
elif name == "run_on_2":
assert result in [store_names[2]]
elif name == "run_on_0_1_2":
assert (result in [
store_names[0], store_names[1], store_names[2]
])
elif name == "run_on_1_2":
assert result in [store_names[1], store_names[2]]
elif name == "run_on_0_2":
assert result in [store_names[0], store_names[2]]
else:
raise Exception("This should be unreachable.")
assert set(ray.get(results)) == set(store_names)
names, results = run_lots_of_tasks()
validate_names_and_results(names, results)
# Make sure the same thing works when this is nested inside of a task.
@ray.remote
def run_nested1():
names, results = run_lots_of_tasks()
return names, results
@ray.remote
def run_nested2():
names, results = ray.get(run_nested1.remote())
return names, results
names, results = ray.get(run_nested2.remote())
validate_names_and_results(names, results)
def test_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=3, resources={"CustomResource": 0})
cluster.add_node(num_cpus=3, resources={"CustomResource": 1})
ray.init(address=cluster.address)
@ray.remote
def f():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource": 1})
def h():
ray.get([f.remote() for _ in range(5)])
return ray.worker.global_worker.node.unique_id
# The f tasks should be scheduled on both raylets.
assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
node_id = ray.worker.global_worker.node.unique_id
# The g tasks should be scheduled only on the second raylet.
raylet_ids = set(ray.get([g.remote() for _ in range(50)]))
assert len(raylet_ids) == 1
assert list(raylet_ids)[0] != node_id
# Make sure that resource bookkeeping works when a task that uses a
# custom resources gets blocked.
ray.get([h.remote() for _ in range(5)])
def test_node_id_resource(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=3)
cluster.add_node(num_cpus=3)
ray.init(address=cluster.address)
local_node = ray.state.current_node_id()
# Note that these will have the same IP in the test cluster
assert len(ray.state.node_ids()) == 2
assert local_node in ray.state.node_ids()
@ray.remote(resources={local_node: 1})
def f():
return ray.state.current_node_id()
# Check the node id resource is automatically usable for scheduling.
assert ray.get(f.remote()) == ray.state.current_node_id()
def test_two_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(
num_cpus=3, resources={
"CustomResource1": 1,
"CustomResource2": 2
})
cluster.add_node(
num_cpus=3, resources={
"CustomResource1": 3,
"CustomResource2": 4
})
ray.init(address=cluster.address)
@ray.remote(resources={"CustomResource1": 1})
def f():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource2": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource1": 1, "CustomResource2": 3})
def h():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource1": 4})
def j():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource3": 1})
def k():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
# The f and g tasks should be scheduled on both raylets.
assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
assert len(set(ray.get([g.remote() for _ in range(50)]))) == 2
node_id = ray.worker.global_worker.node.unique_id
# The h tasks should be scheduled only on the second raylet.
raylet_ids = set(ray.get([h.remote() for _ in range(50)]))
assert len(raylet_ids) == 1
assert list(raylet_ids)[0] != node_id
# Make sure that tasks with unsatisfied custom resource requirements do
# not get scheduled.
ready_ids, remaining_ids = ray.wait([j.remote(), k.remote()], timeout=0.5)
assert ready_ids == []
def test_many_custom_resources(shutdown_only):
num_custom_resources = 10000
total_resources = {
str(i): np.random.randint(1, 7)
for i in range(num_custom_resources)
}
ray.init(num_cpus=5, resources=total_resources)
def f():
return 1
remote_functions = []
for _ in range(20):
num_resources = np.random.randint(0, num_custom_resources + 1)
permuted_resources = np.random.permutation(
num_custom_resources)[:num_resources]
random_resources = {
str(i): total_resources[str(i)]
for i in permuted_resources
}
remote_function = ray.remote(resources=random_resources)(f)
remote_functions.append(remote_function)
remote_functions.append(ray.remote(f))
remote_functions.append(ray.remote(resources=total_resources)(f))
results = []
for remote_function in remote_functions:
results.append(remote_function.remote())
results.append(remote_function.remote())
results.append(remote_function.remote())
ray.get(results)
# TODO: 5 retry attempts may be too little for Travis and we may need to
# increase it if this test begins to be flaky on Travis.
def test_zero_capacity_deletion_semantics(shutdown_only):
ray.init(num_cpus=2, num_gpus=1, resources={"test_resource": 1})
def test():
resources = ray.available_resources()
MAX_RETRY_ATTEMPTS = 5
retry_count = 0
del resources["memory"]
del resources["object_store_memory"]
for key in list(resources.keys()):
if key.startswith("node:"):
del resources[key]
while resources and retry_count < MAX_RETRY_ATTEMPTS:
time.sleep(0.1)
resources = ray.available_resources()
retry_count += 1
if retry_count >= MAX_RETRY_ATTEMPTS:
raise RuntimeError(
"Resources were available even after five retries.", resources)
return resources
function = ray.remote(
num_cpus=2, num_gpus=1, resources={"test_resource": 1})(test)
cluster_resources = ray.get(function.remote())
# All cluster resources should be utilized and
# cluster_resources must be empty
assert cluster_resources == {}
@pytest.fixture
def save_gpu_ids_shutdown_only():
# Record the current value of this environment variable so that we can
# reset it after the test.
original_gpu_ids = os.environ.get("CUDA_VISIBLE_DEVICES", None)
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
# Reset the environment variable.
if original_gpu_ids is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = original_gpu_ids
else:
del os.environ["CUDA_VISIBLE_DEVICES"]
def test_specific_gpus(save_gpu_ids_shutdown_only):
allowed_gpu_ids = [4, 5, 6]
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
[str(i) for i in allowed_gpu_ids])
ray.init(num_gpus=3)
@ray.remote(num_gpus=1)
def f():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert gpu_ids[0] in allowed_gpu_ids
@ray.remote(num_gpus=2)
def g():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 2
assert gpu_ids[0] in allowed_gpu_ids
assert gpu_ids[1] in allowed_gpu_ids
ray.get([f.remote() for _ in range(100)])
ray.get([g.remote() for _ in range(100)])
def test_blocking_tasks(ray_start_regular):
@ray.remote
def f(i, j):
return (i, j)
@ray.remote
def g(i):
# Each instance of g submits and blocks on the result of another
# remote task.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.get(object_ids)
@ray.remote
def h(i):
# Each instance of h submits and blocks on the result of another
# remote task using ray.wait.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.wait(object_ids, num_returns=len(object_ids))
ray.get([h.remote(i) for i in range(4)])
@ray.remote
def _sleep(i):
time.sleep(0.01)
return (i)
@ray.remote
def sleep():
# Each instance of sleep submits and blocks on the result of
# another remote task, which takes some time to execute.
ray.get([_sleep.remote(i) for i in range(10)])
ray.get(sleep.remote())
def test_max_call_tasks(ray_start_regular):
@ray.remote(max_calls=1)
def f():
return os.getpid()
pid = ray.get(f.remote())
ray.test_utils.wait_for_pid_to_exit(pid)
@ray.remote(max_calls=2)
def f():
return os.getpid()
pid1 = ray.get(f.remote())
pid2 = ray.get(f.remote())
assert pid1 == pid2
ray.test_utils.wait_for_pid_to_exit(pid1)
def attempt_to_load_balance(remote_function,
args,
total_tasks,
num_nodes,
minimum_count,
num_attempts=100):
attempts = 0
while attempts < num_attempts:
locations = ray.get(
[remote_function.remote(*args) for _ in range(total_tasks)])
names = set(locations)
counts = [locations.count(name) for name in names]
logger.info("Counts are {}.".format(counts))
if (len(names) == num_nodes
and all(count >= minimum_count for count in counts)):
break
attempts += 1
assert attempts < num_attempts
def test_load_balancing(ray_start_cluster):
# This test ensures that tasks are being assigned to all raylets
# in a roughly equal manner.
cluster = ray_start_cluster
num_nodes = 3
num_cpus = 7
for _ in range(num_nodes):
cluster.add_node(num_cpus=num_cpus)
ray.init(address=cluster.address)
@ray.remote
def f():
time.sleep(0.01)
return ray.worker.global_worker.node.unique_id
attempt_to_load_balance(f, [], 100, num_nodes, 10)
attempt_to_load_balance(f, [], 1000, num_nodes, 100)
def test_load_balancing_with_dependencies(ray_start_cluster):
# This test ensures that tasks are being assigned to all raylets in a
# roughly equal manner even when the tasks have dependencies.
cluster = ray_start_cluster
num_nodes = 3
for _ in range(num_nodes):
cluster.add_node(num_cpus=1)
ray.init(address=cluster.address)
@ray.remote
def f(x):
time.sleep(0.010)
return ray.worker.global_worker.node.unique_id
# This object will be local to one of the raylets. Make sure
# this doesn't prevent tasks from being scheduled on other raylets.
x = ray.put(np.zeros(1000000))
attempt_to_load_balance(f, [x], 100, num_nodes, 25)
def wait_for_num_tasks(num_tasks, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.tasks()) >= num_tasks:
return
time.sleep(0.1)
raise RayTestTimeoutException("Timed out while waiting for global state.")
def wait_for_num_objects(num_objects, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.objects()) >= num_objects:
return
time.sleep(0.1)
raise RayTestTimeoutException("Timed out while waiting for global state.")
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_global_state_api(shutdown_only):
error_message = ("The ray global state API cannot be used "
"before ray.init has been called.")
with pytest.raises(Exception, match=error_message):
ray.objects()
with pytest.raises(Exception, match=error_message):
ray.tasks()
with pytest.raises(Exception, match=error_message):
ray.nodes()
with pytest.raises(Exception, match=error_message):
ray.jobs()
ray.init(num_cpus=5, num_gpus=3, resources={"CustomResource": 1})
assert ray.cluster_resources()["CPU"] == 5
assert ray.cluster_resources()["GPU"] == 3
assert ray.cluster_resources()["CustomResource"] == 1
assert ray.objects() == {}
job_id = ray.utils.compute_job_id_from_driver(
ray.WorkerID(ray.worker.global_worker.worker_id))
driver_task_id = ray.worker.global_worker.current_task_id.hex()
# One task is put in the task table which corresponds to this driver.
wait_for_num_tasks(1)
task_table = ray.tasks()
assert len(task_table) == 1
assert driver_task_id == list(task_table.keys())[0]
task_spec = task_table[driver_task_id]["TaskSpec"]
nil_unique_id_hex = ray.UniqueID.nil().hex()
nil_actor_id_hex = ray.ActorID.nil().hex()
assert task_spec["TaskID"] == driver_task_id
assert task_spec["ActorID"] == nil_actor_id_hex
assert task_spec["Args"] == []
assert task_spec["JobID"] == job_id.hex()
assert task_spec["FunctionID"] == nil_unique_id_hex
assert task_spec["ReturnObjectIDs"] == []
client_table = ray.nodes()
node_ip_address = ray.worker.global_worker.node_ip_address
assert len(client_table) == 1
assert client_table[0]["NodeManagerAddress"] == node_ip_address
@ray.remote
def f(*xs):
return 1
x_id = ray.put(1)
result_id = f.remote(1, "hi", x_id)
# Wait for one additional task to complete.
wait_for_num_tasks(1 + 1)
task_table = ray.tasks()
assert len(task_table) == 1 + 1
task_id_set = set(task_table.keys())
task_id_set.remove(driver_task_id)
task_id = list(task_id_set)[0]
task_spec = task_table[task_id]["TaskSpec"]
assert task_spec["ActorID"] == nil_actor_id_hex
assert task_spec["Args"] == [
signature.DUMMY_TYPE, 1, signature.DUMMY_TYPE, "hi",
signature.DUMMY_TYPE, x_id
]
assert task_spec["JobID"] == job_id.hex()
assert task_spec["ReturnObjectIDs"] == [result_id]
assert task_table[task_id] == ray.tasks(task_id)
# Wait for two objects, one for the x_id and one for result_id.
wait_for_num_objects(2)
def wait_for_object_table():
timeout = 10
start_time = time.time()
while time.time() - start_time < timeout:
object_table = ray.objects()
tables_ready = (object_table[x_id]["ManagerIDs"] is not None and
object_table[result_id]["ManagerIDs"] is not None)
if tables_ready:
return
time.sleep(0.1)
raise RayTestTimeoutException(
"Timed out while waiting for object table to "
"update.")
object_table = ray.objects()
assert len(object_table) == 2
assert object_table[x_id] == ray.objects(x_id)
object_table_entry = ray.objects(result_id)
assert object_table[result_id] == object_table_entry
job_table = ray.jobs()
assert len(job_table) == 1
assert job_table[0]["JobID"] == job_id.hex()
assert job_table[0]["NodeManagerAddress"] == node_ip_address
# TODO(rkn): Pytest actually has tools for capturing stdout and stderr, so we
# should use those, but they seem to conflict with Ray's use of faulthandler.
class CaptureOutputAndError(object):
"""Capture stdout and stderr of some span.
This can be used as follows.
captured = {}
with CaptureOutputAndError(captured):
# Do stuff.
# Access captured["out"] and captured["err"].
"""
def __init__(self, captured_output_and_error):
if sys.version_info >= (3, 0):
import io
self.output_buffer = io.StringIO()
self.error_buffer = io.StringIO()
else:
import cStringIO
self.output_buffer = cStringIO.StringIO()
self.error_buffer = cStringIO.StringIO()
self.captured_output_and_error = captured_output_and_error
def __enter__(self):
sys.stdout.flush()
sys.stderr.flush()
self.old_stdout = sys.stdout
self.old_stderr = sys.stderr
sys.stdout = self.output_buffer
sys.stderr = self.error_buffer
def __exit__(self, exc_type, exc_value, traceback):
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = self.old_stdout
sys.stderr = self.old_stderr
self.captured_output_and_error["out"] = self.output_buffer.getvalue()
self.captured_output_and_error["err"] = self.error_buffer.getvalue()
def test_logging_to_driver(shutdown_only):
ray.init(num_cpus=1, log_to_driver=True)
@ray.remote
def f():
# It's important to make sure that these print statements occur even
# without calling sys.stdout.flush() and sys.stderr.flush().
for i in range(100):
print(i)
print(100 + i, file=sys.stderr)
captured = {}
with CaptureOutputAndError(captured):
ray.get(f.remote())
time.sleep(1)
output_lines = captured["out"]
for i in range(200):
assert str(i) in output_lines
# TODO(rkn): Check that no additional logs appear beyond what we expect
# and that there are no duplicate logs. Once we address the issue
# described in https://github.com/ray-project/ray/pull/5462, we should
# also check that nothing is logged to stderr.
def test_not_logging_to_driver(shutdown_only):
ray.init(num_cpus=1, log_to_driver=False)
@ray.remote
def f():
for i in range(100):
print(i)
print(100 + i, file=sys.stderr)
sys.stdout.flush()
sys.stderr.flush()
captured = {}
with CaptureOutputAndError(captured):
ray.get(f.remote())
time.sleep(1)
output_lines = captured["out"]
assert len(output_lines) == 0
# TODO(rkn): Check that no additional logs appear beyond what we expect
# and that there are no duplicate logs. Once we address the issue
# described in https://github.com/ray-project/ray/pull/5462, we should
# also check that nothing is logged to stderr.
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_workers(shutdown_only):
num_workers = 3
ray.init(num_cpus=num_workers)
@ray.remote
def f():
return id(ray.worker.global_worker), os.getpid()
# Wait until all of the workers have started.
worker_ids = set()
while len(worker_ids) != num_workers:
worker_ids = set(ray.get([f.remote() for _ in range(10)]))
def test_specific_job_id():
dummy_driver_id = ray.JobID.from_int(1)
ray.init(num_cpus=1, job_id=dummy_driver_id)
# in driver
assert dummy_driver_id == ray._get_runtime_context().current_driver_id
# in worker
@ray.remote
def f():
return ray._get_runtime_context().current_driver_id
assert dummy_driver_id == ray.get(f.remote())
ray.shutdown()
def test_object_id_properties():
id_bytes = b"00112233445566778899"
object_id = ray.ObjectID(id_bytes)
assert object_id.binary() == id_bytes
object_id = ray.ObjectID.nil()
assert object_id.is_nil()
with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
ray.ObjectID(id_bytes + b"1234")
with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
ray.ObjectID(b"0123456789")
object_id = ray.ObjectID.from_random()
assert not object_id.is_nil()
assert object_id.binary() != id_bytes
id_dumps = pickle.dumps(object_id)
id_from_dumps = pickle.loads(id_dumps)
assert id_from_dumps == object_id
@pytest.fixture
def shutdown_only_with_initialization_check():
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
assert not ray.is_initialized()
def test_initialized(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0)
assert ray.is_initialized()
def test_initialized_local_mode(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0, local_mode=True)
assert ray.is_initialized()
def test_wait_reconstruction(shutdown_only):
ray.init(num_cpus=1, object_store_memory=int(10**8))
@ray.remote
def f():
return np.zeros(6 * 10**7, dtype=np.uint8)
x_id = f.remote()
ray.wait([x_id])
ray.wait([f.remote()])
assert not ray.worker.global_worker.core_worker.object_exists(x_id)
ready_ids, _ = ray.wait([x_id])
assert len(ready_ids) == 1
def test_ray_setproctitle(ray_start_2_cpus):
@ray.remote
class UniqueName(object):
def __init__(self):
assert setproctitle.getproctitle() == "ray_UniqueName:__init__()"
def f(self):
assert setproctitle.getproctitle() == "ray_UniqueName:f()"
@ray.remote
def unique_1():
assert "unique_1" in setproctitle.getproctitle()
actor = UniqueName.remote()
ray.get(actor.f.remote())
ray.get(unique_1.remote())
def test_duplicate_error_messages(shutdown_only):
ray.init(num_cpus=0)
driver_id = ray.WorkerID.nil()
error_data = ray.gcs_utils.construct_error_message(driver_id, "test",
"message", 0)
# Push the same message to the GCS twice (they are the same because we
# do not include a timestamp).
r = ray.worker.global_worker.redis_client
r.execute_command("RAY.TABLE_APPEND",
ray.gcs_utils.TablePrefix.Value("ERROR_INFO"),
ray.gcs_utils.TablePubsub.Value("ERROR_INFO_PUBSUB"),
driver_id.binary(), error_data)
# Before https://github.com/ray-project/ray/pull/3316 this would
# give an error
r.execute_command("RAY.TABLE_APPEND",
ray.gcs_utils.TablePrefix.Value("ERROR_INFO"),
ray.gcs_utils.TablePubsub.Value("ERROR_INFO_PUBSUB"),
driver_id.binary(), error_data)
@pytest.mark.skipif(
os.getenv("TRAVIS") is None,
reason="This test should only be run on Travis.")
def test_ray_stack(ray_start_2_cpus):
def unique_name_1():
time.sleep(1000)
@ray.remote
def unique_name_2():
time.sleep(1000)
@ray.remote
def unique_name_3():
unique_name_1()
unique_name_2.remote()
unique_name_3.remote()
success = False
start_time = time.time()
while time.time() - start_time < 30:
# Attempt to parse the "ray stack" call.
output = ray.utils.decode(subprocess.check_output(["ray", "stack"]))
if ("unique_name_1" in output and "unique_name_2" in output
and "unique_name_3" in output):
success = True
break
if not success:
raise Exception("Failed to find necessary information with "
"'ray stack'")
def test_pandas_parquet_serialization():
# Only test this if pandas is installed
pytest.importorskip("pandas")
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
tempdir = tempfile.mkdtemp()
filename = os.path.join(tempdir, "parquet-test")
pd.DataFrame({"col1": [0, 1], "col2": [0, 1]}).to_parquet(filename)
with open(os.path.join(tempdir, "parquet-compression"), "wb") as f:
table = pa.Table.from_arrays([pa.array([1, 2, 3])], ["hello"])
pq.write_table(table, f, compression="lz4")
# Clean up
shutil.rmtree(tempdir)
def test_socket_dir_not_existing(shutdown_only):
random_name = ray.ObjectID.from_random().hex()
temp_raylet_socket_dir = "/tmp/ray/tests/{}".format(random_name)
temp_raylet_socket_name = os.path.join(temp_raylet_socket_dir,
"raylet_socket")
ray.init(num_cpus=1, raylet_socket_name=temp_raylet_socket_name)
def test_raylet_is_robust_to_random_messages(ray_start_regular):
node_manager_address = None
node_manager_port = None
for client in ray.nodes():
if "NodeManagerAddress" in client:
node_manager_address = client["NodeManagerAddress"]
node_manager_port = client["NodeManagerPort"]
assert node_manager_address
assert node_manager_port
# Try to bring down the node manager:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((node_manager_address, node_manager_port))
s.send(1000 * b"asdf")
@ray.remote
def f():
return 1
assert ray.get(f.remote()) == 1
def test_non_ascii_comment(ray_start_regular):
@ray.remote
def f():
# 日本語 Japanese comment
return 1
assert ray.get(f.remote()) == 1
def test_shutdown_disconnect_global_state():
ray.init(num_cpus=0)
ray.shutdown()
with pytest.raises(Exception) as e:
ray.objects()
assert str(e.value).endswith("ray.init has been called.")
@pytest.mark.parametrize(
"ray_start_object_store_memory", [150 * 1024 * 1024], indirect=True)
def test_put_pins_object(ray_start_object_store_memory):
x_id = ray.put("HI")
x_copy = ray.ObjectID(x_id.binary())
assert ray.get(x_copy) == "HI"
# x cannot be evicted since x_id pins it
for _ in range(10):
ray.put(np.zeros(10 * 1024 * 1024))
assert ray.get(x_id) == "HI"
assert ray.get(x_copy) == "HI"
# now it can be evicted: deleting x_id removes the pin, and x_copy does not pin it
del x_id
for _ in range(10):
ray.put(np.zeros(10 * 1024 * 1024))
with pytest.raises(ray.exceptions.UnreconstructableError):
ray.get(x_copy)
# weakref put
y_id = ray.put("HI", weakref=True)
for _ in range(10):
ray.put(np.zeros(10 * 1024 * 1024))
with pytest.raises(ray.exceptions.UnreconstructableError):
ray.get(y_id)
@ray.remote
def check_no_buffer_ref(x):
assert x[0].get_buffer_ref() is None
z_id = ray.put("HI")
assert z_id.get_buffer_ref() is not None
ray.get(check_no_buffer_ref.remote([z_id]))
@pytest.mark.parametrize(
"ray_start_object_store_memory", [150 * 1024 * 1024], indirect=True)
def test_redis_lru_with_set(ray_start_object_store_memory):
x = np.zeros(8 * 10**7, dtype=np.uint8)
x_id = ray.put(x, weakref=True)
# Remove the object from the object table to simulate Redis LRU eviction.
removed = False
start_time = time.time()
while time.time() < start_time + 10:
if ray.state.state.redis_clients[0].delete(b"OBJECT" +
x_id.binary()) == 1:
removed = True
break
assert removed
# Now evict the object from the object store.
ray.put(x) # This should not crash.
def test_decorated_function(ray_start_regular):
def function_invocation_decorator(f):
def new_f(args, kwargs):
# Reverse the arguments.
return f(args[::-1], {"d": 5}), kwargs
return new_f
def f(a, b, c, d=None):
return a, b, c, d
f.__ray_invocation_decorator__ = function_invocation_decorator
f = ray.remote(f)
result_id, kwargs = f.remote(1, 2, 3, d=4)
assert kwargs == {"d": 4}
assert ray.get(result_id) == (3, 2, 1, 5)
def test_get_postprocess(ray_start_regular):
def get_postprocessor(object_ids, values):
return [value for value in values if value > 0]
ray.worker.global_worker._post_get_hooks.append(get_postprocessor)
assert ray.get(
[ray.put(i) for i in [0, 1, 3, 5, -1, -3, 4]]) == [1, 3, 5, 4]
def test_export_after_shutdown(ray_start_regular):
# This test checks that we can use actor and remote function definitions
# across multiple Ray sessions.
@ray.remote
def f():
pass
@ray.remote
class Actor(object):
def method(self):
pass
ray.get(f.remote())
a = Actor.remote()
ray.get(a.method.remote())
ray.shutdown()
# Start Ray and use the remote function and actor again.
ray.init(num_cpus=1)
ray.get(f.remote())
a = Actor.remote()
ray.get(a.method.remote())
ray.shutdown()
# Start Ray again and make sure that these definitions can be exported from
# workers.
ray.init(num_cpus=2)
@ray.remote
def export_definitions_from_worker(remote_function, actor_class):
ray.get(remote_function.remote())
actor_handle = actor_class.remote()
ray.get(actor_handle.method.remote())
ray.get(export_definitions_from_worker.remote(f, Actor))
def test_invalid_unicode_in_worker_log(shutdown_only):
info = ray.init(num_cpus=1)
logs_dir = os.path.join(info["session_dir"], "logs")
# Wait till first worker log file is created.
while True:
log_file_paths = glob.glob("{}/worker*.out".format(logs_dir))
if len(log_file_paths) == 0:
time.sleep(0.2)
else:
break
with open(log_file_paths[0], "wb") as f:
f.write(b"\xe5abc\nline2\nline3\n")
f.write(b"\xe5abc\nline2\nline3\n")
f.write(b"\xe5abc\nline2\nline3\n")
f.flush()
# Wait till the log monitor reads the file.
time.sleep(1.0)
# Make sure that nothing has died.
assert ray.services.remaining_processes_alive()
@pytest.mark.skip(reason="This test is too expensive to run.")
def test_move_log_files_to_old(shutdown_only):
info = ray.init(num_cpus=1)
logs_dir = os.path.join(info["session_dir"], "logs")
@ray.remote
class Actor(object):
def f(self):
print("function f finished")
# First create a temporary actor.
actors = [
Actor.remote() for i in range(ray_constants.LOG_MONITOR_MAX_OPEN_FILES)
]
ray.get([a.f.remote() for a in actors])
# Make sure no log files are in the "old" directory before the actors
# are killed.
assert len(glob.glob("{}/old/worker*.out".format(logs_dir))) == 0
# Now kill the actors so the files get moved to logs/old/.
[a.__ray_terminate__.remote() for a in actors]
while True:
log_file_paths = glob.glob("{}/old/worker*.out".format(logs_dir))
if len(log_file_paths) > 0:
with open(log_file_paths[0], "r") as f:
assert "function f finished\n" in f.readlines()
break
# Make sure that nothing has died.
assert ray.services.remaining_processes_alive()
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
simple_chat_server.py
|
# -*- coding: utf-8 -*-
"""simple_messager_server
"""
import socket
from threading import Thread
HOST="0.0.0.0"
PORT=8080
HEADER=64
s=socket.socket(family=socket.AF_INET,type=socket.SOCK_STREAM)
s.bind((HOST,PORT))
client={}
addresses={}
def handle_client_in(conn,addr):
    name_len=int(conn.recv(HEADER).decode('utf8'))
    nickname=conn.recv(name_len).decode('utf8')
    client[conn]=nickname
    print(client)
    print(f"{nickname} joined the room")
    broadcast(f"{nickname} joined the room")
    while True:
        try:
            msg_len=conn.recv(HEADER).decode('utf8')
            if not msg_len:
                # An empty read means the client disconnected.
                raise ConnectionResetError
            msg=conn.recv(int(msg_len)).decode('utf8')
            print(f"{nickname}:{msg}")
            broadcast(f"{nickname}:{msg}")
        except (OSError, ValueError):
            del client[conn]
            print(f"{nickname} left the room")
            broadcast(f"{nickname} left the room")
            conn.close()
            print(client)
            break
def broadcast(msg,client=client):
    # Broadcasts are sent raw (no length header); clients should read them
    # with a plain recv().
    for conn in client.keys():
        conn.send(f"{msg}".encode('utf8'))
s.listen()
print(f"server launched, listening on port {PORT}")
while True:
conn,address=s.accept()
print(address,"Connected")
print(conn)
conn.send("Welcome to the chatting room, please in put your nickname:".encode('utf8'))
addresses[conn]=address
Thread(target=handle_client_in,args=(conn,address)).start()
|
test_concurrent_futures.py
|
import test.support
# Skip tests if _multiprocessing wasn't built.
test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
test.support.import_module('threading')
from test.support.script_helper import assert_python_ok
import os
import sys
import threading
import time
import unittest
import weakref
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future)
from concurrent.futures.process import BrokenProcessPool
def create_future(state=PENDING, exception=None, result=None):
f = Future()
f._state = state
f._exception = exception
f._result = result
return f
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
def mul(x, y):
return x * y
def sleep_and_raise(t):
time.sleep(t)
raise Exception('this is an exception')
def sleep_and_print(t, msg):
time.sleep(t)
print(msg)
sys.stdout.flush()
class MyObject(object):
def my_method(self):
pass
class ExecutorMixin:
worker_count = 5
def setUp(self):
self.t1 = time.time()
try:
self.executor = self.executor_type(max_workers=self.worker_count)
except NotImplementedError as e:
self.skipTest(str(e))
self._prime_executor()
def tearDown(self):
self.executor.shutdown(wait=True)
dt = time.time() - self.t1
if test.support.verbose:
print("%.2fs" % dt, end=' ')
self.assertLess(dt, 60, "synchronization issue: test lasted too long")
def _prime_executor(self):
# Make sure that the executor is ready to do work before running the
# tests. This should reduce the probability of timeouts in the tests.
futures = [self.executor.submit(time.sleep, 0.1)
for _ in range(self.worker_count)]
for f in futures:
f.result()
class ThreadPoolMixin(ExecutorMixin):
executor_type = futures.ThreadPoolExecutor
class ProcessPoolMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
class ExecutorShutdownTest:
def test_run_after_shutdown(self):
self.executor.shutdown()
self.assertRaises(RuntimeError,
self.executor.submit,
pow, 2, 5)
def test_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
from concurrent.futures import {executor_type}
from time import sleep
from test.test_concurrent_futures import sleep_and_print
t = {executor_type}(5)
t.submit(sleep_and_print, 1.0, "apple")
""".format(executor_type=self.executor_type.__name__))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertFalse(err)
self.assertEqual(out.strip(), b"apple")
def test_hang_issue12364(self):
fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
self.executor.shutdown()
for f in fs:
f.result()
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest, unittest.TestCase):
def _prime_executor(self):
pass
def test_threads_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._threads), 3)
self.executor.shutdown()
for t in self.executor._threads:
t.join()
def test_context_manager_shutdown(self):
with futures.ThreadPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for t in executor._threads:
t.join()
def test_del_shutdown(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
t.join()
class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest, unittest.TestCase):
def _prime_executor(self):
pass
def test_processes_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._processes), 5)
processes = self.executor._processes
self.executor.shutdown()
for p in processes.values():
p.join()
def test_context_manager_shutdown(self):
with futures.ProcessPoolExecutor(max_workers=5) as e:
processes = e._processes
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for p in processes.values():
p.join()
def test_del_shutdown(self):
executor = futures.ProcessPoolExecutor(max_workers=5)
list(executor.map(abs, range(-5, 5)))
queue_management_thread = executor._queue_management_thread
processes = executor._processes
del executor
queue_management_thread.join()
for p in processes.values():
p.join()
class WaitTests:
def test_first_completed(self):
future1 = self.executor.submit(mul, 21, 2)
future2 = self.executor.submit(time.sleep, 1.5)
done, not_done = futures.wait(
[CANCELLED_FUTURE, future1, future2],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(set([future1]), done)
self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
def test_first_completed_some_already_completed(self):
future1 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(
set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
finished)
self.assertEqual(set([future1]), pending)
def test_first_exception(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(sleep_and_raise, 1.5)
future3 = self.executor.submit(time.sleep, 3)
finished, pending = futures.wait(
[future1, future2, future3],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([future1, future2]), finished)
self.assertEqual(set([future3]), pending)
def test_first_exception_some_already_complete(self):
future1 = self.executor.submit(divmod, 21, 0)
future2 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1, future2],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1]), finished)
self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
def test_first_exception_one_already_failed(self):
future1 = self.executor.submit(time.sleep, 2)
finished, pending = futures.wait(
[EXCEPTION_FUTURE, future1],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([EXCEPTION_FUTURE]), finished)
self.assertEqual(set([future1]), pending)
def test_all_completed(self):
future1 = self.executor.submit(divmod, 2, 0)
future2 = self.executor.submit(mul, 2, 21)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2],
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2]), finished)
self.assertEqual(set(), pending)
def test_timeout(self):
future1 = self.executor.submit(mul, 6, 7)
future2 = self.executor.submit(time.sleep, 6)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2],
timeout=5,
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1]), finished)
self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests, unittest.TestCase):
def test_pending_calls_race(self):
# Issue #14406: multi-threaded race condition when waiting on all
# futures.
event = threading.Event()
def future_func():
event.wait()
oldswitchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
fs = {self.executor.submit(future_func) for i in range(100)}
event.set()
futures.wait(fs, return_when=futures.ALL_COMPLETED)
finally:
sys.setswitchinterval(oldswitchinterval)
class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests, unittest.TestCase):
pass
class AsCompletedTests:
# TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
def test_no_timeout(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(mul, 7, 6)
completed = set(futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]))
self.assertEqual(set(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]),
completed)
def test_zero_timeout(self):
future1 = self.executor.submit(time.sleep, 2)
completed_futures = set()
try:
for future in futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1],
timeout=0):
completed_futures.add(future)
except futures.TimeoutError:
pass
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE]),
completed_futures)
def test_duplicate_futures(self):
# Issue 20367. Duplicate futures should not raise exceptions or give
# duplicate responses.
future1 = self.executor.submit(time.sleep, 2)
completed = [f for f in futures.as_completed([future1,future1])]
self.assertEqual(len(completed), 1)
class ThreadPoolAsCompletedTests(ThreadPoolMixin, AsCompletedTests, unittest.TestCase):
pass
class ProcessPoolAsCompletedTests(ProcessPoolMixin, AsCompletedTests, unittest.TestCase):
pass
class ExecutorTest:
# Executor.shutdown() and context manager usage is tested by
# ExecutorShutdownTest.
def test_submit(self):
future = self.executor.submit(pow, 2, 8)
self.assertEqual(256, future.result())
def test_submit_keyword(self):
future = self.executor.submit(mul, 2, y=8)
self.assertEqual(16, future.result())
def test_map(self):
self.assertEqual(
list(self.executor.map(pow, range(10), range(10))),
list(map(pow, range(10), range(10))))
def test_map_exception(self):
i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
self.assertEqual(i.__next__(), (0, 1))
self.assertEqual(i.__next__(), (0, 1))
self.assertRaises(ZeroDivisionError, i.__next__)
def test_map_timeout(self):
results = []
try:
for i in self.executor.map(time.sleep,
[0, 0, 6],
timeout=5):
results.append(i)
except futures.TimeoutError:
pass
else:
self.fail('expected TimeoutError')
self.assertEqual([None, None], results)
def test_shutdown_race_issue12456(self):
# Issue #12456: race condition at shutdown where trying to post a
# sentinel in the call queue blocks (the queue is full while processes
# have exited).
self.executor.map(str, [2] * (self.worker_count + 1))
self.executor.shutdown()
@test.support.cpython_only
def test_no_stale_references(self):
# Issue #16284: check that the executors don't unnecessarily hang onto
# references.
my_object = MyObject()
my_object_collected = threading.Event()
my_object_callback = weakref.ref(
my_object, lambda obj: my_object_collected.set())
# Deliberately discarding the future.
self.executor.submit(my_object.my_method)
del my_object
collected = my_object_collected.wait(timeout=5.0)
self.assertTrue(collected,
"Stale reference not collected within timeout.")
def test_max_workers_negative(self):
for number in (0, -1):
with self.assertRaisesRegex(ValueError,
"max_workers must be greater "
"than 0"):
self.executor_type(max_workers=number)
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest, unittest.TestCase):
def test_map_submits_without_iteration(self):
"""Tests verifying issue 11777."""
finished = []
def record_finished(n):
finished.append(n)
self.executor.map(record_finished, range(10))
self.executor.shutdown(wait=True)
self.assertCountEqual(finished, range(10))
def test_default_workers(self):
executor = self.executor_type()
self.assertEqual(executor._max_workers,
(os.cpu_count() or 1) * 5)
class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest, unittest.TestCase):
def test_killed_child(self):
# When a child process is abruptly terminated, the whole pool gets
# "broken".
futures = [self.executor.submit(time.sleep, 3)]
# Get one of the processes, and terminate (kill) it
p = next(iter(self.executor._processes.values()))
p.terminate()
for fut in futures:
self.assertRaises(BrokenProcessPool, fut.result)
# Submitting other jobs fails as well.
self.assertRaises(BrokenProcessPool, self.executor.submit, pow, 2, 8)
def test_map_chunksize(self):
def bad_map():
list(self.executor.map(pow, range(40), range(40), chunksize=-1))
ref = list(map(pow, range(40), range(40)))
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=6)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=50)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=40)),
ref)
self.assertRaises(ValueError, bad_map)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
# We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
future = self.executor.submit(self._test_traceback)
with self.assertRaises(Exception) as cm:
future.result()
exc = cm.exception
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), futures.process._RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
class FutureTests(unittest.TestCase):
def test_done_callback_with_result(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.add_done_callback(fn)
f.set_result(5)
self.assertEqual(5, callback_result)
def test_done_callback_with_exception(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.add_done_callback(fn)
f.set_exception(Exception('test'))
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_with_cancel(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
f.add_done_callback(fn)
self.assertTrue(f.cancel())
self.assertTrue(was_cancelled)
def test_done_callback_raises(self):
with test.support.captured_stderr() as stderr:
raising_was_called = False
fn_was_called = False
def raising_fn(callback_future):
nonlocal raising_was_called
raising_was_called = True
raise Exception('doh!')
def fn(callback_future):
nonlocal fn_was_called
fn_was_called = True
f = Future()
f.add_done_callback(raising_fn)
f.add_done_callback(fn)
f.set_result(5)
self.assertTrue(raising_was_called)
self.assertTrue(fn_was_called)
self.assertIn('Exception: doh!', stderr.getvalue())
def test_done_callback_already_successful(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.set_result(5)
f.add_done_callback(fn)
self.assertEqual(5, callback_result)
def test_done_callback_already_failed(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.set_exception(Exception('test'))
f.add_done_callback(fn)
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_already_cancelled(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
self.assertTrue(f.cancel())
f.add_done_callback(fn)
self.assertTrue(was_cancelled)
def test_repr(self):
self.assertRegex(repr(PENDING_FUTURE),
'<Future at 0x[0-9a-f]+ state=pending>')
self.assertRegex(repr(RUNNING_FUTURE),
'<Future at 0x[0-9a-f]+ state=running>')
self.assertRegex(repr(CANCELLED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(repr(CANCELLED_AND_NOTIFIED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(
repr(EXCEPTION_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished raised OSError>')
self.assertRegex(
repr(SUCCESSFUL_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished returned int>')
def test_cancel(self):
f1 = create_future(state=PENDING)
f2 = create_future(state=RUNNING)
f3 = create_future(state=CANCELLED)
f4 = create_future(state=CANCELLED_AND_NOTIFIED)
f5 = create_future(state=FINISHED, exception=OSError())
f6 = create_future(state=FINISHED, result=5)
self.assertTrue(f1.cancel())
self.assertEqual(f1._state, CANCELLED)
self.assertFalse(f2.cancel())
self.assertEqual(f2._state, RUNNING)
self.assertTrue(f3.cancel())
self.assertEqual(f3._state, CANCELLED)
self.assertTrue(f4.cancel())
self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
self.assertFalse(f5.cancel())
self.assertEqual(f5._state, FINISHED)
self.assertFalse(f6.cancel())
self.assertEqual(f6._state, FINISHED)
def test_cancelled(self):
self.assertFalse(PENDING_FUTURE.cancelled())
self.assertFalse(RUNNING_FUTURE.cancelled())
self.assertTrue(CANCELLED_FUTURE.cancelled())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
self.assertFalse(EXCEPTION_FUTURE.cancelled())
self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
def test_done(self):
self.assertFalse(PENDING_FUTURE.done())
self.assertFalse(RUNNING_FUTURE.done())
self.assertTrue(CANCELLED_FUTURE.done())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
self.assertTrue(EXCEPTION_FUTURE.done())
self.assertTrue(SUCCESSFUL_FUTURE.done())
def test_running(self):
self.assertFalse(PENDING_FUTURE.running())
self.assertTrue(RUNNING_FUTURE.running())
self.assertFalse(CANCELLED_FUTURE.running())
self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
self.assertFalse(EXCEPTION_FUTURE.running())
self.assertFalse(SUCCESSFUL_FUTURE.running())
def test_result_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.result, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
self.assertRaises(OSError, EXCEPTION_FUTURE.result, timeout=0)
self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
def test_result_with_success(self):
# TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.set_result(42)
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertEqual(f1.result(timeout=5), 42)
def test_result_with_cancel(self):
# TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.cancel()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertRaises(futures.CancelledError, f1.result, timeout=5)
def test_exception_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.exception, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
OSError))
self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
def test_exception_with_success(self):
def notification():
# Wait until the main thread is waiting for the exception.
time.sleep(1)
with f1._condition:
f1._state = FINISHED
f1._exception = OSError()
f1._condition.notify_all()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertTrue(isinstance(f1.exception(timeout=5), OSError))
@test.support.reap_threads
def test_main():
try:
test.support.run_unittest(__name__)
finally:
test.support.reap_children()
if __name__ == "__main__":
test_main()
|
composed_reader.py
|
#!/usr/bin/env python3
import logging
import sys
import threading
import time
from os.path import dirname, realpath; sys.path.append(dirname(dirname(dirname(realpath(__file__)))))
from logger.readers.reader import Reader
from logger.transforms.transform import Transform
from logger.utils import formats
# How long a reader thread should lie dormant before shutting down,
# counting on getting restarted again if/when needed. We need this
# so that our readers eventually terminate.
READER_TIMEOUT_WAIT = 0.25
################################################################################
class ComposedReader(Reader):
"""
Read lines from one or more Readers (in parallel) and process their
responses through zero or more Transforms (in series).
NOTE: we make the rash assumption that transforms are thread-safe,
that is, that no mischief or corrupted internal state will result if
more than one thread calls a transform at the same time. To be
thread-safe, a transform must protect any changes to its internal
state with a non-re-entrant thread lock, as described in the threading
module.
Also NOTE: Most of the messy logic in this class comes from the desire
to only call read() on our component readers when we actually need new
records (NOTE: this desire may be misplaced!).
So when we get a request, we fire up threads and ask each of our
readers for a record. We return the first one we get, and let the
others pile up in a queue that we'll feed from the next time we're
asked.
But we don't want to fire up a new thread for each reader every time
the queue is empty, so we have threads (in run_reader()) hang out for
a little while, waiting for another queue_needs_record event. If they
get one, they call their own read() methods again. If they haven't been
called on in READER_TIMEOUT_WAIT seconds, they exit, but will get
fired up again by read() if/when the queue is empty and we're asked
for another record.
It's important to have the run_reader threads time out, or any process
using a ComposedReader will never naturally terminate.
"""
############################
def __init__(self, readers, transforms=[], check_format=False):
"""
Instantiation:
```
reader = ComposedReader(readers, transforms=[], check_format=True)
readers A single Reader or a list of Readers.
transforms A single Transform or list of zero or more Transforms.
check_format If True, attempt to check that Reader/Transform formats
are compatible, and throw a ValueError if they are not.
If check_format is False (the default) the output_format()
of the whole reader will be formats.Unknown.
```
Use:
```
record = reader.read()
```
Sample:
```
reader = ComposedReader(readers=[NetworkReader(':6221'),
NetworkReader(':6223')],
transforms=[TimestampTransform()])
```
"""
# Make readers a list, even if it's only a single reader.
self.readers = readers if type(readers) == type([]) else [readers]
self.num_readers = len(self.readers)
# Transforms can be empty. But if not empty, make it a list, even
# if it's only a single transform.
if not type(transforms) == type([]):
self.transforms = [transforms]
else:
self.transforms = transforms
# If they want, check that our readers and transforms have
# compatible input/output formats.
output_format = formats.Unknown
if check_format:
output_format = self._check_reader_formats()
if not output_format:
raise ValueError('ComposedReader: No common output format found '
'for passed readers: %s' % [r.output_format()
for r in self.readers])
super().__init__(output_format=output_format)
# List where we're going to store reader threads
self.reader_threads = [None] * self.num_readers
# Whether reader[i] has returned EOF since we've last asked it
self.reader_returned_eof = [False] * self.num_readers
# One lock per reader, to save us from accidental re-entry
self.reader_locks = [threading.Lock() for i in range(self.num_readers)]
# Queue where we'll store extra records, and lock so only one
# thread can touch queue at a time
self.queue = []
self.queue_lock = threading.Lock()
# The two events, queue_has_record and queue_needs_record interact
# in a sort of a dance:
#
# has = False, need = False: Everything is quiescent
# has = False, need = True: A request has been made, call readers
# has = True, need = True: Momentary condition when we get needed rec
# has = True, need = False: We've got spare records in the queue
#
# Set when the queue is empty and we need a record
self.queue_needs_record = threading.Event()
# Set when a reader adds something to the queue
self.queue_has_record = threading.Event()
############################
def read(self):
"""
Get the next record from queue or readers.
"""
# If we only have one reader, there's no point making things
# complicated. Just read, transform, return.
if len(self.readers) == 1:
return self._apply_transforms(self.readers[0].read())
# Do we have anything in the queue? Note: safe to check outside of
# lock, because we're the only method that actually *removes*
# anything. So if tests True here, we're assured that there's
# something there, and we lock before retrieving it. Advantage of
# doing it this way is that we don't tie up queue lock while
# processing transforms.
if self.queue:
logging.debug('read() - read requested; queue len is %d',
len(self.queue))
with self.queue_lock:
record = self.queue.pop(0)
return self._apply_transforms(record)
# If here, nothing's in the queue. Note that, if we wanted to be
# careful to never unnecessarily ask for more records, we should
# put a lock around this, but the failure mode is somewhat benign:
# we ask for more records when some are already on the way.
logging.debug('read() - read requested and nothing in the queue.')
# Some threads may have timed out while waiting to be called to
# action; restart them.
for i in range(len(self.readers)):
if ((not self.reader_threads[i]
     or not self.reader_threads[i].is_alive())
    and not self.reader_returned_eof[i]):
logging.info('read() - starting thread for Reader #%d', i)
self.reader_returned_eof[i] = False
thread = threading.Thread(target=self._run_reader, args=(i,),
daemon=True)
self.reader_threads[i] = thread
thread.start()
# Now notify all threads that we do in fact need a record.
self.queue_needs_record.set()
# Keep checking/sleeping until we've either got a record in the
# queue or all readers have given us an EOF.
while False in self.reader_returned_eof:
logging.debug('read() - waiting for queue lock')
with self.queue_lock:
logging.debug('read() - acquired queue lock, queue length is %d',
len(self.queue))
if self.queue:
record = self.queue.pop(0)
if not self.queue:
self.queue_has_record.clear() # only set/clear inside queue_lock
logging.debug('read() - got record')
return self._apply_transforms(record)
else:
self.queue_has_record.clear()
# If here, nothing in queue yet. Wait
logging.debug('read() - clear of queue lock, waiting for record')
self.queue_has_record.wait(READER_TIMEOUT_WAIT)
if not self.queue_has_record.is_set():
logging.debug('read() - timed out waiting for record. Looping')
logging.debug('read() - readers returned EOF: %s',
self.reader_returned_eof)
# All readers have given us an EOF
logging.debug('read() - all threads returned None; returning None')
return None
############################
def _run_reader(self, index):
"""
Cycle through reading records from a readers[i] and putting them in queue.
"""
while True:
logging.debug(' Reader #%d waiting until record needed.', index)
self.queue_needs_record.wait(READER_TIMEOUT_WAIT)
# If we timed out waiting for someone to need a record, go
# home. We'll get started up again if needed.
if not self.queue_needs_record.is_set():
logging.debug(' Reader #%d timed out - exiting.', index)
return
# Else someone needs a record - leap into action
logging.debug(' Reader #%d waking up - record needed!', index)
# Guard against re-entry
with self.reader_locks[index]:
record = self.readers[index].read()
# If reader returns None, it's done and has no more data for us.
# Record that it has given us an EOF, and exit.
if record is None:
logging.info(' Reader #%d returned None, is done', index)
self.reader_returned_eof[index] = True
return
logging.debug(' Reader #%d has record, released reader_lock.', index)
# Add record to queue and note that an append event has
# happened.
with self.queue_lock:
# No one else can mess with queue while we add record. Once we've
# added it, set flag to say there's something in the queue.
logging.debug(' Reader #%d has queue lock - adding and notifying.',
index)
self.queue.append(record)
self.queue_has_record.set()
self.queue_needs_record.clear()
# Now clear of queue_lock
logging.debug(' Reader #%d released queue_lock - looping', index)
############################
def _apply_transforms(self, record):
"""
Apply the transforms in series.
"""
if record:
for t in self.transforms:
record = t.transform(record)
if not record:
break
return record
############################
def _check_reader_formats(self):
"""
Check that Reader outputs are compatible with each other and with
Transform inputs. Return None if not.
"""
# Find the lowest common format among readers
lowest_common = self.readers[0].output_format()
for reader in self.readers:
lowest_common = reader.output_format().common(lowest_common)
if not lowest_common:
return None
logging.debug('Lowest common format for readers is "%s"', lowest_common)
if not self.transforms:
return lowest_common
# Now check the transforms in series - output of each is input of
# next one.
for transform in self.transforms:
if not transform.input_format().can_accept(lowest_common):
logging.error('Transform %s can not accept input format %s',
transform, lowest_common)
return None
lowest_common = transform.output_format()
# Our final format is the lowest common format from last transform
return lowest_common
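################################################################################
# A minimal, illustrative sketch of driving a ComposedReader, assuming only
# the Reader interface used above: read() returns records, then None at EOF.
# ListReader is hypothetical, defined here purely for illustration.
if __name__ == '__main__':
  class ListReader(Reader):
    """Hypothetical reader that serves records from a list, then EOF."""
    def __init__(self, records):
      super().__init__(output_format=formats.Unknown)
      self.records = list(records)

    def read(self):
      # Return the next record, or None to signal EOF.
      return self.records.pop(0) if self.records else None

  reader = ComposedReader(readers=[ListReader(['a', 'b']), ListReader(['c'])])
  record = reader.read()
  while record is not None:
    print(record)
    record = reader.read()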
|
test_database_manager.py
|
import time
import unittest
import json
import os
from database_manager import Database
from threading import Thread
test_name = "___testing"
test_filename = test_name + ".json"
test_data = {
"1": 1,
"test": "test",
"list": ["1", 2, {"3": 3}],
"dict": {"test": "data"}
}
class TestDbStatic(unittest.TestCase):
def setUp(self) -> None:
""" Create a ___testing.json
In setup we are assuming that .create works so that one is in a seperate testcase
"""
Database.create(test_name, test_data, replace=True)
def tearDown(self) -> None:
""" Remove the test json file """
os.remove(test_name + ".json")
def test_lock_property(self):
db = Database(test_name)
self.assertEqual(db.lock, db.locks[test_name])
def test_get_lock(self):
self.assertEqual(Database.get_lock(test_name), Database.locks[test_name])
def test_name_property(self):
db = Database(test_name)
self.assertEqual(db.name, test_name)
def test_read(self):
self.assertEqual(Database.read(test_name), test_data)
def test_write(self):
data = {"woah": ['1', '2', '3'], "some": "test", "data": 1}
Database.write(test_name, data)
self.assertEqual(Database.read(test_name), data)
def test_add(self):
Database.add(test_name, "some_key", "some_data")
data = Database.read(test_name)
self.assertIn("some_key", data)
self.assertEqual(data["some_key"], "some_data")
Database.add(test_name, "some_key", {"some_data": 1})
data = Database.read(test_name)
self.assertIn("some_key", data)
self.assertEqual(data["some_key"], {"some_data": 1})
def test_append(self):
Database.write(test_name, {test_name: []})
Database.append(test_name, "hi")
data = Database.read(test_name)
self.assertIn(test_name, data)
self.assertIn("hi", data[test_name])
Database.append(test_name, "hi2")
data = Database.read(test_name)
self.assertIn(test_name, data)
self.assertIn("hi", data[test_name])
self.assertIn("hi2", data[test_name])
Database.append(test_name, "hi")
def test_translate(self):
self.assertEqual(Database.translate(test_name, "test"), "test")
self.assertEqual(Database.translate(test_name, "1"), 1)
def test_info(self):
self.assertIsInstance(Database.info(), dict, "Database.info did not return a dict")
self.assertIn("backup_directory_name", Database.info(), "Database.info does not have backup_directory_name")
self.assertIn("my_path", Database.info(), "Database.info does not have my_path")
self.assertIn("backup_folder_path", Database.info(), "Database.info does not have backup_folder_path")
self.assertIn("locks", Database.info(), "Database.info does not have locks")
class TestDatabaseInstance(unittest.TestCase):
def setUp(self) -> None:
""" Create a ___testing.json
In setup we are assuming that .create works so that one is in a seperate testcase
"""
Database.create(test_name, test_data, replace=True)
def tearDown(self) -> None:
""" Remove the test json file """
os.remove(test_name + ".json")
def test_translates(self):
db = Database(test_name)
self.assertEqual(db.translates("test"), "test")
self.assertEqual(db.translates("1"), 1)
def test_appends(self):
db = Database(test_name)
db.writes({test_name: []})
db.append(test_name, "hi")
data = db.reads()
self.assertIn(test_name, data)
self.assertIn("hi", data[test_name])
db.appends("hi2")
data = db.reads()
self.assertIn(test_name, data)
self.assertIn("hi", data[test_name])
self.assertIn("hi2", data[test_name])
Database.append(test_name, "hi")
def test_adds(self):
db = Database(test_name)
db.adds("some_key", "some_data")
data = db.reads()
self.assertIn("some_key", data)
self.assertEqual(data["some_key"], "some_data")
db.adds("some_key", {"some_data": 1})
data = db.reads()
self.assertIn("some_key", data)
self.assertEqual(data["some_key"], {"some_data": 1})
def test_writes(self):
db = Database(test_name)
db.data = {"woah": ['1', '2', '3'], "some": "test", "data": 1}
db.writes()
self.assertEqual(db.reads(), db.data)
db.writes({"woah": ['1', '2', '3'], "some": "test", "data": 1})
self.assertEqual(db.reads(), {"woah": ['1', '2', '3'], "some": "test", "data": 1})
def test_reads(self):
db = Database(test_name)
self.assertEqual(db.reads(), test_data)
def test__contains__(self):
db = Database(test_name)
self.assertTrue("test" in db)
self.assertTrue("1" in db)
self.assertFalse("2" in db)
def test_context_manager(self):
with Database(test_name) as db:
db["aapje"] = "aapje"
db["aapje1"] = ["aapje"]
data = Database.read(test_name)
self.assertEqual(data["aapje"], "aapje")
self.assertEqual(data["aapje1"], ["aapje"])
def test_context_manager_with_error(self):
with self.assertRaises(KeyError):
with Database(test_name) as db:
db["aapje"] = "aapje"
raise KeyError
data = Database.read(test_name)
self.assertNotIn("aapje", data)
def test_context_manager_with_malformed(self):
with self.assertRaises(TypeError):
with Database(test_name) as db:
db["aaa"] = "aaa"
db["aapje"] = Exception # It can't write this so no data is written
data = Database.read(test_name)
self.assertNotIn("aapje", data)
self.assertNotIn("aaa", data)
    def test_thread_safe(self):
        # This should take 2 seconds, showing that the threads waited on each other
        db = Database(test_name)

        def thread_write(database_object: Database, _id: int, data: str):
            with database_object as dbs:
                dbs["thread"] = _id
                time.sleep(1)
                dbs["thread_data"] = data
s = time.time()
t = Thread(target=thread_write, args=(db, 1, "lala1"))
t2 = Thread(target=thread_write, args=(db, 2, "lala2"))
t.start()
t2.start()
t.join()
t2.join()
s1 = time.time()
data = Database.read(test_name)
self.assertEqual(data["thread"], 2)
self.assertEqual(data["thread_data"], "lala2")
self.assertAlmostEqual(s1 - s, 2.0, 1)
class TestCreateBackups(unittest.TestCase):
def setUp(self) -> None:
""" Create a ___testing.json
In setup we are assuming that .create works so that one is in a seperate testcase"""
Database.create(test_name, test_data, replace=True)
os.mkdir(Database.backup_folder_path)
self.backups_already_there = set(os.listdir(Database.backup_folder_path))
def tearDown(self) -> None:
""" Remove the test json file """
os.remove(os.path.join(self.new_file_full_path, test_filename))
os.removedirs(self.new_file_full_path)
def test_create_backup(self):
Database.create_backup([test_name])
self.assertTrue(os.path.exists(Database.backup_folder_path)) # is backup folder there
files_in_backup_dir = set(os.listdir(Database.backup_folder_path))
self.assertTrue(len(files_in_backup_dir) == len(self.backups_already_there) + 1) # is another folder added
self.new_file_list = list(files_in_backup_dir - self.backups_already_there)
self.new_file_full_path = os.path.join(Database.backup_folder_path, self.new_file_list[0])
self.assertEqual(len(self.new_file_list), 1)
self.assertEqual(len(os.listdir(self.new_file_full_path)), 1)
class TestDBCreate(unittest.TestCase):
def test_create(self):
Database.create(test_name, test_data)
# Is the file there
self.assertTrue(os.path.exists(test_filename))
# Does the file have the data
with open(test_filename) as f:
self.assertEqual(json.load(f), test_data)
# is the lock added
self.assertIn(test_name, Database.locks)
with open(test_filename) as f:
# does replace=True/False work
Database.create(test_name, {"hi": "there!"}) # Should not be created!
self.assertEqual(json.load(f), test_data)
with open(test_filename) as f:
Database.create(test_name, {"hi": "there"}, replace=True)
self.assertEqual(json.load(f), {"hi": "there"})
with open(test_filename) as f:
Database.create(test_name, {"hi": "there2"})
self.assertEqual(json.load(f), {"hi": "there"})
def tearDown(self) -> None:
""" Remove the test json file """
os.remove(test_filename)
if __name__ == '__main__':
unittest.main()
|
comm_funcs.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
公用函数库/类
"""
import random
import requests
import datetime
import json
import calendar
import os
import pandas as pd
import threading
import aiohttp
import yaml
import akshare as ak
import sqlalchemy
import smtplib
import pymysql
from pymysql.cursors import DictCursor
from email.mime.text import MIMEText
from redis import StrictRedis
def ua_random():
"""
随机获取一个user-agent
:return: user-agent
"""
user_agent_list = [
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/91.0.4472.124 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393"
]
return random.choice(user_agent_list)
async def async_crawl(url):
header = {
'user-agent': ua_random()
}
async with aiohttp.ClientSession() as session:
async with session.get(url, headers=header) as response:
return await response.text()
def requests_get(url):
"""
利用request模拟一个get请求
:param url:
:return:
"""
header = {
'user-agent': ua_random()
}
    data = requests.get(url, headers=header)
if data.status_code == 200:
return data.text
return None
def get_current_date():
"""
获取当前日期
:return: str,日期
"""
return datetime.datetime.now().strftime('%Y-%m-%d')
def except_handle(exception_handle):
print(exception_handle)
def get_page_num(url):
"""
获取分页数量
:param url:
"""
try:
requests_text = requests_get(url)
return json.loads(requests_text)
except Exception as e:
        # pass to the exception handler
except_handle(e)
def time_last_day_of_month(year=None, month=None):
"""
获取当前月的最后一天
:return:
"""
if year is None:
year = datetime.datetime.now().year
if month is None:
month = datetime.datetime.now().month
day = calendar.monthrange(year, month)[1]
if len(str(month)) == 1:
month = '0' + str(month)
return '-'.join([str(year), str(month), str(day)])
class Singleton(object):
"""
单例模式
"""
_instance_lock = threading.Lock()
_conf = None
def __init__(self):
pass
    def __new__(cls, *args, **kwargs):
        _instance = kwargs.get("instance", cls.__name__ + "_instance")
        if not hasattr(Singleton, _instance):
            with Singleton._instance_lock:
                if not hasattr(Singleton, _instance):
                    # store the instance under the per-class attribute name that
                    # the hasattr() checks above actually look for
                    setattr(Singleton, _instance, object.__new__(cls))
        return getattr(Singleton, _instance)
class YamlConfigParser(Singleton):
"""
单例模式
"""
_conf = None
@classmethod
def get_config(cls):
if not cls._conf:
            # read the configuration file
work_path = os.path.dirname(os.path.realpath(__file__))
yaml_file = os.path.join(work_path, 'config.yaml')
with open(yaml_file, 'r', encoding="utf-8") as file:
file_data = file.read()
yaml_config = yaml.safe_load(file_data)
setattr(cls, "_conf", yaml_config)
return cls._conf
def get_config(read_default_group=None, key=None):
"""
获取yaml文件参数
:param read_default_group: 参数分类,可选,如果不传则返回整个yaml配置
:param key: 参数名,可选,必须和read_default_group一起使用
:return:dict|string
"""
config_parser = YamlConfigParser()
yaml_global_config = config_parser.get_config()
    # if a group was given, drill down into it
if read_default_group is not None:
if read_default_group not in yaml_global_config:
            raise KeyError("config group {} does not exist".format(read_default_group))
group_config = yaml_global_config[read_default_group]
if key is not None:
if key not in group_config:
                raise KeyError("config group {} has no key {}".format(read_default_group, key))
            # return a single config value
return group_config[key]
        # return all values in the group
return group_config
    # return the whole config
return yaml_global_config
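# Usage sketch (illustrative; assumes config.yaml defines an 'email' group, as send_main below expects):
#   get_config()                      -> the whole config dict
#   get_config("email")               -> the 'email' group dict
#   get_config("email", "mail_host")  -> a single value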
def send_main(contents='', subject='', receivers=None):
"""
发送邮件
:param contents: 发送内容
:param subject: 发送主题
:param receivers: 接收人
:return:
"""
    # server connection settings
    mail_config = get_config()
    mail_host = mail_config['email']['mail_host']
    # username
    mail_user = mail_config['email']['mail_user']
    # password (for some providers this is an authorization code)
    mail_pass = mail_config['email']['mail_pass']
    # sender address
    sender = mail_config['email']['mail_sender']
if receivers is None and "mail_receiver" in mail_config['email'] and mail_config['email']['mail_receiver']:
        # recipient addresses; note the list wrapper, which lets you pass several addresses for a group send
        receivers = [mail_config['email']['mail_receiver']]
    elif not isinstance(receivers, list):
        raise TypeError("receivers must be a non-empty list, e.g. ['111243204893084039@qq.com']")
    # plain-text message body
    message = MIMEText(contents, 'plain', 'utf-8')
    # subject
    message['Subject'] = subject
    # sender info
    message['From'] = sender
    # recipient info
message['To'] = receivers[0]
try:
smtp_obj = smtplib.SMTP()
        # connect to the server
        smtp_obj.connect(mail_host, 25)
        # log in
        smtp_obj.login(mail_user, mail_pass)
        # send
        smtp_obj.sendmail(sender, receivers, message.as_string())
        # quit
smtp_obj.quit()
return True
except smtplib.SMTPException as e:
raise smtplib.SMTPException(e)
def get_csv_path(symbol, period="daily") -> str:
"""
获取个股本地csv路径
:param period: 日线:daily; 周线:weekly; 月线: monthly
:param symbol: 个股代码
:return: str
"""
config = get_config("save_path")
save_file_path = config["stock"][period]["path"]
save_file_name = config["stock"][period]["file_name"].replace("<<stock_code>>", symbol)
return os.path.join(save_file_path, save_file_name)
def get_symbol(
symbol: str,
period: str = "daily",
adjust: str = "",
start_date: str = "",
end_date: str = "",
downloaded: bool = True
):
"""
获取df
:param symbol: 个股代码
:param period: 日线:daily; 周线:weekly; 月线: monthly
:param adjust: 复权类型,前复权:"qfq";后复权:"hfq";"不复权":"", 默认不复权
:param start_date: 开始时间
:param end_date: 结束时间
:param downloaded: 是否需要下载到本地
:return:
"""
file_name = get_csv_path(symbol, period)
    # prefer downloading the latest data to the local csv
if downloaded:
return down_symbol(symbol, period, is_return=True)
    if not end_date:
        end_date = get_current_date().replace('-', '')
if os.path.exists(file_name):
return pd.read_csv(file_name)
# df.set_index('trade_date', inplace=True, drop=False)
# df.sort_index(ascending=True, inplace=True)
else:
param = {
"symbol": symbol,
"period": period,
"start_date": start_date,
"end_date": end_date,
"adjust": adjust,
}
return ak.stock_zh_a_hist(**param)
def down_symbol(
symbol: str,
period: str = "daily",
is_return: bool = False,
adjust: str = ""
) -> pd.DataFrame:
"""
下载个股记录并保存为csv到本地
:param symbol: 个股代码
:param period: 日线:daily; 周线:weekly; 月线: monthly
:param is_return: 是否需要返回
:param adjust: 复权类型,前复权:"qfq";后复权:"hfq";"不复权":"", 默认不复权
:return: any
"""
try:
save_filename = get_csv_path(symbol, period)
param = {
"symbol": symbol,
"period": period,
"adjust": adjust,
}
symbol_df = ak.stock_zh_a_hist(**param)
symbol_df.to_csv(save_filename, index=False)
if is_return:
return symbol_df
except Exception as e:
        raise ValueError('failed to download {}: {}'.format(symbol, e))
def find_trade_date(return_format="date", trade_date=None):
"""
返回最近一个交易日
:param return_format: 返回的格式,date: 返回 datetime.date; int: 返回整型,如:20210917;str: 返回字符串格式,如2021-09-17
:param trade_date: 如果传值了,查找距离该值最近的交易日,比如2021-09-18(周六),最近的交易日是2021-09-17;
如果传值的日期是交易日,则返回本身;因此也可以作为判断某天是否是交易日(只需要判断返回值和传值是否相等)
:return:
"""
if trade_date is None:
trade_date = datetime.datetime.now().date()
if not isinstance(trade_date, datetime.date):
        # convert to datetime.date if something else was passed
if isinstance(trade_date, str):
year, month, day = trade_date.split('-')
trade_date = datetime.date(int(year), int(month), int(day))
if isinstance(trade_date, int):
year = int(str(trade_date)[0:4])
month = int(str(trade_date)[4:6])
day = int(str(trade_date)[6:8])
trade_date = datetime.date(int(year), int(month), int(day))
find = 0
last_trade_date = trade_date
redis_client = RedisClient()
redis_conn = redis_client.get_redis_client()
key = get_config("cache", "history_td_set")
if redis_conn.sismember(key, int(str(trade_date).replace('-', ''))):
find = 1
last_trade_date = trade_date
    # not found in the cache, fall back to the API
if find == 0:
df = ak.tool_trade_date_hist_sina()
today_df = df.loc[df['trade_date'] == trade_date]
if today_df.empty:
            # keep searching backwards for the previous trading day
today_df = df.loc[df['trade_date'] < trade_date].tail(1)
last_trade_date = today_df['trade_date'].values[0]
        # write the result back to the cache
redis_conn.sadd(key, int(str(last_trade_date).replace('-', '')))
if return_format == 'int':
return int(str(last_trade_date).replace('-', ''))
if return_format == 'str':
return str(last_trade_date)
return last_trade_date
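# Usage sketch, mirroring the examples in the docstring above:
#   find_trade_date("str", "2021-09-18")  -> "2021-09-17"  (a Saturday falls back to Friday)
#   find_trade_date("int", "2021-09-17")  -> 20210917      (a trading day returns itself)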
def get_trade_detail(symbol: str = "", trade_date: str = "") -> dict:
"""
获取一个交易日交易信息
:param symbol: 代码
:type symbol: str
:param trade_date: 交易日
:type symbol: str
:return: 交易信息
:rtype: dict
"""
param = {
"symbol": symbol,
"period": "daily",
"start_date": trade_date.replace("-", ""),
"end_date": trade_date.replace("-", ""),
}
symbol_df = ak.stock_zh_a_hist(**param)
if not symbol_df.empty:
key = symbol_df.columns.values
val = symbol_df.values[0]
return dict(zip(key, val))
return {}
def get_db_config():
"""
获取服务器链接
:return: host, user, password, database, port, charset
"""
db_config = get_config()
if "mysql" in db_config:
host = db_config['mysql']['host']
user = db_config['mysql']['user']
password = db_config['mysql']['password']
database = db_config['mysql']['database']
port = db_config['mysql']['port']
charset = db_config['mysql']['charset']
return host, user, password, database, port, charset
else:
raise KeyError("mysql configuration must exist")
def get_db_engine_for_pandas():
"""
获取mysql连接,主要搭配pandas使用
:return:
"""
host, user, password, database, port, charset = get_db_config()
cnf = "mysql+pymysql://{}:{}@{}:{}/{}".format(
user, password, host, port, database)
return sqlalchemy.create_engine(cnf)
# return sqlalchemy.create_engine(cnf, echo=True)
class RedisClient(Singleton):
_client = None
@classmethod
def get_redis_client(cls, db=None):
"""
获取redis连接
:return:
"""
if not cls._client:
redis_config = get_config()
redis_db = int(redis_config['redis']['queue_db'])
if db is not None:
redis_db = db
client = StrictRedis(
host=redis_config['redis']['host'],
port=int(redis_config['redis']['port']),
db=redis_db,
password=redis_config['redis']['password'],
decode_responses=True)
setattr(cls, "_client", client)
return cls._client
def get_redis_client(db=None):
"""
获取redis连接
:return:
"""
redis_config = get_config()
redis_host = redis_config['redis']['host']
redis_port = int(redis_config['redis']['port'])
redis_queue_db = int(redis_config['redis']['queue_db'])
redis_passwd = redis_config['redis']['password']
if db is not None:
redis_queue_db = db
return StrictRedis(
host=redis_host,
port=redis_port,
db=redis_queue_db,
password=redis_passwd,
decode_responses=True)
def get_mysql_client():
"""
获取数据库连接
:return:
"""
host, user, password, database, port, charset = get_db_config()
db_config = {
'host': host,
'user': user,
'password': password,
'database': database,
'port': int(port),
'charset': charset
}
return pymysql.connect(**db_config)
class MysqlClient(Singleton):
_mysql_client = None
@classmethod
def get_client(cls, dict_cursor=True, db=None):
"""
获取mysql连接
:return:
"""
if not cls._mysql_client:
host, user, password, database, port, charset = get_db_config()
if db is not None:
database = db
db_config = {
'host': host,
'user': user,
'password': password,
'database': database,
'port': int(port),
'charset': charset
}
if dict_cursor:
db_config['cursorclass'] = DictCursor
client = pymysql.connect(**db_config)
setattr(cls, "_mysql_client", client)
return cls._mysql_client
def get_mysql_client_dict():
"""
获取数据库链接
:return:
"""
host, user, password, database, port, charset = get_db_config()
db_config = {
'host': host,
'user': user,
'password': password,
'database': database,
'port': int(port),
'charset': charset,
'cursorclass': DictCursor
}
return pymysql.connect(**db_config)
def date_to_int(date, slug='-'):
"""
将日期转换成整数,如2020-10-12 转换成20201012
:param date:
:param slug: 分隔符
:return:
"""
return str(date).replace(slug, '')
def int_to_date(number, slug='-'):
"""
将数字转换成日期,如20201012 转成2020-10-12
:param number:
:param slug: 分隔符
:return:
"""
number_str = str(number)
if len(number_str) == 8:
return slug.join([number_str[0:4], number_str[4:6], number_str[6:8]])
return ''
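# Round-trip sketch:
#   date_to_int('2020-10-12')  -> '20201012'
#   int_to_date(20201012)      -> '2020-10-12'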
def check_work_time(**kwargs):
"""
检测是否在工作时段,如果不在工作时段,则休眠,默认是 9:25:00.000 - 15:00:00.000
@param: start_hour:开始工作的小时, 默认为上午9
@param: start_minute:开始工作的分钟, 默认为25
@param: start_second:开始工作的秒,默认为0
@param: start_microsecond:开始工作的毫秒,默认为0
@param: end_hour:结束工作的小时,默认为下午15
@param: end_minute:结束工作的分钟,默认为0
@param: end_second:结束工作的秒,默认为0
@param: end_microsecond:结束工作的毫秒,默认为0
使用例子:设置工作时间段为上午10点30分30秒 至 下午14点30分20秒
t = {"end_hour": 14, "end_minute": 30, "end_second": 20, "start_hour": 10, "start_minute": 30, "start_second": 30}
self.check_work_time(**t)
"""
now = datetime.datetime.today()
date = str(now.date())
    # first, check whether today is a trading day
    if date != find_trade_date(return_format="str", trade_date=date):
        # not a trading day: sleep a whole day
one_day = 24 * 60 * 60
return one_day
start_hour = kwargs.get("start_hour", 9)
start_minute = kwargs.get("start_minute", 25)
start_second = kwargs.get("start_second", 0)
start_microsecond = kwargs.get("start_microsecond", 0)
start_time = datetime.datetime(now.year, now.month, now.day, start_hour,
start_minute, start_second, start_microsecond)
    # check whether trading has started yet
if start_time > now:
time_delta = start_time - now
        # not started yet, sleep until the start
return time_delta.seconds
end_hour = kwargs.get("end_hour", 15)
end_minute = kwargs.get("end_minute", 0)
end_second = kwargs.get("end_second", 0)
end_microsecond = kwargs.get("end_microsecond", 0)
end_time = datetime.datetime(now.year, now.month, now.day,
end_hour, end_minute, end_second, end_microsecond)
if now > end_time:
next_start_time = start_time + datetime.timedelta(days=1)
time_delta = next_start_time - now
        # already over, sleep until the next day's start time
return time_delta.seconds
return -1
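# A minimal polling loop built on check_work_time (illustrative only; assumes `import time`):
#   while True:
#       seconds = check_work_time()
#       if seconds < 0:
#           break              # inside trading hours, start working
#       time.sleep(seconds)    # sleep until the next session opens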
def is_stock_rise(item_code):
"""
涨停的百分比
:param item_code:
:return:
"""
if item_code[0:2] == '68' or item_code[0:1] == '3':
return 1.2
return 1.1
def rise_percent(item_code):
"""
获取涨停的百分比
:param item_code:
:return:
"""
return is_stock_rise(item_code)
def get_rise_price(item_code, yclose):
"""
计算涨停的价格
:param item_code:
:param yclose:
:return:
"""
yclose = float(yclose)
stock_rise_percent = rise_percent(item_code)
return round(yclose * stock_rise_percent, 2)
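# Examples: main-board codes limit up at +10%, ChiNext ('3...') and STAR ('68...') at +20%:
#   get_rise_price('600000', 10.00)  -> 11.0
#   get_rise_price('300750', 10.00)  -> 12.0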
if __name__ == "__main__":
# time_param = {"end_hour": 15, "end_minute": 0, "end_second": 0, "start_hour": 9, "start_minute": 30}
# print(check_work_time(**time_param)/3600)
# redis_client = RedisClient()
# redis_conn = redis_client.get_redis_client()
# print(redis_client)
redis_client = get_config()
print(id(redis_client))
def aa():
# redis_client = RedisClient()
redis_client = get_config()
print(id(redis_client))
t1 = threading.Thread(target=aa, args=[])
t2 = threading.Thread(target=aa, args=[])
t1.start()
import time
time.sleep(10)
t2.start()
|
COMP_Crawler.py
|
# -*- coding:utf-8 -*-
#env: python2.7
# not sure about correctness, but at least it runs.
import urllib2
import os
import re
import urllib
import multiprocessing
def cbk(a, b, c):
    # download progress callback: a = blocks transferred, b = block size, c = total size
    per = 100.0 * a * b / c
    if per > 100:
        per = 100
    print '%.2f%%' % per
def download(url, name):
urllib.urlretrieve(url, name)
def scan(url, file_name):
os.mkdir(base_dir + "\\" + file_name)
os.chdir(base_dir + "\\" + file_name)
result = urllib2.urlopen(url).read()
pattern = re.compile(r"<tr>(.*?)</tr>", re.DOTALL)
f_list = pattern.findall(result)
current_folder_list = []
for list_item in f_list:
pattern_1 = re.compile(r"(\d\d\d\d-\d\d-\d\d \d\d:\d\d)", re.DOTALL)
if pattern_1.search(list_item):
pattern_2 = re.compile(r"<a href=\"(.+?)\">", re.DOTALL)
current_folder_list.extend(pattern_2.findall(list_item))
next_folder_list = []
pdf_list = []
for item in current_folder_list:
pattern_3 = re.compile(r"pdf|ppt", re.DOTALL)
pattern_4 = re.compile(r"/$", re.DOTALL)
if pattern_3.search(item):
pdf_list.append(item)
elif pattern_4.search(item):
next_folder_list.append(item)
for item in pdf_list:
# urllib.urlretrieve(url+r"/"+item,item,cbk)
p = multiprocessing.Process(target=download, args=(url + r"/" + item, item))
# download(url+r"/"+item,item)
p.start()
# p.join()
    print file_name
for folder in next_folder_list:
        print url + folder, file_name + folder
scan(url + folder, file_name + folder)
res_dic = {"comp323": "https://cgi.csc.liv.ac.uk/~spirakis/COMP323-Fall2017/",
"comp329": "https://cgi.csc.liv.ac.uk/~rmw/329_info.php",
"comp319": "https://cgi.csc.liv.ac.uk/~coopes/comp319/",
}
base_dir = "C:\\Users\\Jeff\\desktop"
'''
os.makedirs(base_dir+"lecture")
#os.chdir("lecture")
for item in res_dic:
result = urllib2.urlopen(res_dic[item]).read()
#pattern = r"<tr>(.*)?<a href=\"(.+)?\">(.*)?(\d\d\d\d-\d\d-\d\d \d\d:\d\d)(.*)?</tr>"
pattern= re.compile(r"<tr>(.*?)</tr>",re.DOTALL)
f_list = pattern.findall(result)
current_folder_list = []
for list_item in f_list:
pattern_1 = re.compile(r"(\d\d\d\d-\d\d-\d\d \d\d:\d\d)",re.DOTALL)
if pattern_1.search(list_item):
pattern_2 = re.compile(r"<a href=\"(.+?)\">",re.DOTALL)
current_folder_list.extend(pattern_2.findall(list_item))
'''
if __name__ == "__main__":
scan(res_dic["comp319"], "comp319/")
|
manager.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import errno
import traceback
import socket
import logging
import json
import collections
from shadowsocks import common, eventloop, tcprelay, udprelay, asyncdns, shell
BUF_SIZE = 1506
STAT_SEND_LIMIT = 50
class Manager(object):
def __init__(self, config):
self._config = config
self._relays = {} # (tcprelay, udprelay)
self._loop = eventloop.EventLoop()
self._dns_resolver = asyncdns.DNSResolver()
self._dns_resolver.add_to_loop(self._loop)
self._statistics = collections.defaultdict(int)
self._control_client_addr = None
try:
manager_address = config['manager_address']
if ':' in manager_address:
addr = manager_address.rsplit(':', 1)
addr = addr[0], int(addr[1])
addrs = socket.getaddrinfo(addr[0], addr[1])
if addrs:
family = addrs[0][0]
else:
logging.error('invalid address: %s', manager_address)
exit(1)
else:
addr = manager_address
family = socket.AF_UNIX
self._control_socket = socket.socket(family,
socket.SOCK_DGRAM)
self._control_socket.bind(addr)
self._control_socket.setblocking(False)
except (OSError, IOError) as e:
logging.error(e)
logging.error('can not bind to manager address')
exit(1)
self._loop.add(self._control_socket,
eventloop.POLL_IN, self)
self._loop.add_periodic(self.handle_periodic)
port_password = config['port_password']
del config['port_password']
for port, password in port_password.items():
a_config = config.copy()
a_config['server_port'] = int(port)
a_config['password'] = password
self.add_port(a_config)
def add_port(self, config):
port = int(config['server_port'])
servers = self._relays.get(port, None)
if servers:
logging.error("server already exists at %s:%d" % (config['server'],
port))
return
logging.info("adding server at %s:%d" % (config['server'], port))
t = tcprelay.TCPRelay(config, self._dns_resolver, False,
self.stat_callback)
u = udprelay.UDPRelay(config, self._dns_resolver, False,
self.stat_callback)
t.add_to_loop(self._loop)
u.add_to_loop(self._loop)
self._relays[port] = (t, u)
def remove_port(self, config):
port = int(config['server_port'])
servers = self._relays.get(port, None)
if servers:
logging.info("removing server at %s:%d" % (config['server'], port))
t, u = servers
t.close(next_tick=False)
u.close(next_tick=False)
del self._relays[port]
else:
logging.error("server not exist at %s:%d" % (config['server'],
port))
def handle_event(self, sock, fd, event):
if sock == self._control_socket and event == eventloop.POLL_IN:
data, self._control_client_addr = sock.recvfrom(BUF_SIZE)
parsed = self._parse_command(data)
if parsed:
command, config = parsed
a_config = self._config.copy()
if config:
# let the command override the configuration file
a_config.update(config)
if 'server_port' not in a_config:
logging.error('can not find server_port in config')
else:
if command == 'add':
self.add_port(a_config)
self._send_control_data(b'ok')
elif command == 'remove':
self.remove_port(a_config)
self._send_control_data(b'ok')
elif command == 'ping':
self._send_control_data(b'pong')
else:
logging.error('unknown command %s', command)
def _parse_command(self, data):
# commands:
# add: {"server_port": 8000, "password": "foobar"}
        # remove: {"server_port": 8000}
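        # Sketch of how the parser below handles the wire format:
        #   b'add: {"server_port": 8001, "password": "pw"}' -> ('add', {'server_port': 8001, 'password': 'pw'})
        #   b'ping' (no colon)                              -> ('ping', None)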
data = common.to_str(data)
parts = data.split(':', 1)
if len(parts) < 2:
return data, None
command, config_json = parts
try:
config = shell.parse_json_in_str(config_json)
return command, config
except Exception as e:
logging.error(e)
return None
def stat_callback(self, port, data_len):
self._statistics[port] += data_len
def handle_periodic(self):
r = {}
i = 0
def send_data(data_dict):
if data_dict:
# use compact JSON format (without space)
data = common.to_bytes(json.dumps(data_dict,
separators=(',', ':')))
self._send_control_data(b'stat: ' + data)
for k, v in self._statistics.items():
r[k] = v
i += 1
# split the data into segments that fit in UDP packets
if i >= STAT_SEND_LIMIT:
send_data(r)
r.clear()
i = 0
if len(r) > 0:
send_data(r)
self._statistics.clear()
def _send_control_data(self, data):
if not self._control_client_addr:
return
try:
self._control_socket.sendto(data, self._control_client_addr)
except (socket.error, OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
return
else:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
def run(self):
self._loop.run()
def run(config):
Manager(config).run()
def test():
import time
import threading
import struct
from shadowsocks import encrypt
logging.basicConfig(level=5,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
enc = []
eventloop.TIMEOUT_PRECISION = 1
def run_server():
config = {
'server': '127.0.0.1',
'local_port': 1081,
'port_password': {
'8381': 'foobar1',
'8382': 'foobar2'
},
'method': 'aes-256-cfb',
'manager_address': '127.0.0.1:6001',
'timeout': 60,
'fast_open': False,
'verbose': 2
}
manager = Manager(config)
enc.append(manager)
manager.run()
t = threading.Thread(target=run_server)
t.start()
time.sleep(1)
manager = enc[0]
cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
cli.connect(('127.0.0.1', 6001))
# test add and remove
time.sleep(1)
cli.send(b'add: {"server_port":7001, "password":"asdfadsfasdf"}')
time.sleep(1)
assert 7001 in manager._relays
data, addr = cli.recvfrom(1506)
assert b'ok' in data
cli.send(b'remove: {"server_port":8381}')
time.sleep(1)
assert 8381 not in manager._relays
data, addr = cli.recvfrom(1506)
assert b'ok' in data
logging.info('add and remove test passed')
# test statistics for TCP
header = common.pack_addr(b'google.com') + struct.pack('>H', 80)
data = encrypt.encrypt_all(b'asdfadsfasdf', 'aes-256-cfb', 1,
header + b'GET /\r\n\r\n')
tcp_cli = socket.socket()
tcp_cli.connect(('127.0.0.1', 7001))
tcp_cli.send(data)
tcp_cli.recv(4096)
tcp_cli.close()
data, addr = cli.recvfrom(1506)
data = common.to_str(data)
assert data.startswith('stat: ')
data = data.split('stat:')[1]
stats = shell.parse_json_in_str(data)
assert '7001' in stats
logging.info('TCP statistics test passed')
# test statistics for UDP
header = common.pack_addr(b'127.0.0.1') + struct.pack('>H', 80)
data = encrypt.encrypt_all(b'foobar2', 'aes-256-cfb', 1,
header + b'test')
udp_cli = socket.socket(type=socket.SOCK_DGRAM)
udp_cli.sendto(data, ('127.0.0.1', 8382))
tcp_cli.close()
data, addr = cli.recvfrom(1506)
data = common.to_str(data)
assert data.startswith('stat: ')
data = data.split('stat:')[1]
stats = json.loads(data)
assert '8382' in stats
logging.info('UDP statistics test passed')
manager._loop.stop()
t.join()
if __name__ == '__main__':
test()
|
server.py
|
from math import floor
from world import World
import Queue
import SocketServer
import datetime
import random
import re
import requests
import urllib2
import json
import sqlite3
import sys
import threading
import time
import traceback
import os
import signal
import boto3
import base64
from botocore.exceptions import ClientError
#rdsData = boto3.client('rds-data',region_name="us-west-2")
region=os.environ['REGION']
rds_client = boto3.client('rds-data', region_name=region)
cluster_arn = os.environ['CLUSTER_ARN']
#cluster_arn = 'arn:aws:rds:us-west-2:163538056407:cluster:craft'
secret_arn = os.environ['SECRET_ARN']
#secret_arn = 'arn:aws:secretsmanager:us-west-2:163538056407:secret:craft-login-HdmZgN'
database_name = os.environ['DB_NAME']
#database_name = 'craft'
agones_port = os.environ['AGONES_SDK_HTTP_PORT']
DEFAULT_HOST = '0.0.0.0'
DEFAULT_PORT = 4080
DB_PATH = 'craft.db'
LOG_PATH = 'log.txt'
CHUNK_SIZE = 32
BUFFER_SIZE = 4096
COMMIT_INTERVAL = 5
AUTH_REQUIRED = False
AUTH_URL = 'https://craft.michaelfogleman.com/api/1/access'
DAY_LENGTH = 600
#SPAWN_POINT = (0, 0, 0, 0, 0)
SPAWN_POINT = os.environ.get('SPAWN_POINT', '0,0,0,0,0').split(",")
RATE_LIMIT = False
RECORD_HISTORY = False
INDESTRUCTIBLE_ITEMS = set([16])
ALLOWED_ITEMS = set([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
17, 18, 19, 20, 21, 22, 23,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63])
AUTHENTICATE = 'A'
BLOCK = 'B'
CHUNK = 'C'
DISCONNECT = 'D'
KEY = 'K'
LIGHT = 'L'
NICK = 'N'
POSITION = 'P'
REDRAW = 'R'
SIGN = 'S'
TALK = 'T'
TIME = 'E'
VERSION = 'V'
YOU = 'U'
try:
from config import *
except ImportError:
pass
def execute_rds_statement(sql, sql_parameters=None):
    # avoid a mutable default argument
    sql_parameters = sql_parameters or []
    log('execute_rds_statement', sql, sql_parameters)
response = rds_client.execute_statement(
secretArn=secret_arn,
database=database_name,
resourceArn=cluster_arn,
sql=sql,
parameters=sql_parameters
)
return response
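# sql_parameters follows the RDS Data API shape used throughout this file, e.g.:
#   execute_rds_statement(
#       'select w from block where x = :x;',
#       [{'name': 'x', 'value': {'doubleValue': 1}}])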
def log(*args):
now = datetime.datetime.utcnow()
line = ' '.join(map(str, (now,) + args))
print line
with open(LOG_PATH, 'a') as fp:
fp.write('%s\n' % line)
def chunked(x):
return int(floor(round(x) / CHUNK_SIZE))
def packet(*args):
return '%s\n' % ','.join(map(str, args))
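# Wire-format sketch: every message is one comma-joined line, e.g.
#   packet(BLOCK, 1, 2, 3)  -> 'B,1,2,3\n'
#   chunked(35)             -> 1   (with CHUNK_SIZE = 32)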
class RateLimiter(object):
def __init__(self, rate, per):
self.rate = float(rate)
self.per = float(per)
self.allowance = self.rate
self.last_check = time.time()
def tick(self):
if not RATE_LIMIT:
return False
now = time.time()
elapsed = now - self.last_check
self.last_check = now
self.allowance += elapsed * (self.rate / self.per)
if self.allowance > self.rate:
self.allowance = self.rate
if self.allowance < 1:
return True # too fast
else:
self.allowance -= 1
return False # okay
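# RateLimiter is a token bucket: `rate` tokens refill evenly over `per` seconds and each
# tick() spends one; tick() returns True when the caller should throttle. A minimal
# usage sketch (only enforced when RATE_LIMIT is True):
#   limiter = RateLimiter(100, 5)   # roughly 100 ticks per 5 seconds
#   if limiter.tick():
#       ...                          # too fast, drop the connection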
class Server(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
allow_reuse_address = True
daemon_threads = True
class Handler(SocketServer.BaseRequestHandler):
def setup(self):
self.position_limiter = RateLimiter(100, 5)
self.limiter = RateLimiter(1000, 10)
self.version = None
self.client_id = None
self.user_id = None
self.nick = None
self.queue = Queue.Queue()
self.running = True
self.start()
def handle(self):
model = self.server.model
model.enqueue(model.on_connect, self)
try:
buf = []
while True:
data = self.request.recv(BUFFER_SIZE)
if not data:
break
buf.extend(data.replace('\r\n', '\n'))
while '\n' in buf:
index = buf.index('\n')
line = ''.join(buf[:index])
buf = buf[index + 1:]
if not line:
continue
if line[0] == POSITION:
if self.position_limiter.tick():
log('RATE', self.client_id)
self.stop()
return
else:
if self.limiter.tick():
log('RATE', self.client_id)
self.stop()
return
model.enqueue(model.on_data, self, line)
finally:
model.enqueue(model.on_disconnect, self)
def finish(self):
self.running = False
def stop(self):
self.request.close()
def start(self):
thread = threading.Thread(target=self.run)
thread.setDaemon(True)
thread.start()
def run(self):
while self.running:
try:
buf = []
try:
buf.append(self.queue.get(timeout=5))
try:
while True:
buf.append(self.queue.get(False))
except Queue.Empty:
pass
except Queue.Empty:
continue
data = ''.join(buf)
self.request.sendall(data)
except Exception:
self.request.close()
#raise
def send_raw(self, data):
if data:
self.queue.put(data)
def send(self, *args):
self.send_raw(packet(*args))
class Model(object):
def __init__(self, seed):
#log('Model-__init__',seed)
self.world = World(seed)
self.clients = []
self.queue = Queue.Queue()
self.commands = {
AUTHENTICATE: self.on_authenticate,
CHUNK: self.on_chunk,
BLOCK: self.on_block,
LIGHT: self.on_light,
POSITION: self.on_position,
TALK: self.on_talk,
SIGN: self.on_sign,
VERSION: self.on_version,
}
self.patterns = [
(re.compile(r'^/nick(?:\s+([^,\s]+))?$'), self.on_nick),
(re.compile(r'^/spawn$'), self.on_spawn),
(re.compile(r'^/goto(?:\s+(\S+))?$'), self.on_goto),
(re.compile(r'^/pq\s+(-?[0-9]+)\s*,?\s*(-?[0-9]+)$'), self.on_pq),
(re.compile(r'^/help(?:\s+(\S+))?$'), self.on_help),
(re.compile(r'^/list$'), self.on_list),
]
def start(self):
thread = threading.Thread(target=self.run)
thread.setDaemon(True)
thread.start()
def run(self):
self.connection = sqlite3.connect(DB_PATH)
self.create_tables()
self.commit()
while True:
try:
if time.time() - self.last_commit > COMMIT_INTERVAL:
self.commit()
self.dequeue()
except Exception:
traceback.print_exc()
def enqueue(self, func, *args, **kwargs):
self.queue.put((func, args, kwargs))
def dequeue(self):
try:
func, args, kwargs = self.queue.get(timeout=5)
func(*args, **kwargs)
except Queue.Empty:
pass
def execute(self, *args, **kwargs):
return self.connection.execute(*args, **kwargs)
def commit(self):
self.last_commit = time.time()
self.connection.commit()
def create_tables(self):
queries = [
'create table if not exists block ('
' p int not null,'
' q int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' w int not null'
');',
'create unique index if not exists block_pqxyz_idx on '
' block (p, q, x, y, z);',
'create table if not exists light ('
' p int not null,'
' q int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' w int not null'
');',
'create unique index if not exists light_pqxyz_idx on '
' light (p, q, x, y, z);',
'create table if not exists sign ('
' p int not null,'
' q int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' face int not null,'
' text text not null'
');',
'create index if not exists sign_pq_idx on sign (p, q);',
'create unique index if not exists sign_xyzface_idx on '
' sign (x, y, z, face);',
'create table if not exists block_history ('
' timestamp real not null,'
' user_id int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' w int not null'
');',
]
for query in queries:
self.execute(query)
def get_default_block(self, x, y, z):
p, q = chunked(x), chunked(z)
chunk = self.world.get_chunk(p, q)
return chunk.get((x, y, z), 0)
def get_block(self, x, y, z):
log('get_block-select from block')
query = (
'select w from block where '
'p = :p and q = :q and x = :x and y = :y and z = :z;'
)
p, q = chunked(x), chunked(z)
sql_parameters = [
{'name':'p', 'value':{'doubleValue': p}},
{'name':'q', 'value':{'doubleValue': q}},
{'name':'x', 'value':{'doubleValue': x}},
{'name':'y', 'value':{'doubleValue': y}},
{'name':'z', 'value':{'doubleValue': z}},
]
rows = list(self.execute(query, dict(p=p, q=q, x=x, y=y, z=z)))
sql = 'select w from block where p = :p and q = :q and x = :x and y = :y and z = :z;'
result = execute_rds_statement(sql, sql_parameters)
#log('rds_response get_block',result['records'])
if rows:
log('rows from sqlite',rows[0][0])
return rows[0][0]
return self.get_default_block(x, y, z)
def next_client_id(self):
result = 1
client_ids = set(x.client_id for x in self.clients)
while result in client_ids:
result += 1
return result
def on_connect(self, client):
client.client_id = self.next_client_id()
client.nick = 'guest%d' % client.client_id
#log('CONN', client.client_id, *client.client_address)
client.position = SPAWN_POINT
self.clients.append(client)
client.send(YOU, client.client_id, *client.position)
client.send(TIME, time.time(), DAY_LENGTH)
client.send(TALK, 'Welcome to Craft!')
client.send(TALK, 'Type "/help" for a list of commands.')
self.send_position(client)
self.send_positions(client)
self.send_nick(client)
self.send_nicks(client)
def on_data(self, client, data):
#log('RECV', client.client_id, data)
args = data.split(',')
command, args = args[0], args[1:]
if command in self.commands:
func = self.commands[command]
func(client, *args)
def on_disconnect(self, client):
#log('DISC', client.client_id, *client.client_address)
self.clients.remove(client)
self.send_disconnect(client)
#self.send_talk('%s has disconnected from the server.' % client.nick)
def on_version(self, client, version):
if client.version is not None:
return
version = int(version)
if version != 1:
client.stop()
return
client.version = version
# TODO: client.start() here
def on_authenticate(self, client, username, access_token):
user_id = None
if username and access_token:
payload = {
'username': username,
'access_token': access_token,
}
response = requests.post(AUTH_URL, data=payload)
if response.status_code == 200 and response.text.isdigit():
user_id = int(response.text)
client.user_id = user_id
if user_id is None:
client.nick = 'guest%d' % client.client_id
client.send(TALK, 'Visit craft.michaelfogleman.com to register!')
else:
client.nick = username
self.send_nick(client)
# TODO: has left message if was already authenticated
self.send_talk('%s has joined the game.' % client.nick)
agones_allocate(self)
    # TBD: adopt PG to load the world from pg instead of craft.db
def on_chunk(self, client, p, q, key=0):
#log('on_chunk-select from block',p,q)
packets = []
p, q, key = map(int, (p, q, key))
query = (
'select rowid, x, y, z, w from block where '
'p = :p and q = :q and rowid > :key;'
)
rows = self.execute(query, dict(p=p, q=q, key=key))
max_rowid = 0
blocks = 0
sql_parameters = [
{'name':'p', 'value':{'doubleValue': p}},
{'name':'q', 'value':{'doubleValue': q}},
{'name':'key', 'value':{'doubleValue': key}},
]
result = execute_rds_statement(query, sql_parameters)
log('rds_response on_chunk',result['records'])
for _row in result['records']:
rowid=_row[0]['longValue']
x=_row[1]['longValue']
y=_row[2]['longValue']
z=_row[3]['longValue']
w=_row[4]['longValue']
log('on_chunk-result[records]',rowid,x,y,z,w)
blocks += 1
packets.append(packet(BLOCK, p, q, x, y, z, w))
max_rowid = max(max_rowid, rowid)
#for rowid, x, y, z, w in rows:
#log('on_chunk-rows',x,y,z,w)
#blocks += 1
#packets.append(packet(BLOCK, p, q, x, y, z, w))
#max_rowid = max(max_rowid, rowid)
query = (
'select x, y, z, w from light where '
'p = :p and q = :q;'
)
rows = self.execute(query, dict(p=p, q=q))
lights = 0
for x, y, z, w in rows:
lights += 1
packets.append(packet(LIGHT, p, q, x, y, z, w))
query = (
'select x, y, z, face, text from sign where '
'p = :p and q = :q;'
)
rows = self.execute(query, dict(p=p, q=q))
signs = 0
for x, y, z, face, text in rows:
signs += 1
packets.append(packet(SIGN, p, q, x, y, z, face, text))
if blocks:
packets.append(packet(KEY, p, q, max_rowid))
if blocks or lights or signs:
packets.append(packet(REDRAW, p, q))
packets.append(packet(CHUNK, p, q))
client.send_raw(''.join(packets))
def on_block(self, client, x, y, z, w):
#log('on_block','x='+str(x)+' y='+str(y)+' z='+str(z)+' w='+str(w))
x, y, z, w = map(int, (x, y, z, w))
p, q = chunked(x), chunked(z)
previous = self.get_block(x, y, z)
message = None
if AUTH_REQUIRED and client.user_id is None:
message = 'Only logged in users are allowed to build.'
elif y <= 0 or y > 255:
message = 'Invalid block coordinates.'
elif w not in ALLOWED_ITEMS:
message = 'That item is not allowed.'
elif w and previous:
message = 'Cannot create blocks in a non-empty space.'
elif not w and not previous:
message = 'That space is already empty.'
elif previous in INDESTRUCTIBLE_ITEMS:
message = 'Cannot destroy that type of block.'
if message is not None:
client.send(BLOCK, p, q, x, y, z, previous)
client.send(REDRAW, p, q)
client.send(TALK, message)
return
query = (
'insert into block_history (timestamp, user_id, x, y, z, w) '
'values (:timestamp, :user_id, :x, :y, :z, :w);'
)
if RECORD_HISTORY:
self.execute(query, dict(timestamp=time.time(),
user_id=client.user_id, x=x, y=y, z=z, w=w))
query = (
'insert or replace into block (p, q, x, y, z, w) '
'values (:p, :q, :x, :y, :z, :w);'
)
#log('about to insert ',str(p)+' '+str(q)+' '+str(x)+' '+str(y)+' '+str(z)+' '+str(w))
self.execute(query, dict(p=p, q=q, x=x, y=y, z=z, w=w))
self.send_block(client, p, q, x, y, z, w)
sql_parameters = [
{'name':'p', 'value':{'doubleValue': p}},
{'name':'q', 'value':{'doubleValue': q}},
{'name':'x', 'value':{'doubleValue': x}},
{'name':'y', 'value':{'doubleValue': y}},
{'name':'z', 'value':{'doubleValue': z}},
{'name':'w', 'value':{'doubleValue': w}},
]
sql = 'insert into block (p, q, x, y, z, w) values (:p, :q, :x, :y, :z, :w) on conflict on constraint unique_block_pqxyz do UPDATE SET w = :w'
response = execute_rds_statement(sql, sql_parameters)
#log('rds_response_on_block',response)
for dx in range(-1, 2):
for dz in range(-1, 2):
if dx == 0 and dz == 0:
continue
if dx and chunked(x + dx) == p:
continue
if dz and chunked(z + dz) == q:
continue
np, nq = p + dx, q + dz
self.execute(query, dict(p=np, q=nq, x=x, y=y, z=z, w=-w))
self.send_block(client, np, nq, x, y, z, -w)
if w == 0:
query = (
'delete from sign where '
'x = :x and y = :y and z = :z;'
)
sql = 'delete from sign where x = :x and y = :y and z = :z'
response = execute_rds_statement(sql, sql_parameters)
#log('rds_response_on_delete_sign',response)
self.execute(query, dict(x=x, y=y, z=z))
query = (
'update light set w = 0 where '
'x = :x and y = :y and z = :z;'
)
sql = 'update light set w = 0 where x = :x and y = :y and z = :z'
response = execute_rds_statement(sql, sql_parameters)
#log('rds_response_on_update_light',response)
self.execute(query, dict(x=x, y=y, z=z))
def on_light(self, client, x, y, z, w):
x, y, z, w = map(int, (x, y, z, w))
p, q = chunked(x), chunked(z)
block = self.get_block(x, y, z)
message = None
if AUTH_REQUIRED and client.user_id is None:
message = 'Only logged in users are allowed to build.'
elif block == 0:
message = 'Lights must be placed on a block.'
elif w < 0 or w > 15:
message = 'Invalid light value.'
if message is not None:
# TODO: client.send(LIGHT, p, q, x, y, z, previous)
client.send(REDRAW, p, q)
client.send(TALK, message)
return
        query = (
            'insert or replace into light (p, q, x, y, z, w) '
            'values (:p, :q, :x, :y, :z, :w);'
        )
        # sql_parameters was never defined in this method; build it the same way as on_block
        sql_parameters = [{'name': n, 'value': {'doubleValue': v}}
                          for n, v in (('p', p), ('q', q), ('x', x), ('y', y), ('z', z), ('w', w))]
        sql = 'insert or replace into light (p, q, x, y, z, w) values (:p, :q, :x, :y, :z, :w)'
        response = execute_rds_statement(sql, sql_parameters)
        #log('rds_response_on_insert_to_light',response)
        self.execute(query, dict(p=p, q=q, x=x, y=y, z=z, w=w))
self.send_light(client, p, q, x, y, z, w)
def on_sign(self, client, x, y, z, face, *args):
if AUTH_REQUIRED and client.user_id is None:
client.send(TALK, 'Only logged in users are allowed to build.')
return
text = ','.join(args)
x, y, z, face = map(int, (x, y, z, face))
if y <= 0 or y > 255:
return
if face < 0 or face > 7:
return
if len(text) > 48:
return
p, q = chunked(x), chunked(z)
        if text:
            query = (
                'insert or replace into sign (p, q, x, y, z, face, text) '
                'values (:p, :q, :x, :y, :z, :face, :text);'
            )
            # sql_parameters was never defined in this method; build it the same way as on_block
            sql_parameters = [{'name': n, 'value': {'doubleValue': v}}
                              for n, v in (('p', p), ('q', q), ('x', x), ('y', y), ('z', z), ('face', face))]
            sql_parameters.append({'name': 'text', 'value': {'stringValue': text}})
            sql = 'insert or replace into sign (p, q, x, y, z, face, text) values (:p, :q, :x, :y, :z, :face, :text)'
            response = execute_rds_statement(sql, sql_parameters)
            #log('rds_response_on_insert_to_sign',response)
            self.execute(query,
                dict(p=p, q=q, x=x, y=y, z=z, face=face, text=text))
        else:
            query = (
                'delete from sign where '
                'x = :x and y = :y and z = :z and face = :face;'
            )
            sql_parameters = [{'name': n, 'value': {'doubleValue': v}}
                              for n, v in (('x', x), ('y', y), ('z', z), ('face', face))]
            sql = 'delete from sign where x = :x and y = :y and z = :z and face = :face'
            response = execute_rds_statement(sql, sql_parameters)
            #log('rds_response_on_delete_sign',response)
            self.execute(query, dict(x=x, y=y, z=z, face=face))
self.send_sign(client, p, q, x, y, z, face, text)
def on_position(self, client, x, y, z, rx, ry):
x, y, z, rx, ry = map(float, (x, y, z, rx, ry))
client.position = (x, y, z, rx, ry)
self.send_position(client)
def on_talk(self, client, *args):
text = ','.join(args)
if text.startswith('/'):
for pattern, func in self.patterns:
match = pattern.match(text)
if match:
func(client, *match.groups())
break
else:
client.send(TALK, 'Unrecognized command: "%s"' % text)
elif text.startswith('@'):
nick = text[1:].split(' ', 1)[0]
for other in self.clients:
if other.nick == nick:
client.send(TALK, '%s> %s' % (client.nick, text))
other.send(TALK, '%s> %s' % (client.nick, text))
break
else:
client.send(TALK, 'Unrecognized nick: "%s"' % nick)
else:
self.send_talk('%s> %s' % (client.nick, text))
def on_nick(self, client, nick=None):
if AUTH_REQUIRED:
client.send(TALK, 'You cannot change your nick on this server.')
return
if nick is None:
client.send(TALK, 'Your nickname is %s' % client.nick)
else:
self.send_talk('%s is now known as %s' % (client.nick, nick))
client.nick = nick
self.send_nick(client)
def on_spawn(self, client):
client.position = SPAWN_POINT
client.send(YOU, client.client_id, *client.position)
self.send_position(client)
def on_goto(self, client, nick=None):
if nick is None:
clients = [x for x in self.clients if x != client]
other = random.choice(clients) if clients else None
else:
nicks = dict((client.nick, client) for client in self.clients)
other = nicks.get(nick)
if other:
client.position = other.position
client.send(YOU, client.client_id, *client.position)
self.send_position(client)
def on_pq(self, client, p, q):
p, q = map(int, (p, q))
if abs(p) > 1000 or abs(q) > 1000:
return
client.position = (p * CHUNK_SIZE, 0, q * CHUNK_SIZE, 0, 0)
client.send(YOU, client.client_id, *client.position)
self.send_position(client)
def on_help(self, client, topic=None):
if topic is None:
client.send(TALK, 'Type "t" to chat. Type "/" to type commands:')
client.send(TALK, '/goto [NAME], /help [TOPIC], /list, /login NAME, /logout, /nick')
client.send(TALK, '/offline [FILE], /online HOST [PORT], /pq P Q, /spawn, /view N')
return
topic = topic.lower().strip()
if topic == 'goto':
client.send(TALK, 'Help: /goto [NAME]')
client.send(TALK, 'Teleport to another user.')
client.send(TALK, 'If NAME is unspecified, a random user is chosen.')
elif topic == 'list':
client.send(TALK, 'Help: /list')
client.send(TALK, 'Display a list of connected users.')
elif topic == 'login':
client.send(TALK, 'Help: /login NAME')
client.send(TALK, 'Switch to another registered username.')
client.send(TALK, 'The login server will be re-contacted. The username is case-sensitive.')
elif topic == 'logout':
client.send(TALK, 'Help: /logout')
client.send(TALK, 'Unauthenticate and become a guest user.')
client.send(TALK, 'Automatic logins will not occur again until the /login command is re-issued.')
elif topic == 'offline':
client.send(TALK, 'Help: /offline [FILE]')
client.send(TALK, 'Switch to offline mode.')
client.send(TALK, 'FILE specifies the save file to use and defaults to "craft".')
elif topic == 'online':
client.send(TALK, 'Help: /online HOST [PORT]')
client.send(TALK, 'Connect to the specified server.')
elif topic == 'nick':
client.send(TALK, 'Help: /nick [NICK]')
client.send(TALK, 'Get or set your nickname.')
elif topic == 'pq':
client.send(TALK, 'Help: /pq P Q')
client.send(TALK, 'Teleport to the specified chunk.')
elif topic == 'spawn':
client.send(TALK, 'Help: /spawn')
client.send(TALK, 'Teleport back to the spawn point.')
elif topic == 'view':
client.send(TALK, 'Help: /view N')
client.send(TALK, 'Set viewing distance, 1 - 24.')
def on_list(self, client):
client.send(TALK,
'Players: %s' % ', '.join(x.nick for x in self.clients))
def send_positions(self, client):
for other in self.clients:
if other == client:
continue
client.send(POSITION, other.client_id, *other.position)
def send_position(self, client):
for other in self.clients:
if other == client:
continue
other.send(POSITION, client.client_id, *client.position)
def send_nicks(self, client):
for other in self.clients:
if other == client:
continue
client.send(NICK, other.client_id, other.nick)
def send_nick(self, client):
for other in self.clients:
other.send(NICK, client.client_id, client.nick)
def send_disconnect(self, client):
for other in self.clients:
if other == client:
continue
other.send(DISCONNECT, client.client_id)
def send_block(self, client, p, q, x, y, z, w):
for other in self.clients:
if other == client:
continue
other.send(BLOCK, p, q, x, y, z, w)
other.send(REDRAW, p, q)
def send_light(self, client, p, q, x, y, z, w):
for other in self.clients:
if other == client:
continue
other.send(LIGHT, p, q, x, y, z, w)
other.send(REDRAW, p, q)
def send_sign(self, client, p, q, x, y, z, face, text):
for other in self.clients:
if other == client:
continue
other.send(SIGN, p, q, x, y, z, face, text)
def send_talk(self, text):
log(text)
for client in self.clients:
client.send(TALK, text)
model = Model(None)
model.start()
def cleanup():
log('cleanup')
world = World(None)
conn = sqlite3.connect(DB_PATH)
query = 'select x, y, z from block order by rowid desc limit 1;'
last = list(conn.execute(query))[0]
query = 'select distinct p, q from block;'
chunks = list(conn.execute(query))
count = 0
total = 0
delete_query = 'delete from block where x = %d and y = %d and z = %d;'
for p, q in chunks:
chunk = world.create_chunk(p, q)
query = 'select x, y, z, w from block where p = :p and q = :q;'
rows = conn.execute(query, {'p': p, 'q': q})
for x, y, z, w in rows:
if chunked(x) != p or chunked(z) != q:
continue
total += 1
if (x, y, z) == last:
continue
original = chunk.get((x, y, z), 0)
if w == original or original in INDESTRUCTIBLE_ITEMS:
count += 1
print delete_query % (x, y, z)
conn.close()
print >> sys.stderr, '%d of %d blocks will be cleaned up' % (count, total)
def agones_health(model):
url="http://localhost:"+agones_port+"/health"
req = urllib2.Request(url)
req.add_header('Content-Type','application/json')
req.add_data('')
while True:
r = urllib2.urlopen(req)
resp=r.getcode()
log('agones- Response code from agones health was:',resp)
time.sleep(10)
def agones_allocate(model):
url="http://localhost:"+agones_port+"/allocate"
req = urllib2.Request(url)
req.add_header('Content-Type','application/json')
req.add_data('')
r = urllib2.urlopen(req)
resp=r.getcode()
log('agones- Response code from agones allocate was:',resp)
model.send_talk("new player joined - reporting to agones the server is allocated")
def sig_handler(signum,frame):
    log('Signal handler called with signal', signum)
    model.send_talk("Game server maintenance is pending - please reconnect - don't worry, your universe is saved with us")
def main():
log('main',sys.argv)
if len(sys.argv) == 2 and sys.argv[1] == 'cleanup':
cleanup()
return
host, port = DEFAULT_HOST, DEFAULT_PORT
if len(sys.argv) > 1:
host = sys.argv[1]
if len(sys.argv) > 2:
port = int(sys.argv[2])
#model = Model(None)
#model.start()
signal.signal(signal.SIGTERM,sig_handler)
server = Server((host, port), Handler)
server.model = model
    newpid = os.fork()
    if newpid == 0:
        log('agones-in child process about to call agones_health()')
        agones_health(model)
        log('agones-in child process called agones_health()')
else:
pids = (os.getpid(), newpid)
log('agones server pid and health pid',pids)
log('SERV', host, port)
server.serve_forever()
if __name__ == '__main__':
main()
|
throttler.py
|
# -*- coding: utf-8 -*-
# Copyright 2016-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Wen Guan <wen.guan@cern.ch>, 2016
# - Vincent Garonne <vincent.garonne@cern.ch>, 2016-2018
# - Martin Barisits <martin.barisits@cern.ch>, 2017-2021
# - Cedric Serfon <cedric.serfon@cern.ch>, 2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - Brandon White <bjwhite@fnal.gov>, 2019
# - Thomas Beermann <thomas.beermann@cern.ch>, 2020-2021
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020-2021
# - Radu Carpa <radu.carpa@cern.ch>, 2021
"""
Conveyor throttler is a daemon to manage rucio internal queue.
"""
from __future__ import division
import logging
import math
import os
import socket
import threading
import time
import traceback
import rucio.db.sqla.util
from rucio.common import exception
from rucio.common.logging import setup_logging, formatted_logger
from rucio.common.utils import get_parsed_throttler_mode
from rucio.core import heartbeat, config as config_core
from rucio.core.monitor import record_counter, record_gauge
from rucio.core.request import get_stats_by_activity_direction_state, release_all_waiting_requests, release_waiting_requests_fifo, release_waiting_requests_grouped_fifo
from rucio.core.rse import get_rse, set_rse_transfer_limits, delete_rse_transfer_limits, get_rse_transfer_limits
from rucio.db.sqla.constants import RequestState
graceful_stop = threading.Event()
def throttler(once=False, sleep_time=600, partition_wait_time=10):
"""
Main loop to check rse transfer limits.
"""
logging.info('Throttler starting')
executable = 'conveyor-throttler'
hostname = socket.getfqdn()
pid = os.getpid()
hb_thread = threading.current_thread()
heartbeat.sanity_check(executable=executable, hostname=hostname)
# Make an initial heartbeat so that all throttlers have the correct worker number on the next try
heart_beat = heartbeat.live(executable, hostname, pid, hb_thread)
prepend_str = 'Thread [%i/%i] : ' % (heart_beat['assign_thread'], heart_beat['nr_threads'])
logging.info(prepend_str + 'Throttler started - timeout (%s)' % (sleep_time))
current_time = time.time()
if partition_wait_time is not None:
graceful_stop.wait(partition_wait_time)
while not graceful_stop.is_set():
try:
heart_beat = heartbeat.live(executable, hostname, pid, hb_thread, older_than=3600)
prepend_str = 'Thread [%i/%i] : ' % (heart_beat['assign_thread'], heart_beat['nr_threads'])
if heart_beat['assign_thread'] != 0:
logging.info(prepend_str + 'Throttler thread id is not 0, will sleep. Only thread 0 will work')
if once:
break
if time.time() < current_time + sleep_time:
graceful_stop.wait(int((current_time + sleep_time) - time.time()))
current_time = time.time()
continue
logging.info(prepend_str + "Throttler - schedule requests")
run_once(logger=formatted_logger(logging.log, prepend_str + '%s'))
if once:
break
if time.time() < current_time + sleep_time:
graceful_stop.wait(int((current_time + sleep_time) - time.time()))
current_time = time.time()
except Exception:
            logging.critical(prepend_str + 'Throttler crashed %s' % (traceback.format_exc()))
if once:
break
    logging.info(prepend_str + 'Throttler - graceful stop requested')
    heartbeat.die(executable, hostname, pid, hb_thread)
    logging.info(prepend_str + 'Throttler - graceful stop done')
def stop(signum=None, frame=None):
"""
Graceful exit.
"""
graceful_stop.set()
def run(once=False, sleep_time=600):
"""
    Starts up the conveyor threads.
"""
setup_logging()
if rucio.db.sqla.util.is_old_db():
raise exception.DatabaseException('Database was not updated, daemon won\'t start')
if once:
logging.info('running throttler one iteration only')
throttler(once=True, sleep_time=sleep_time)
else:
threads = []
logging.info('starting throttler thread')
throttler_thread = threading.Thread(target=throttler, kwargs={'once': once, 'sleep_time': sleep_time})
threads.append(throttler_thread)
[thread.start() for thread in threads]
logging.info('waiting for interrupts')
# Interruptible joins require a timeout.
while threads:
threads = [thread.join(timeout=3.14) for thread in threads if thread and thread.is_alive()]
def __get_request_stats(all_activities=False, direction='destination'):
"""
Retrieve stats about requests and collect transfer limits.
:param all_activities: Boolean whether requests are grouped by activity or if activities are ignored.
:param direction: String whether request statistics are based on source or destination RSEs.
"""
logging.info("Throttler retrieve requests statistics")
results = get_stats_by_activity_direction_state(state=[RequestState.QUEUED,
RequestState.SUBMITTING,
RequestState.SUBMITTED,
RequestState.WAITING], all_activities=all_activities, direction=direction)
result_dict = {}
limits = get_rse_transfer_limits()
for result in results:
if direction == 'destination' or direction == 'source':
account = result[0]
state = result[1]
rse = result[2]
counter = result[3]
rse_id = result[4]
if all_activities:
threshold = limits.get('all_activities', {}).get(rse_id, {}).get('max_transfers')
if threshold or (counter and (state == RequestState.WAITING)):
if rse_id not in result_dict:
result_dict[rse_id] = {'waiting': 0,
'transfer': 0,
'threshold': threshold,
'rse': rse,
'strategy': limits.get('all_activities', {}).get(rse_id, {}).get('strategy'),
'deadline': limits.get('all_activities', {}).get(rse_id, {}).get('deadline'),
'volume': limits.get('all_activities', {}).get(rse_id, {}).get('volume'),
'activities': {}}
if state == RequestState.WAITING:
result_dict[rse_id]['waiting'] += counter
else:
result_dict[rse_id]['transfer'] += counter
else:
activity = result[5]
threshold = limits.get(activity, {}).get(rse_id, {}).get('max_transfers')
if threshold or (counter and (state == RequestState.WAITING)):
if rse_id not in result_dict:
result_dict[rse_id] = {
'rse': rse,
'activities': {}
}
if activity not in result_dict[rse_id]['activities']:
result_dict[rse_id]['activities'][activity] = {'waiting': 0,
'transfer': 0,
'strategy': limits.get(activity, {}).get(rse_id, {}).get('strategy'),
'deadline': limits.get('all_activities', {}).get(rse_id, {}).get('deadline'),
'volume': limits.get('all_activities', {}).get(rse_id, {}).get('volume'),
'threshold': threshold,
'accounts': {}}
if account not in result_dict[rse_id]['activities'][activity]['accounts']:
result_dict[rse_id]['activities'][activity]['accounts'][account] = {'waiting': 0, 'transfer': 0}
if state == RequestState.WAITING:
result_dict[rse_id]['activities'][activity]['accounts'][account]['waiting'] += counter
result_dict[rse_id]['activities'][activity]['waiting'] += counter
else:
result_dict[rse_id]['activities'][activity]['accounts'][account]['transfer'] += counter
result_dict[rse_id]['activities'][activity]['transfer'] += counter
return result_dict
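# Shape sketch (added for illustration, not part of the original source):
# with all_activities=False the returned dict nests per-activity and
# per-account counters, roughly:
#
#     {rse_id: {'rse': 'SITE_DATADISK',
#               'activities': {'User Subscriptions': {
#                   'waiting': 4, 'transfer': 10, 'threshold': 100,
#                   'strategy': 'fifo', 'deadline': None, 'volume': None,
#                   'accounts': {'jdoe': {'waiting': 4, 'transfer': 10}}}}}}
#
# The RSE name, activity and account values here are hypothetical.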
def run_once(logger=logging.log, session=None):
"""
Schedule requests
"""
try:
throttler_mode = config_core.get('throttler', 'mode', default='DEST_PER_ACT', use_cache=False)
direction, all_activities = get_parsed_throttler_mode(throttler_mode)
result_dict = __get_request_stats(all_activities, direction)
if direction == 'destination' or direction == 'source':
for rse_id in result_dict:
rse_name = result_dict[rse_id]['rse']
availability = get_rse(rse_id).availability
# dest_rse is not blocklisted for write or src_rse is not blocklisted for read
if (direction == 'destination' and availability & 2) or (direction == 'source' and availability & 4):
if all_activities:
__release_all_activities(result_dict[rse_id], direction, rse_name, rse_id, logger=logger, session=session)
else:
__release_per_activity(result_dict[rse_id], direction, rse_name, rse_id, logger=logger, session=session)
except Exception:
logger(logging.CRITICAL, "Failed to schedule requests, error: %s" % (traceback.format_exc()))
def __release_all_activities(stats, direction, rse_name, rse_id, logger, session):
"""
Release requests if activities should be ignored.
:param stats: Request statistics
:param direction: One of 'source' or 'destination', selecting which RSEs the statistics are based on.
:param rse_name: RSE name.
:param rse_id: RSE id.
"""
threshold = stats['threshold']
transfer = stats['transfer']
waiting = stats['waiting']
strategy = stats['strategy']
if threshold is not None and transfer + waiting > threshold:
record_gauge('daemons.conveyor.throttler.set_rse_transfer_limits.%s.max_transfers' % (rse_name), threshold)
record_gauge('daemons.conveyor.throttler.set_rse_transfer_limits.%s.transfers' % (rse_name), transfer)
record_gauge('daemons.conveyor.throttler.set_rse_transfer_limits.%s.waitings' % (rse_name), waiting)
if transfer < 0.8 * threshold:
to_be_released = threshold - transfer
if strategy == 'grouped_fifo':
deadline = stats.get('deadline')
volume = stats.get('volume')
release_waiting_requests_grouped_fifo(rse_id, count=to_be_released, direction=direction, volume=volume, deadline=deadline, session=session)
elif strategy == 'fifo':
release_waiting_requests_fifo(rse_id, count=to_be_released, direction=direction, session=session)
else:
logger(logging.DEBUG, "Throttler has done nothing on rse %s (transfer > 0.8 * threshold)" % rse_name)
elif waiting > 0 or not threshold:
logger(logging.DEBUG, "Throttler remove limits(threshold: %s) and release all waiting requests, rse %s" % (threshold, rse_name))
delete_rse_transfer_limits(rse_id, activity='all_activities', session=session)
release_all_waiting_requests(rse_id, direction=direction, session=session)
record_counter('daemons.conveyor.throttler.delete_rse_transfer_limits.{activity}.{rse}', labels={'activity': 'all_activities', 'rse': rse_name})
def __release_per_activity(stats, direction, rse_name, rse_id, logger, session):
"""
Release requests per activity.
:param stats: Request statistics
:param direction: One of 'source' or 'destination', selecting which RSEs the statistics are based on.
:param rse_name: RSE name.
:param rse_id: RSE id.
"""
for activity in stats['activities']:
threshold = stats['activities'][activity]['threshold']
transfer = stats['activities'][activity]['transfer']
waiting = stats['activities'][activity]['waiting']
if waiting:
logger(logging.DEBUG, "Request status for %s at %s: %s" % (activity, rse_name,
stats['activities'][activity]))
if threshold is None:
logger(logging.DEBUG, "Throttler remove limits(threshold: %s) and release all waiting requests for activity %s, rse_id %s" % (threshold, activity, rse_id))
delete_rse_transfer_limits(rse_id, activity=activity, session=session)
release_all_waiting_requests(rse_id, activity=activity, direction=direction, session=session)
record_counter('daemons.conveyor.throttler.delete_rse_transfer_limits.{activity}.{rse}', labels={'activity': activity, 'rse': rse_name})
elif transfer + waiting > threshold:
logger(logging.DEBUG, "Throttler set limits for activity %s, rse %s" % (activity, rse_name))
set_rse_transfer_limits(rse_id, activity=activity, max_transfers=threshold, transfers=transfer, waitings=waiting, session=session)
record_gauge('daemons.conveyor.throttler.set_rse_transfer_limits.%s.%s.max_transfers' % (activity, rse_name), threshold)
record_gauge('daemons.conveyor.throttler.set_rse_transfer_limits.%s.%s.transfers' % (activity, rse_name), transfer)
record_gauge('daemons.conveyor.throttler.set_rse_transfer_limits.%s.%s.waitings' % (activity, rse_name), waiting)
if transfer < 0.8 * threshold:
# release requests on account
nr_accounts = len(stats['activities'][activity]['accounts'])
if nr_accounts < 1:
nr_accounts = 1
to_release = threshold - transfer
threshold_per_account = math.ceil(threshold / nr_accounts)
to_release_per_account = math.ceil(to_release / nr_accounts)
accounts = stats['activities'][activity]['accounts']
for account in accounts:
if nr_accounts == 1:
logger(logging.DEBUG, "Throttler release %s waiting requests for activity %s, rse %s, account %s " % (to_release, activity, rse_name, account))
release_waiting_requests_fifo(rse_id, activity=activity, account=account, count=to_release, direction=direction, session=session)
record_gauge('daemons.conveyor.throttler.release_waiting_requests.%s.%s.%s' % (activity, rse_name, account), to_release)
elif accounts[account]['transfer'] > threshold_per_account:
logger(logging.DEBUG, "Throttler will not release %s waiting requests for activity %s, rse %s, account %s: It queued more transfers than its share " %
(accounts[account]['waiting'], activity, rse_name, account))
nr_accounts -= 1
to_release_per_account = math.ceil(to_release / nr_accounts)
elif accounts[account]['waiting'] < to_release_per_account:
logger(logging.DEBUG, "Throttler release %s waiting requests for activity %s, rse %s, account %s " % (accounts[account]['waiting'], activity, rse_name, account))
release_waiting_requests_fifo(rse_id, activity=activity, account=account, count=accounts[account]['waiting'], direction=direction, session=session)
record_gauge('daemons.conveyor.throttler.release_waiting_requests.%s.%s.%s' % (activity, rse_name, account), accounts[account]['waiting'])
to_release = to_release - accounts[account]['waiting']
nr_accounts -= 1
to_release_per_account = math.ceil(to_release / nr_accounts)
else:
logger(logging.DEBUG, "Throttler release %s waiting requests for activity %s, rse %s, account %s " % (to_release_per_account, activity, rse_name, account))
release_waiting_requests_fifo(rse_id, activity=activity, account=account, count=to_release_per_account, direction=direction, session=session)
record_gauge('daemons.conveyor.throttler.release_waiting_requests.%s.%s.%s' % (activity, rse_name, account), to_release_per_account)
to_release = to_release - to_release_per_account
nr_accounts -= 1
else:
logger(logging.DEBUG, "Throttler has done nothing for activity %s on rse %s (transfer > 0.8 * threshold)" % (activity, rse_name))
elif waiting > 0:
logger(logging.DEBUG, "Throttler remove limits(threshold: %s) and release all waiting requests for activity %s, rse %s" % (threshold, activity, rse_name))
delete_rse_transfer_limits(rse_id, activity=activity, session=session)
release_all_waiting_requests(rse_id, activity=activity, direction=direction, session=session)
record_counter('daemons.conveyor.throttler.delete_rse_transfer_limits.{activity}.{rse}', labels={'activity': activity, 'rse': rse_name})
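# Worked example (added for illustration, not part of the original source):
# with threshold=100, transfer=60 and two accounts, to_release = 40 and
# to_release_per_account = math.ceil(40 / 2) = 20. If account 'a' only has
# 15 waiting requests, all 15 are released and the remainder is rebalanced:
# to_release becomes 25 and nr_accounts becomes 1, so account 'b' may then
# receive up to 25 released requests.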
|
appjar.py
|
# -*- coding: utf-8 -*-
""" appJar.py: Provides a GUI class, for making simple tkinter GUIs. """
# Nearly everything I learnt came from: http://effbot.org/tkinterbook/
# with help from: http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/index.html
# with snippets from stackexchange.com
# make print & unicode backwards compatible
from __future__ import print_function
from __future__ import unicode_literals
# Import tkinter classes - handles python2 & python3
try:
# for Python2
from Tkinter import *
import tkMessageBox as MessageBox
import tkSimpleDialog as SimpleDialog
from tkColorChooser import askcolor
import tkFileDialog as filedialog
import ScrolledText as scrolledtext
import tkFont as tkFont
# used to check if functions have a parameter
from inspect import getargspec as getArgs
PYTHON2 = True
PY_NAME = "Python"
STRING = basestring
except ImportError:
# for Python3
from tkinter import *
from tkinter import messagebox as MessageBox
from tkinter import simpledialog as SimpleDialog
from tkinter.colorchooser import askcolor
from tkinter import filedialog
from tkinter import scrolledtext
from tkinter import font as tkFont
# used to check if functions have a parameter
from inspect import getfullargspec as getArgs
PYTHON2 = False
PY_NAME = "python3"
STRING = str
# import other useful classes
import os
import sys
import locale
import re
import imghdr # images
import time # splashscreen
import calendar # datepicker
import datetime # datepicker & image
import logging # python's logger
import inspect # for logging
import argparse # argument parser
from contextlib import contextmanager # generators
import __main__ as theMain
from platform import system as platform
# we need to import these too
# but will only import them when needed
random = None
ttk = ThemedStyle = None
hashlib = None
ToolTip = None
nanojpeg = PngImageTk = array = None # extra image support
EXTERNAL_DND = None
INTERNAL_DND = None
types = None # used to register dnd functions
winsound = None
FigureCanvasTkAgg = Figure = None # matplotlib
parseString = TreeItem = TreeNode = None # AjTree
# GoogleMap
base64 = urlencode = urlopen = urlretrieve = quote_plus = json = None
ConfigParser = codecs = ParsingError = None # used to parse language files
Thread = Queue = None
sqlite3 = None
turtle = None
webbrowser = None # links
OrderedDict = None # tabbedFrames
# to allow tkinter or ttk
frameBase = Frame
labelBase = Label
scaleBase = Scale
entryBase = Entry
# details
__author__ = "Richard Jarvis"
__copyright__ = "Copyright 2015-2018, Richard Jarvis"
__credits__ = ["Graham Turner", "Sarah Murch"]
__license__ = "Apache 2.0"
__version__ = "0.93.0"
__maintainer__ = "Richard Jarvis"
__email__ = "info@appJar.info"
__status__ = "Development"
__url__ = "http://appJar.info"
try:
__locale__ = locale.getdefaultlocale()[0]
except ValueError:
__locale__ = None
####################################################
# The main GUI class - this provides all functions
####################################################
class gui(object):
""" Class to represent the GUI
- Create one of these
- add some widgets
- call the go() function """
# ensure only one instance of gui is created
# set to True in constructor
# set back to false in stop()
instantiated = False
built = False
# static variables
exe_file = None
exe_path = None
lib_file = None
lib_path = None
# globals for supported platforms
WINDOWS = 1
MAC = 2
LINUX = 3
# positioning
N = N
NE = NE
E = E
SE = SE
S = S
SW = SW
W = W
NW = NW
CENTER = CENTER
LEFT = LEFT
RIGHT = RIGHT
# reliefs
SUNKEN = SUNKEN
RAISED = RAISED
GROOVE = GROOVE
RIDGE = RIDGE
FLAT = FLAT
###################################
# Constants for music stuff
###################################
BASIC_NOTES = {
"A": 440,
"B": 493,
"C": 261,
"D": 293,
"E": 329,
"F": 349,
"G": 392,
}
NOTES = {'f8': 5587, 'c#6': 1108, 'f4': 349, 'c7': 2093,
'd#2': 77, 'g8': 6271, 'd4': 293, 'd7': 2349,
'd#7': 2489, 'g#4': 415, 'e7': 2637, 'd9': 9397,
'b8': 7902, 'a#4': 466, 'b5': 987, 'b2': 123,
'g#9': 13289, 'g9': 12543, 'f#2': 92, 'c4': 261,
'e1': 41, 'e6': 1318, 'a#8': 7458, 'c5': 523,
'd6': 1174, 'd3': 146, 'g7': 3135, 'd2': 73,
'd#3': 155, 'g#6': 1661, 'd#4': 311, 'a3': 219,
'g2': 97, 'c#5': 554, 'd#9': 9956, 'a8': 7040,
'a#5': 932, 'd#5': 622, 'a1': 54, 'g#8': 6644,
'a2': 109, 'g#5': 830, 'f3': 174, 'a6': 1760,
'e8': 5274, 'c#9': 8869, 'f5': 698, 'b1': 61,
'c#4': 277, 'f#9': 11839, 'e5': 659, 'f9': 11175,
'f#5': 739, 'a#1': 58, 'f#8': 5919, 'b7': 3951,
'c#8': 4434, 'g1': 48, 'c#3': 138, 'f#7': 2959,
'c6': 1046, 'c#2': 69, 'c#7': 2217, 'c3': 130,
'e9': 10548, 'c9': 8372, 'a#6': 1864, 'a#7': 3729,
'g#2': 103, 'f6': 1396, 'b3': 246, 'g#3': 207,
'b4': 493, 'a7': 3520, 'd#6': 1244, 'd#8': 4978,
'f2': 87, 'd5': 587, 'f7': 2793, 'f#6': 1479,
'g6': 1567, 'e3': 164, 'f#3': 184, 'g#1': 51,
'd8': 4698, 'f#4': 369, 'f1': 43, 'c8': 4186,
'g4': 391, 'g3': 195, 'a4': 440, 'a#3': 233,
'd#1': 38, 'e2': 82, 'e4': 329, 'a5': 880,
'a#2': 116, 'g5': 783, 'g#7': 3322, 'b6': 1975,
'c2': 65, 'f#1': 46
}
DURATIONS = {"BREVE": 2000, "SEMIBREVE": 1000, "MINIM": 500,
"CROTCHET": 250, "QUAVER": 125, "SEMIQUAVER": 63,
"DEMISEMIQUAVER": 32, "HEMIDEMISEMIQUAVER": 16
}
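# Note (added for illustration, not part of the original source): NOTES maps
# note names to integer frequencies in Hz, so each octave roughly doubles
# (NOTES['a4'] == 440, NOTES['a5'] == 880), and DURATIONS maps British note
# lengths to milliseconds, e.g. a CROTCHET (quarter note) is 250ms.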
###############################################
# USEFUL STATIC METHODS
###############################################
@staticmethod
def CENTER(win, up=0):
gui.SET_LOCATION("CENTER", win=win, up=up)
@staticmethod
def SET_LOCATION(x, y=None, ignoreSettings=None, win=None, up=0):
if ignoreSettings is not None:
win.ignoreSettings = ignoreSettings
if gui.GET_PLATFORM() != gui.LINUX:
trans = win.attributes('-alpha')
win.attributes('-alpha', 0.0)
win.update_idletasks()
if isinstance(x, STRING) and x.lower() in ['c', 'center', 'centre'] and y is None:
x = y = 'c'
else:
x, y = gui.PARSE_TWO_PARAMS(x, y)
gui.trace("Set location called with %s, %s", x, y)
# get the window's dimensions
dims = gui.GET_DIMS(win)
# set any center positions
if isinstance(x, STRING) and x.lower() in ['c', 'center', 'centre']: x = dims["x"]
if isinstance(y, STRING) and y.lower() in ['c', 'center', 'centre']: y = dims["y"]
# move the window up a bit if requested
y = y - up if up < y else 0
# fix any out of bounds positions
if x < 0 or x > dims['s_width']: x = dims['x']
if y < 0 or y > dims['s_height']: y = dims['y']
gui.trace("Screen: %sx%s. Requested: %sx%s. Location: %s, %s",
dims["s_width"], dims["s_height"], dims["b_width"],
dims["b_height"], x, y)
win.geometry("+%d+%d" % (x, y))
win.locationSet = True
if gui.GET_PLATFORM() != gui.LINUX:
win.attributes('-alpha', trans)
@staticmethod
def CLEAN_CONFIG_DICTIONARY(**kw):
""" Used by all Classes to tidy up dictionaries passed into config functions
Allows us to more quickly process the dictionaries when overriding config """
try: kw['bg'] = kw.pop('background')
except: pass
try: kw['fg'] = kw.pop('foreground')
except: pass
kw = dict((k.lower().strip(), v) for k, v in kw.items())
return kw
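# Doctest-style sketch (added for illustration, not part of the original
# source): 'background'/'foreground' are shortened before the remaining keys
# are lower-cased and stripped, e.g.
#
#     >>> gui.CLEAN_CONFIG_DICTIONARY(background='red', FG='blue') == {'bg': 'red', 'fg': 'blue'}
#     True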
@staticmethod
def GET_PLATFORM():
""" returns one of the gui class's three static platform variables """
if platform() in ["win32", "Windows"]:
return gui.WINDOWS
elif platform() == "Darwin":
return gui.MAC
elif platform() in ["Linux", "FreeBSD"]:
return gui.LINUX
else:
raise Exception("Unknown platform: " + platform())
@staticmethod
def SHOW_VERSION():
""" returns a printable string containing version information """
verString = \
"appJar: " + str(__version__) \
+ "\nPython: " + str(sys.version_info[0]) \
+ "." + str(sys.version_info[1]) + "." + str(sys.version_info[2]) \
+ "\nTCL: " + str(TclVersion) \
+ ", TK: " + str(TkVersion) \
+ "\nPlatform: " + str(platform()) \
+ "\npid: " + str(os.getpid()) \
+ "\nlocale: " + str(__locale__)
return verString
@staticmethod
def SHOW_PATHS():
""" returns a printable string containing path to libraries, etc """
pathString = \
"File Name: " + (gui.exe_file if gui.exe_file is not None else "") \
+ "\nFile Location: " + (gui.exe_path if gui.exe_path is not None else "") \
+ "\nLib Location: " + (gui.lib_path if gui.lib_path is not None else "")
return pathString
@staticmethod
def GET_DIMS(container):
""" returns a dictionary of dimensions for the supplied container """
container.update()
dims = {}
# get the apps requested width & height
dims["r_width"] = container.winfo_reqwidth()
dims["r_height"] = container.winfo_reqheight()
# get the current width & height
dims["w_width"] = container.winfo_width()
dims["w_height"] = container.winfo_height()
# get the window's width & height
dims["s_width"] = container.winfo_screenwidth()
dims["s_height"] = container.winfo_screenheight()
# determine best geom for OS
# on MAC & LINUX, w_width/w_height always 1 unless user-set
# on WIN, w_height is bigger than r_height - leaving empty space
if gui.GET_PLATFORM() in [gui.MAC, gui.LINUX]:
if dims["w_width"] != 1:
dims["b_width"] = dims["w_width"]
dims["b_height"] = dims["w_height"]
else:
dims["b_width"] = dims["r_width"]
dims["b_height"] = dims["r_height"]
else:
dims["b_height"] = max(dims["r_height"], dims["w_height"])
dims["b_width"] = max(dims["r_width"], dims["w_width"])
# GUI's corner - widget's corner
# widget's corner can be 0 on windows when size not set by user
dims["outerFrameWidth"] = 0 if container.winfo_x() == 0 else container.winfo_rootx() - container.winfo_x()
dims["titleBarHeight"] = 0 if container.winfo_rooty() == 0 else container.winfo_rooty() - container.winfo_y()
# add it all together
dims["actualWidth"] = dims["b_width"] + (dims["outerFrameWidth"] * 2)
dims["actualHeight"] = dims["b_height"] + dims["titleBarHeight"] + dims["outerFrameWidth"]
dims["x"] = (dims["s_width"] // 2) - (dims["actualWidth"] // 2)
dims["y"] = (dims["s_height"] // 2) - (dims["actualHeight"] // 2)
return dims
@staticmethod
def PARSE_TWO_PARAMS(x, y):
""" used to convert different possible x/y params to a tuple
"""
if y is not None:
return (x,y)
else:
if isinstance(x, (list, tuple)):
return (x[0], x[1])
else:
if isinstance(x, str):
x=x.strip()
if "," in x:
return [int(w.strip()) for w in x.split(",")]
return (x, x)
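# Doctest-style sketch (added for illustration, not part of the original
# source):
#
#     >>> gui.PARSE_TWO_PARAMS(3, 4)
#     (3, 4)
#     >>> gui.PARSE_TWO_PARAMS((3, 4), None)
#     (3, 4)
#     >>> gui.PARSE_TWO_PARAMS("3, 4", None)  # comma-string gives a list
#     [3, 4]
#     >>> gui.PARSE_TWO_PARAMS(5, None)
#     (5, 5)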
@staticmethod
def SPLIT_GEOM(geom):
""" returns 2 lists made from the geom string
:param geom: the geom string to parse
:returns: a tuple containing a width/height tuple & an x/y position tuple
"""
geom = geom.lower().split("x")
width = int(float(geom[0]))
height = int(float(geom[1].split("+")[0]))
try:
x = int(float(geom[1].split("+")[1]))
y = int(float(geom[1].split("+")[2]))
except IndexError:
x = y = -1
return (width, height), (x, y)
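# Doctest-style sketch (added for illustration, not part of the original
# source):
#
#     >>> gui.SPLIT_GEOM("200x100+10+20")
#     ((200, 100), (10, 20))
#     >>> gui.SPLIT_GEOM("200x100")  # no position -> (-1, -1)
#     ((200, 100), (-1, -1))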
@staticmethod
def MOUSE_POS_IN_WIDGET(widget, event, findRoot=True):
""" returns the mouse's relative position in a widget
:param widget: the widget to look in
:param event: the event containing the mouse coordinates
:param findRoot: if we should make this relative to the parent
"""
# first we have to get the real master
master = widget
while findRoot:
if isinstance(master, (SubWindow, Tk)):
findRoot = False
else:
master = master.master
# subtract the widget's top left corner from the root window's top corner
x = event.x_root - master.winfo_rootx()
y = event.y_root - master.winfo_rooty()
gui.trace("<<MOUSE_POS_IN_WIDGET>> %s %s,%s", widget, x, y)
return (x, y)
#####################################
#####################################
# CONSTRUCTOR - creates the GUI
#####################################
def __init__(
self, title=None, geom=None, handleArgs=True, language=None,
startWindow=None, useTtk=False, useSettings=False, showIcon=True, **kwargs
):
""" constructor - sets up the empty GUI window, and inits the various properties """
if self.__class__.instantiated:
raise Exception("You cannot have more than one instance of gui, try using a subWindow.")
else:
self.__class__.instantiated = True
self.alive = True
# first up, set the logger
def _logForLevel(self, message, *args, **kwargs):
if self.isEnabledFor(logging.DEBUG-5):
self._log(logging.DEBUG-5, message, args, **kwargs)
def _logToRoot(message, *args, **kwargs):
logging.log(logging.DEBUG-5, message, *args, **kwargs)
logging.basicConfig(level=logging.WARNING, format='%(asctime)s %(name)s:%(levelname)s %(message)s')
logging.addLevelName(logging.DEBUG - 5, 'TRACE')
setattr(logging, 'TRACE', logging.DEBUG -5)
setattr(logging.getLoggerClass(), "trace", _logForLevel)
setattr(logging, "trace", _logToRoot)
logFile = kwargs.pop("file", kwargs.pop("logFile", None))
logLevel = kwargs.pop("log", kwargs.pop("logLevel", None))
self._language = language
self.useSettings = useSettings
self.settingsFile = "appJar.ini"
self.externalSettings = {}
self.startWindow = startWindow
# check any command line arguments
args = self._handleArgs() if handleArgs else None
# warn if we're in an untested mode
self._checkMode()
# next up, verify the platform
self.platform = gui.GET_PLATFORM()
# init the widget enum
self.Widgets = Enum(
widgets=["Label", "Message", "Entry", "TextArea", "Button",
"FileEntry", "DirectoryEntry", "Scale", "Link", "Meter", "Image",
"CheckBox", "RadioButton", "ListBox", "SpinBox", "OptionBox",
"TickOptionBox", "Accelerators",
"Map", "PieChart", "Properties", "Table", "Plot", "MicroBit",
"DatePicker", "Separator", "Turtle", "Canvas",
"LabelFrame", "Frame", "TabbedFrame", "PanedFrame", "ToggleFrame",
"FrameStack", "SubFrame", "FrameBox", "FrameLabel", "ContainerLog", "FlashLabel",
"AnimationID", "ImageCache", "Menu",
"SubWindow", "ScrollPane", "PagedWindow", "Notebook", "Tree",
"Widget", "Window", "Toolbar", "RootPage",
"Note", "Tab", "Page", "Pane"],
excluded=["DatePicker", "SubWindow", "Window", "Toolbar",
"Note", "Tab", "Page", "Pane", "RootPage", "FlashLabel",
"AnimationID", "ImageCache", "TickOptionBox", "Accelerators",
"FileEntry", "DirectoryEntry",
"FrameBox", "FrameLabel", "ContainerLog", "Menu"],
keepers=["Accelerators", "ImageCache", "Menu", "Toolbar"]
)
# process any command line arguments
self.ttkFlag = False
selectedTtkTheme = None
if handleArgs:
if args.f:
gui.setLogFile(args.f)
logFile = None # don't use any param logFile
tmplevel, logLevel = logLevel, None
if args.c: gui.setLogLevel("CRITICAL")
elif args.e: gui.setLogLevel("ERROR")
elif args.w: gui.setLogLevel("WARNING")
elif args.i: gui.setLogLevel("INFO")
elif args.d: gui.setLogLevel("DEBUG")
elif args.t: gui.setLogLevel("TRACE")
else: logLevel = tmplevel
if logFile is not None: gui.setLogFile(logFile)
if logLevel is not None: gui.setLogLevel(logLevel)
if handleArgs:
if args.l: self._language = args.l
if args.ttk:
useTtk = True
if args.ttk is not True:
selectedTtkTheme = args.ttk
if args.s:
self.useSettings = True
if args.s is not True:
self.settingsFile = args.s
# configure as ttk
if useTtk:
self._useTtk()
if useTtk is not True:
selectedTtkTheme = useTtk
# a stack to hold containers as being built
# done here, as initArrays is called elsewhere - to reset the gubbins
self.containerStack = []
self.translations = {"POPUP":{}, "SOUND":{}, "EXTERNAL":{}}
# first up, set up all the data stores
self._initVars()
# dynamically create lots of functions for configuring stuff
self._buildConfigFuncs()
# language parser
self.configParser = None
# set up some default path locations
# this fails if in interactive mode....
try:
gui.exe_file = str(os.path.basename(theMain.__file__))
gui.exe_path = str(os.path.dirname(theMain.__file__))
except:
pass
gui.lib_file = os.path.abspath(__file__)
gui.lib_path = os.path.dirname(gui.lib_file)
# location of appJar
self.resource_path = os.path.join(gui.lib_path, "resources")
self.icon_path = os.path.join(self.resource_path, "icons")
self.sound_path = os.path.join(self.resource_path, "sounds")
self.appJarIcon = os.path.join(self.icon_path, "favicon.ico")
# user configurable
self.userImages = gui.exe_path
self.userSounds = gui.exe_path
# create the main window - topLevel
self.topLevel = Tk()
self.topLevel.bind('<Configure>', self._windowEvent)
def _setFocus(e):
try: e.widget.focus_set()
except: pass
# these are specifically to make right-click menus disappear on linux
self.topLevel.bind('<Button-1>', lambda e: _setFocus(e))
self.topLevel.bind('<Button-2>', lambda e: _setFocus(e))
self.topLevel.bind('<Button-3>', lambda e: _setFocus(e))
# override close button
self.topLevel.protocol("WM_DELETE_WINDOW", self.stop)
# temporarily hide it
self.topLevel.withdraw()
# used to keep a handle on the last pop-up dialog
# allows the dialog to be closed remotely
# mainly for test-automation
self.topLevel.POP_UP = None
# create a frame to store all the widgets
# now a canvas to allow animation...
self.appWindow = CanvasDnd(self.topLevel)
self.appWindow.pack(fill=BOTH, expand=True)
self.topLevel.canvasPane = self.appWindow
# set the windows title
if title is None:
title = "appJar" if gui.exe_file is None else gui.exe_file
self.setTitle(title)
# configure the geometry of the window
self.topLevel.escapeBindId = None # used to exit fullscreen
self.topLevel.stopFunction = None # used to exit fullscreen
self.topLevel.startFunction = None
# set the resize status - default to True
self.topLevel.locationSet = False
self.topLevel.ignoreSettings = False
self.topLevel.isFullscreen = False # records if we're in fullscreen - stops hideTitle from breaking
self.topLevel.displayed = True
if geom is not None: self.setSize(geom)
self.setResizable(True)
# 3 fonts used for most widgets
self._buttonFont = tkFont.Font(family="Helvetica", size=12,)
self._labelFont = tkFont.Font(family="Helvetica", size=12)
self._inputFont = tkFont.Font(family="Helvetica", size=12)
self._statusFont = tkFont.Font(family="Helvetica", size=12)
# dedicated font for access widget
self._accessFont = tkFont.Font(family="Arial", size=11,)
# dedicated font for links - forces bold & underlined, but updated with label fonts
self._linkFont = tkFont.Font(family="Helvetica", size=12, weight='bold', underline=1)
self.tableFont = tkFont.Font(family="Helvetica", size=12)
# create a menu bar - only shows if populated
# now created in menu functions, as it generated a blank line...
self.hasMenu = False
self.hasStatus = False
self.hasTb = False
self.tbPinned = True
self.pinBut = None
self.copyAndPaste = CopyAndPaste(self.topLevel, self)
# won't pack later, if we don't pack it here
class Toolbar(frameBase, object):
def __init__(self, master, **kwargs):
super(Toolbar, self).__init__(master, **kwargs)
class WidgetContainer(frameBase, object):
def __init__(self, master, **kwargs):
super(WidgetContainer, self).__init__(master, **kwargs)
self.tb = Toolbar(self.appWindow)
if not self.ttkFlag:
self.tb.config(bd=1, relief=RAISED)
else:
self.tb.config(style="Toolbar.TFrame")
# self.tb.pack(side=TOP, fill=X)
self.tbMinMade = False
# create the main container for this GUI
container = WidgetContainer(self.appWindow)
# container = Label(self.appWindow) # made as a label, so we can set an image
if not self.ttkFlag:
container.config(padx=2, pady=2, background=self.topLevel.cget("bg"))
container.pack(fill=BOTH, expand=True)
self._addContainer("root", self.Widgets.RootPage, container, 0, 1)
# set up the main container to be able to host an image
self._configBg(container)
if self.platform == self.WINDOWS and showIcon:
try:
self.setIcon(self.appJarIcon)
except: # file not found
gui.trace("Error setting Windows default icon")
# set the ttk theme
if self.ttkFlag:
self.setTtkTheme(selectedTtkTheme)
# for configuring event processing
self.EVENT_SIZE = 1000
self.EVENT_SPEED = 100
self.preloadAnimatedImageId = None
self.processQueueId = None
# an array to hold any threaded events....
self.events = []
self.pollTime = 250
self._fastStop = False
self.configure(**kwargs)
# special bindings
self._globalBindings()
self.built = True
def _globalBindings(self):
def _selectEntry(event):
event.widget.select_range(0, 'end')
def _selectText(event):
event.widget.tag_add("sel","1.0","end")
def _scrollPaste(event):
event.widget.event_generate('<<Paste>>')
event.widget.see(END)
if self.GET_PLATFORM() == self.MAC:
self.topLevel.bind_class("Text", "<Command-a>", _selectText)
self.topLevel.bind_class("Entry", "<Command-a>", _selectEntry)
self.topLevel.bind_class("Text", "<Command-v>", _scrollPaste)
else:
self.topLevel.bind_class("Text", "<Control-a>", _selectText)
self.topLevel.bind_class("Entry", "<Control-a>", _selectEntry)
self.topLevel.bind_class("Text", "<Control-v>", _scrollPaste)
def _handleArgs(self):
""" internal function to handle command line arguments """
parser = argparse.ArgumentParser(
description="appJar - the easiest way to create GUIs in python",
epilog="For more information, go to: http://appJar.info"
)
parser.add_argument("-v", "--version", action="version", version=gui.SHOW_VERSION(), help="show version information and exit")
logGroup = parser.add_mutually_exclusive_group()
logGroup.add_argument("-c", action="store_const", const=True, help="only log CRITICAL messages")
logGroup.add_argument("-e", action="store_const", const=True, help="log ERROR messages and above")
logGroup.add_argument("-w", action="store_const", const=True, help="log WARNING messages and above")
logGroup.add_argument("-i", action="store_const", const=True, help="log INFO messages and above")
logGroup.add_argument("-d", action="store_const", const=True, help="log DEBUG messages and above")
logGroup.add_argument("-t", action="store_const", const=True, help="log TRACE messages and above")
parser.add_argument("-l", metavar="LANGUAGE.ini", help="set a language file to use")
parser.add_argument("-f", metavar="file.log", help="set a log file to use")
parser.add_argument("-s", metavar="SETTINGS", const=True, nargs="?", help="load settings, from an optional file name")
parser.add_argument("--ttk", metavar="THEME", const=True, nargs="?", help="enable ttk, with an optional theme")
return parser.parse_args()
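# Usage sketch (added for illustration, not part of the original source):
# these flags are parsed from the command line of the hosting script, e.g.
#
#     python myapp.py -d --ttk equilux -s my_settings.ini -l FRENCH
#
# would enable DEBUG logging, switch on ttk with a theme, load settings from
# a file and use FRENCH.ini translations. The script, theme and settings
# file names here are hypothetical.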
# function to check on mode
def _checkMode(self):
""" internal function to warn about issues in certain modes """
# detect if we're in interactive mode
if hasattr(sys, 'ps1'):
self.warn("Interactive mode is not fully tested, some features might not work.")
else:
if sys.flags.interactive:
self.warn("Postmortem Interactive mode is not fully tested, some features might not work.")
# also, check for iPython
try:
__IPYTHON__
except NameError:
#no iPython - ignore
pass
else:
self.warn("iPython is not fully tested, some features might not work.")
def _configBg(self, container):
""" internal function to set up a label as the BG """
# set up a background image holder
# alternative to label option above, as label doesn't update widgets
# properly
class BgLabel(labelBase, object):
def __init__(self, master, **kwargs):
super(BgLabel, self).__init__(master, **kwargs)
if not self.ttkFlag:
self.bgLabel = BgLabel(container, anchor=CENTER, font=self._getContainerProperty('labelFont'), background=self._getContainerBg())
else:
self.bgLabel = ttk.Label(container)
self.bgLabel.place(x=0, y=0, relwidth=1, relheight=1)
container.image = None
#####################################
# TTK functions
#####################################
def _useTtk(self):
""" enables use of ttk """
global ttk, frameBase, labelBase, scaleBase, entryBase
try:
import ttk
except:
try:
from tkinter import ttk
except:
gui.error("ttk not available")
return
self.ttkFlag = True
frameBase = ttk.Frame
labelBase = ttk.Label
scaleBase = ttk.Scale
entryBase = ttk.Entry
gui.trace("Mode switched to ttk")
def _loadTtkThemes(self):
global ThemedStyle
if ThemedStyle is None:
try:
from ttkthemes import ThemedStyle
self.ttkStyle = ThemedStyle(self.topLevel)
except:
ThemedStyle = False
def getTtkThemes(self, loadThemes=False):
if loadThemes:
self._loadTtkThemes()
if not ThemedStyle:
self.error("Custom ttkThemes not available")
return self.ttkStyle.theme_names()
def getTtkTheme(self):
return self.ttkStyle.theme_use()
# only call this after the main tk has been created
# otherwise we get two windows!
def setTtkTheme(self, theme=None):
""" sets the ttk theme to use """
self.ttkStyle = ttk.Style()
gui.trace("Switching ttk theme to: %s", theme)
if theme is not None:
try:
self.ttkStyle.theme_use(theme)
except:
gui.trace("no basic ttk theme named %s found, searching for additional themes", theme)
self._loadTtkThemes()
if not ThemedStyle:
self.error("ttk theme: %s unavailable. Try one of: %s", theme, str(self.ttkStyle.theme_names()))
else:
self.ttkStyle.set_theme(theme)
# set up our ttk styles
self.ttkStyle.configure("DefaultText.TEntry", foreground="grey")
self.ttkStyle.configure("ValidationEntryValid.TEntry", foreground="#4CC417", highlightbackground="#4CC417", highlightcolor="#4CC417", highlightthickness='20')
self.ttkStyle.configure("ValidationEntryInvalid.TEntry", foreground="#FF0000", highlightbackground="#FF0000", highlightcolor="#FF0000", highlightthickness='20')
self.ttkStyle.configure("ValidationEntryWait.TEntry", foreground="#000000", highlightbackground="#000000", highlightcolor="#000000", highlightthickness='20')
self.ttkStyle.configure("ValidationEntryValid.TLabel", foreground="#4CC417")
self.ttkStyle.configure("ValidationEntryInvalid.TLabel", foreground="#FF0000")
self.ttkStyle.configure("ValidationEntryWait.TLabel", foreground="#000000")
self.ttkStyle.configure("Link.TLabel", foreground="#0000ff")
self.ttkStyle.configure("LinkOver.TLabel", foreground="#3366ff")
#toolbars
self.ttkStyle.configure("Toolbar.TFrame")
self.ttkStyle.configure("Toolbar.TButton", padding=0, expand=0)
# self.fgColour = self.topLevel.cget("foreground")
# self.buttonFgColour = self.topLevel.cget("foreground")
# self.labelFgColour = self.topLevel.cget("foreground")
# set a property for ttk theme
ttkTheme = property(getTtkTheme, setTtkTheme)
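# Usage sketch (added for illustration, not part of the original source):
#
#     app = gui(useTtk=True)
#     app.ttkTheme = "clam"        # property wraps setTtkTheme()
#     print(app.getTtkThemes())    # list of available theme names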
###############################################################
# library loaders - on demand loading of different classes
###############################################################
def _loadRandom(self):
""" loasd random libraries """
global random
if random is None:
import random
def _loadTurtle(self):
""" loasd turtle libraries """
global turtle
try:
import turtle
except:
turtle = False
self.error("Turtle not available")
def _loadConfigParser(self):
""" loads the ConfigParser, used by internationalisation & settings """
global ConfigParser, ParsingError, codecs
if ConfigParser is None:
try:
from configparser import ConfigParser
from configparser import ParsingError
import codecs
except:
try:
from ConfigParser import ConfigParser
from ConfigParser import ParsingError
import codecs
except:
ConfigParser = ParsingError = codecs = False
self.configParser = None
return
self.configParser = ConfigParser()
self.configParser.optionxform = str
def _loadHashlib(self):
""" loads hashlib - used by text area """
global hashlib
if hashlib is None:
try:
import hashlib
except:
hashlib = False
def _loadTooltip(self):
""" loads tooltips - used all over """
global ToolTip
if ToolTip is None:
try:
from appJar.lib.tooltip import ToolTip
except:
ToolTip = False
def _loadMatplotlib(self):
""" loads matPlotLib """
global FigureCanvasTkAgg, Figure
if FigureCanvasTkAgg is None:
try:
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
except:
FigureCanvasTkAgg = Figure = False
def _loadExternalDnd(self):
""" loads external dnd - from other applications """
global EXTERNAL_DND
if EXTERNAL_DND is None:
try:
tkdndlib = os.path.join(os.path.dirname(os.path.abspath(__file__)), "lib", "tkdnd2.8")
os.environ['TKDND_LIBRARY'] = tkdndlib
from appJar.lib.TkDND_wrapper import TkDND as EXTERNAL_DND
self.dnd = EXTERNAL_DND(self.topLevel)
except:
EXTERNAL_DND = False
def _loadInternalDnd(self):
""" loads the internal dnd libraries """
global INTERNAL_DND, types
if INTERNAL_DND is None:
try:
import Tkdnd as INTERNAL_DND
import types as types
except:
try:
from tkinter import dnd as INTERNAL_DND
import types as types
except:
INTERNAL_DND = False
types = False
def _loadURL(self):
""" loads ibraries used by googlemaps widget """
global base64, urlencode, urlopen, urlretrieve, quote_plus, json, Queue
self._loadThreading()
if Queue:
if urlencode is None:
try: # python 2
from urllib import urlencode, urlopen, urlretrieve, quote_plus
import json
import base64
except ImportError: # python 3
try:
from urllib.parse import urlencode
from urllib.parse import quote_plus
from urllib.request import urlopen
from urllib.request import urlretrieve
import json
import base64
except:
base64 = urlencode = urlopen = urlretrieve = quote_plus = json = Queue = False
else:
base64 = urlencode = urlopen = urlretrieve = quote_plus = json = Queue = False
def _loadThreading(self):
""" loads threading classes, and sets up queue """
global Thread, Queue
if Thread is None:
try:
from threading import Thread
import Queue
except ImportError: # python 3
try:
from threading import Thread
import queue as Queue
except:
Thread = Queue = False
return
self.eventQueue = Queue.Queue(maxsize=self.EVENT_SIZE)
self._processEventQueue()
def _loadNanojpeg(self):
""" loads jpeg support """
global nanojpeg, array
if nanojpeg is None:
try:
from appJar.lib import nanojpeg
import array
except:
nanojpeg = False
array = False
def _loadWinsound(self):
""" loads winsound support on Windows """
global winsound
if winsound is None:
if platform() in ["win32", "Windows"]:
import winsound
else:
winsound = False
def _importPngimagetk(self):
""" loads PNG support """
global PngImageTk
if PngImageTk is None:
try:
from appJar.lib.tkinter_png import PngImageTk
except:
PngImageTk = False
def _importAjtree(self):
""" loads tree support - and creates tree classes """
global parseString, TreeItem, TreeNode
if TreeNode is None:
try:
from idlelib.TreeWidget import TreeItem, TreeNode
except:
try:
from idlelib.tree import TreeItem, TreeNode
except:
gui.warning("no trees")
TreeItem = TreeNode = parseString = False
if TreeNode is not False:
try:
from xml.dom.minidom import parseString
except:
gui.warning("no parse string")
TreeItem = TreeNode = parseString = False
return
def _importSqlite3(self):
""" loads sqlite3 """
global sqlite3
if sqlite3 is None:
try:
import sqlite3
except:
sqlite3 = False
def _importWebBrowser(self):
""" loads webbrowser """
global webbrowser
if webbrowser is None:
try:
import webbrowser
except:
webbrowser = False
#####################################
# FUNCTIONS FOR UNIVERSAL DND
#####################################
def _registerExternalDragSource(self, title, widget, function=None):
""" register a widget to start external drag events """
self._loadExternalDnd()
if EXTERNAL_DND is not False:
try:
self.dnd.bindsource(widget, self._startExternalDrag, 'text/uri-list')
self.dnd.bindsource(widget, self._startExternalDrag, 'text/plain')
widget.dndFunction = function
widget.dragData = None
except:
# dnd not working on this platform
raise Exception("Failed to register external Drag'n Drop for: " + str(title))
else:
raise Exception("External Drag'n Drop not available on this platform")
def _registerExternalDropTarget(self, title, widget, function=None, replace=True):
""" register a widget to receive external drag events """
self._loadExternalDnd()
if EXTERNAL_DND is not False:
try:
self.dnd.bindtarget(widget, self._receiveExternalDrop, 'text/uri-list')
self.dnd.bindtarget(widget, self._receiveExternalDrop, 'text/plain')
# cater for new drop parameter in new setters
if function == True: function = None
widget.dndFunction = function
widget.dropData = None
widget.dropReplace = replace
except:
# dnd not working on this platform
raise Exception("Failed to register external Drag'n Drop for: " + str(title))
else:
raise Exception("External Drag'n Drop not available on this platform")
def _registerInternalDragSource(self, kind, title, widget, function=None):
""" register a widget to start internal drag events """
self._loadInternalDnd()
name = None
if kind == self.Widgets.Label:
name = self.getLabel(title)
if INTERNAL_DND is not False:
try:
widget.bind('<ButtonPress>', lambda e: self._startInternalDrag(e, title, name, widget))
widget.dnd_canvas = self._getCanvas().canvasPane
gui.trace("DND drag source created: %s on canvas %s", widget, widget.dnd_canvas)
except:
raise Exception("Failed to register internal Drag'n Drop for: " + str(title))
else:
raise Exception("Internal Drag'n Drop not available on this platform")
def _registerInternalDropTarget(self, widget, function):
""" register a widget to receive internal drag events """
gui.trace("<<WIDGET._registerInternalDropTarget>> %s", widget)
self._loadInternalDnd()
if not INTERNAL_DND:
raise Exception("Internal Drag'n Drop not available on this platform")
# called by DND class, when looking for a DND target
def dnd_accept(self, source, event):
gui.trace("<<WIDGET.dnd_accept>> %s - %s", widget, self.dnd_canvas)
return self
# This is called when the mouse pointer goes from outside the
# Target Widget to inside the Target Widget.
def dnd_enter(self, source, event):
gui.trace("<<WIDGET.dnd_enter>> %s", widget)
XY = gui.MOUSE_POS_IN_WIDGET(self,event)
source.appear(self, XY)
# This is called when the mouse pointer goes from inside the
# Target Widget to outside the Target Widget.
def dnd_leave(self, source, event):
gui.trace("<<WIDGET.dnd_leave>> %s", widget)
# hide the dragged object
source.vanish()
#This is called if the DraggableWidget is being dropped on us.
def dnd_commit(self, source, event):
source.vanish(all=True)
gui.trace("<<WIDGET.dnd_commit>> %s Object received=%s", widget, source)
#This is called when the mouse pointer moves within the TargetWidget.
def dnd_motion(self, source, event):
gui.trace("<<WIDGET.dnd_motion>> %s", widget)
XY = gui.MOUSE_POS_IN_WIDGET(self,event)
# move the dragged object
source.move(self, XY)
def keepWidget(self, title, name):
if self.drop_function is not None:
return self.drop_function(title, name)
else:
self.config(text=name)
return True
widget.dnd_accept = types.MethodType(dnd_accept, widget)
widget.dnd_enter = types.MethodType(dnd_enter, widget)
widget.dnd_leave = types.MethodType(dnd_leave, widget)
widget.dnd_commit = types.MethodType(dnd_commit, widget)
widget.dnd_motion = types.MethodType(dnd_motion, widget)
widget.keepWidget = types.MethodType(keepWidget, widget)
# save the underlying canvas
widget.dnd_canvas = self._getCanvas().canvasPane
widget.drop_function = function
gui.trace("DND target created: %s on canvas %s", widget, widget.dnd_canvas)
def _startInternalDrag(self, event, title, name, widget):
""" called when the user initiates an internal drag event """
gui.trace("Internal drag started for %s on %s", title, widget)
x, y = gui.MOUSE_POS_IN_WIDGET(widget, event, False)
width = x / widget.winfo_width()
height = y / widget.winfo_height()
thingToDrag = DraggableWidget(widget.dnd_canvas, title, name, (width, height))
INTERNAL_DND.dnd_start(thingToDrag, event)
def _startExternalDrag(self, event):
""" starts external drags - not yet supported """
widgType = gui.GET_WIDGET_TYPE(event.widget)
self.warn("Unable to initiate drag events: %s", widgType)
def _receiveExternalDrop(self, event):
""" receives external drag events """
widgType = gui.GET_WIDGET_TYPE(event.widget)
event.widget.dropData = event.data
if not hasattr(event.widget, 'dndFunction'):
self.warn("Error - external drop target not correctly configured: %s", widgType)
elif event.widget.dndFunction is not None:
event.widget.dndFunction(event.data)
else:
if widgType in ["Entry", "AutoCompleteEntry", "SelectableLabel"]:
if widgType == "SelectableLabel": event.widget.configure(state="normal")
if event.widget.dropReplace:
event.widget.delete(0, END)
event.widget.insert(END, event.data)
event.widget.focus_set()
event.widget.icursor(END)
if widgType == "SelectableLabel": event.widget.configure(state="readonly")
elif widgType in ["TextArea", "AjText", "ScrolledText", "AjScrolledText"]:
if event.widget.dropReplace:
event.widget.delete(1.0, END)
event.widget.insert(END, event.data)
event.widget.focus_set()
event.widget.see(END)
elif widgType in ["Label"]:
for k, v in self.widgetManager.group(self.Widgets.Image).items():
if v == event.widget:
try:
imgTemp = self.userImages
image = self._getImage(event.data, False)
self._populateImage(k, image)
self.userImages = imgTemp
except:
self.errorBox("Error loading image", "Unable to load image: " + str(event.data))
return
for k, v in self.widgetManager.group(self.Widgets.Label).items():
if v == event.widget:
self.setLabel(k, event.data)
return
elif widgType in ["Listbox"]:
for k, v in self.widgetManager.group(self.Widgets.ListBox).items():
if v == event.widget:
self.addListItem(k, event.data)
return
elif widgType in ["Message"]:
for k, v in self.widgetManager.group(self.Widgets.Message).items():
if v == event.widget:
self.setMessage(k, event.data)
return
else:
self.warn("Unable to receive drop events: %s", widgType)
#####################################
# set the arrays we use to store everything
#####################################
def _initVars(self, reset=False):
# validate function callbacks - used by numeric texts
# created first time a widget is used
self.validateNumeric = None
self.validateSpinBox = None
self.labWidth = {} # minimum label width for label combos
self.doFlash = False # set up flash variable
self.hasTitleBar = True # used to hide/show title bar
self.splashConfig = None # splash screen?
self.winIcon = None # store the path to any icon
self.dnd = None # the dnd manager
self.accessMade = False # accessibility subWindow
# collections of widgets, widget name is key
if not reset: self.widgetManager = WidgetManager()
else: self.widgetManager.reset(self.Widgets.keepers)
#####################################
# Language/Translation functions
#####################################
def translate(self, key, default=None):
""" returns a translated version of the key, using the current language
if none found, returns the default value """
return self._translate(key, "EXTERNAL", default)
def _translateSound(self, key):
""" internal wrapper to translate sounds """
return self._translate(key, "SOUND", key)
def _translatePopup(self, key, value):
""" internal wrapper to translate popups """
pop = self._translate(key, "POPUP")
if pop is None:
return (key, value)
else:
return (pop[0], pop[1])
def _translate(self, key, section, default=None):
""" returns the relevant key from the relevant section in the internally
held translation dictionary - prepopulated when language was set """
if key in self.translations[section]:
return self.translations[section][key]
else:
return default
def getLanguage(self):
''' returns the current language '''
return self._language
def setLanguage(self, language):
""" wrapper for changeLanguage() """
self.changeLanguage(language)
# function to update languages
def changeLanguage(self, language):
""" changes the language used by the GUI
will iterate through all widgets and update their text
as well as populate a translation dictionary for later lookups """
self._loadConfigParser()
if not ConfigParser:
self.error("Internationalisation not supported")
return
fileName = language.upper() + ".ini"
gui.trace("Loading language file: %s", fileName)
if not PYTHON2:
try:
with codecs.open(fileName, "r", "utf8") as langFile:
self.configParser.read_file(langFile)
except FileNotFoundError:
self.error("Invalid language, file not found: %s", fileName)
return
else:
try:
try:
with codecs.open(fileName, "r", "utf8") as langFile:
self.configParser.read_file(langFile)
except AttributeError:
with codecs.open(fileName, "r", "utf8") as langFile:
self.configParser.readfp(langFile)
except IOError:
self.error("Invalid language, file not found: %s", fileName)
return
except ParsingError:
self.error("Translation failed - language file contains errors, ensure there is no whitespace at the beginning of any lines.")
return
gui.trace("Switching to: %s", language)
self._language = language
self.translations = {"POPUP":{}, "SOUND":{}, "EXTERNAL":{}}
# loop through each section, get the relative set of widgets
# change the text
for section in self.configParser.sections():
getWidgets = True
section = section.upper()
gui.trace("\tSection: %s", section)
# convert the section title to its code
if section == "CONFIG":
# skip the config section (for now)
gui.trace("\tSkipping CONFIG")
continue
elif section == "TITLE":
kind = self.Widgets.SubWindow
elif section.startswith("TOOLTIP-"):
kind = "TOOLTIP"
getWidgets = False
elif section in ["SOUND", "EXTERNAL", "POPUP"]:
for (key, val) in self.configParser.items(section):
if section == "POPUP": val = val.strip().split("\n")
self.translations[section][key] = val
gui.trace("\t\t%s: %s", key, val)
continue
elif section == "MENUBAR":
for (key, val) in self.configParser.items(section):
key = key.strip().split("-")
gui.trace("\t\t%s: %s", key, val)
if len(key) == 1:
try:
self.renameMenu(key[0], val)
except:
self.warn("Invalid key")
elif len(key) == 2:
try:
self.renameMenuItem(key[0], key[1], val)
except:
self.warn("Invalid key")
continue
else:
try:
kind = self.Widgets.getIgnoreCase(section)
except Exception:
self.warn("Invalid config section: %s", section)
continue
# if necessary, use the code to get the widget list
if getWidgets:
widgets = self.widgetManager.group(kind)
if kind in [self.Widgets.Scale]:
self.warn("No text is displayed in %s. Maybe it has a Label?", section)
continue
elif kind in [self.Widgets.TextArea, self.Widgets.Meter, self.Widgets.PieChart, self.Widgets.Tree]:
self.warn("No text is displayed in %s", section)
continue
elif kind in [self.Widgets.name(self.Widgets.SubWindow)]:
for (key, val) in self.configParser.items(section):
gui.trace("\t\t%s: %s", key, val)
if key.lower() == "appjar":
self.setTitle(val)
elif key.lower() == "splash":
if self.splashConfig is not None:
gui.trace("\t\t Updated SPLASH to: %s", val)
self.splashConfig['text'] = val
else:
gui.trace("\t\t No SPLASH to update")
elif key.lower() == "statusbar":
gui.trace("\tSetting STATUSBAR: %s", val)
self.setStatusbarHeader(val)
else:
try:
widgets[key].title(val)
except KeyError:
self.warn("Invalid SUBWINDOW: %s", key)
elif kind in [self.Widgets.ListBox]:
for k in widgets.keys():
lb = widgets[k]
# convert data to a list
if self.configParser.has_option(section, k):
data = self.configParser.get(section, k)
else:
data = lb.DEFAULT_TEXT
data = data.strip().split("\n")
# tidy up the list
data = [item.strip() for item in data if len(item.strip()) > 0]
self.updateListBox(k, data)
elif kind in [self.Widgets.SpinBox]:
for k in widgets.keys():
sb = widgets[k]
# convert data to a list
if self.configParser.has_option(section, k):
data = self.configParser.get(section, k)
else:
data = sb.DEFAULT_TEXT
data = data.strip().split("\n")
# tidy up the list
data = [item.strip() for item in data if len(item.strip()) > 0]
self.changeSpinBox(k, data)
elif kind in [self.Widgets.OptionBox]:
for k in widgets.keys():
ob = widgets[k]
# convert data to a list
if self.configParser.has_option(section, k):
data = self.configParser.get(section, k)
else:
data = ob.DEFAULT_TEXT
data = data.strip().split("\n")
# tidy up the list
data = [item.strip() for item in data if len(item.strip()) > 0]
self.changeOptionBox(k, data)
elif kind in [self.Widgets.RadioButton]:
for (key, val) in self.configParser.items(section):
gui.trace("\t\t%s: %s", key, val)
keys = key.split("-")
if len(keys) != 2:
self.warn("Invalid RADIOBUTTON key: %s", key)
else:
try:
rbs = self.widgetManager.get(self.Widgets.RadioButton, keys[0])
except KeyError:
self.warn("Invalid RADIOBUTTON key: %s", keys[0])
continue
for rb in rbs:
if rb.DEFAULT_TEXT == keys[1]:
rb["text"] = val
break
elif kind in [self.Widgets.TabbedFrame]:
for (key, val) in self.configParser.items(section):
gui.trace("\t\t%s: %s", key, val)
keys = key.split("-")
if len(keys) != 2:
self.warn("Invalid TABBEDFRAME key: %s", key)
else:
try:
self.setTabText(keys[0], keys[1], val)
except ItemLookupError:
self.warn("Invalid TABBEDFRAME: %s with TAB: %s" , keys[0], keys[1])
elif kind in [self.Widgets.Properties]:
for (key, val) in self.configParser.items(section):
gui.trace("\t\t%s: %s", key, val)
keys = key.split("-")
if len(keys) != 2:
self.warn("Invalid PROPERTIES key: %s", key)
else:
try:
self.setPropertyText(keys[0], keys[1], val)
except ItemLookupError:
self.warn("Invalid PROPERTIES: %s", keys[0])
except KeyError:
self.warn("Invalid PROPERTY: %s", keys[1])
elif kind == self.Widgets.Tree:
for (key, val) in self.configParser.items(section):
gui.trace("\t\t%s: %s", key, val)
keys = key.split("-")
if len(keys) != 2:
self.warn("Invalid GRID key: %s", key)
else:
if keys[1] not in ["actionHeading", "actionButton", "addButton"]:
self.warn("Invalid GRID label: %s for GRID: %s", keys[1], keys[0])
else:
try:
self.confGrid(keys[0], keys[1], val)
except ItemLookupError:
self.warn("Invalid GRID: %s", keys[0])
elif kind == self.Widgets.PagedWindow:
for (key, val) in self.configParser.items(section):
gui.trace("\t\t%s: %s", key, val)
keys = key.split("-")
if len(keys) != 2:
self.warn("Invalid PAGEDWINDOW key: %s", key)
else:
if keys[1] not in ["prevButton", "nextButton", "title"]:
self.warn("Invalid PAGEDWINDOW label: %s for PAGEDWINDOW: %s", keys[1], keys[0])
else:
try:
widgets[keys[0]].config(**{keys[1]:val})
except KeyError:
self.warn("Invalid PAGEDWINDOW: %s", keys[0])
elif kind == self.Widgets.Entry:
for k in widgets.keys():
ent = widgets[k]
if self.configParser.has_option(section, k):
data = self.configParser.get(section, k)
else:
data = ent.DEFAULT_TEXT
gui.trace("\t\t%s: %s", k, data)
self.setEntryDefault(k, data)
elif kind in [self.Widgets.Image]:
for k in widgets.keys():
if self.configParser.has_option(section, k):
data = str(self.configParser.get(section, k))
try:
self.setImage(k, data)
gui.trace("\t\t%s: %s", k, data)
except:
self.error("Failed to update image: %s to: %s", k, data)
else:
gui.trace("No translation for: %s", k)
elif kind in [self.Widgets.Label, self.Widgets.Button, self.Widgets.CheckBox, self.Widgets.Message,
self.Widgets.Link, self.Widgets.LabelFrame, self.Widgets.ToggleFrame]:
for k in widgets.keys():
widg = widgets[k]
# skip validation labels - we don't need to translate them
try:
if kind == self.Widgets.Label and widg.isValidation:
gui.trace("\t\t%s: skipping, validation label", k)
continue
except:
pass
if self.configParser.has_option(section, k):
data = str(self.configParser.get(section, k))
else:
data = widg.DEFAULT_TEXT
gui.trace("\t\t%s: %s", k, data)
widg.config(text=data)
elif kind == self.Widgets.Toolbar:
for k in widgets.keys():
but = widgets[k]
if but.image is None:
if self.configParser.has_option(section, k):
data = str(self.configParser.get(section, k))
else:
data = but.DEFAULT_TEXT
gui.trace("\t\t%s: %s", k, data)
but.config(text = data)
elif kind == "TOOLTIP":
try:
kind = self.Widgets.name(self.Widgets.getIgnoreCase(section.split("-")[1]))
func = getattr(self, "set"+kind+"Tooltip")
except KeyError:
self.warn("Invalid config section: TOOLTIP-%s", section)
return
gui.trace("Parsing TOOLTIPs for: %s", kind)
for (key, val) in self.configParser.items(section):
try:
func(key, val)
except ItemLookupError:
self.warn("Invalid TOOLTIP for: %s, with key: %s", kind, key)
continue
else:
self.warn("Unsupported widget: %s", section)
continue
language = property(getLanguage, changeLanguage)
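# Usage sketch (added for illustration, not part of the original source):
# setting the property loads <LANGUAGE>.ini from the working directory and
# re-badges every named widget:
#
#     app.language = "french"   # reads FRENCH.ini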
def showSplash(self, text="appJar", fill="#FF0000", stripe="#000000", fg="#FFFFFF", font=44):
""" creates a splash screen to show at start up """
self.splashConfig= {'text':text, 'fill':fill, 'stripe':stripe, 'fg':fg, 'font':font}
##################################################
### Stuff for logging
##################################################
@staticmethod
def setLogFile(fileName):
""" sets the filename for logging messages """
# Remove all handlers associated with the root logger object.
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
logging.basicConfig(level=logging.INFO, filename=fileName, format='%(asctime)s %(name)s:%(levelname)s: %(message)s')
gui.info("Switched to logFile: %s", fileName)
def _setLogFile(self, fileName):
''' necessary so we can access this as a property '''
gui.setLogFile(fileName)
def getLogFile(self):
return logging.root.handlers[0].baseFilename
logFile = property(getLogFile, _setLogFile)
@staticmethod
def setLogLevel(level):
""" main function for setting the logging level
provide one of: INFO, DEBUG, WARNING, ERROR, CRITICAL, EXCEPTION, None """
logging.getLogger("appJar").setLevel(getattr(logging, level.upper()))
gui.info("Log level changed to: %s", level)
def getLogLevel(self):
return logging.getLevelName(logging.getLogger("appJar").getEffectiveLevel())
def _setLogLevel(self, level):
''' necessary so we can access this as a property '''
gui.setLogLevel(level)
logLevel = property(getLogLevel, _setLogLevel)
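# Logging usage sketch (illustrative): the static setters and the matching
# properties are interchangeable, e.g.:
#   gui.setLogLevel("DEBUG")        # classic setter
#   app.logFile = "myApp.log"       # property alias for setLogFile()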
@staticmethod
def exception(message, *args):
""" wrapper for logMessage - setting level to EXCEPTION """
gui.logMessage(message, "EXCEPTION", *args)
@staticmethod
def critical(message, *args):
""" wrapper for logMessage - setting level to CRITICAL """
gui.logMessage(message, "CRITICAL", *args)
@staticmethod
def error(message, *args):
""" wrapper for logMessage - setting level to ERROR """
gui.logMessage(message, "ERROR", *args)
@staticmethod
def warn(message, *args):
""" wrapper for logMessage - setting level to WARNING """
gui.logMessage(message, "WARNING", *args)
@staticmethod
def debug(message, *args):
""" wrapper for logMessage - setting level to DEBUG """
gui.logMessage(message, "DEBUG", *args)
@staticmethod
def trace(message, *args):
""" wrapper for logMessage - setting level to TRACE """
gui.logMessage(message, "TRACE", *args)
@staticmethod
def info(message, *args):
""" wrapper for logMessage - setting level to INFO """
gui.logMessage(message, "INFO", *args)
@staticmethod
def logMessage(msg, level, *args):
""" allows user to log a message - provide a message and a log level
any %s tags in the message will be replaced by the relevant positional *args """
frames = inspect.stack()
# try to ensure we only log extras if we're called from above functions
if frames[1][3] in ("exception", "critical", "error", "warn", "debug", "trace", "info"):
callFrame = ""
try:
progName = gui.exe_file
for s in frames:
if progName in s[1]:
callFrame = s
break
except: pass
if callFrame != "":
callFrame = "Line " + str(callFrame[2])
# user generated call
if "appjar.py" not in frames[2][1] or frames[2][3] == "handlerFunction":
if callFrame != "":
msg = "[" + callFrame + "]: "+str(msg)
# appJar logging
else:
if callFrame != "":
msg = "["+callFrame + "->" + str(frames[2][2]) +"/"+str(frames[2][3])+"]: "+str(msg)
else:
msg = "["+str(frames[2][2]) +"/"+str(frames[2][3])+"]: "+str(msg)
logger = logging.getLogger("appJar")
level = level.upper()
if level == "EXCEPTION": logger.exception(msg, *args)
elif level == "CRITICAL": logger.critical(msg, *args)
elif level == "ERROR": logger.error(msg, *args)
elif level == "WARNING": logger.warning(msg, *args)
elif level == "INFO": logger.info(msg, *args)
elif level == "DEBUG": logger.debug(msg, *args)
elif level == "TRACE": logger.trace(msg, *args)
##############################################################
# Event Loop - must always be called at end
##############################################################
def __enter__(self):
""" allows gui to be used as a ContextManager """
gui.trace("ContextManager: initialised")
return self
def __exit__(self, eType, eValue, eTrace):
""" allows gui to be used as a ContextManager
- calls the go() function """
if eType is not None:
self.error("ContextManager failed: %s", eValue)
return False
else:
gui.trace("ContextManager: starting")
self.go(startWindow=self.startWindow)
return True
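# ContextManager usage sketch (illustrative): go() is called automatically
# when the with block exits, so widgets are added inside the block:
#   with gui("ctxDemo") as app:
#       app.addLabel("l1", "hello")
#   # mainloop starts here, as the with block closes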
def go(self, language=None, startWindow=None):
""" Most important function! starts the GUI """
# check if we have a command line language
if self._language is not None:
language = self._language
# if language is populated, we are in internationalisation mode
# call the changeLanguage function - to re-badge all the widgets
if language is not None:
self.changeLanguage(language)
if self.splashConfig is not None:
gui.trace("SPLASH: %s", self.splashConfig)
splash = SplashScreen(
self.topLevel,
self.splashConfig['text'],
self.splashConfig['fill'],
self.splashConfig['stripe'],
self.splashConfig['fg'],
self.splashConfig['font']
)
self.topLevel.withdraw()
self._bringToFront(splash)
# check the containers have all been stopped
if len(self.containerStack) > 1:
for i in range(len(self.containerStack) - 1, 0, -1):
kind = self.containerStack[i]['type']
if kind != self.Widgets.Pane:
self.warn("No stopContainer called on: %s", self.Widgets.name(kind))
# update any trees
for k in self.widgetManager.group(self.Widgets.Tree):
self.generateTree(k)
# create appJar menu, if no menuBar created
if not self.hasMenu:
self.addAppJarMenu()
if self.platform == self.WINDOWS:
self.menuBar.add_cascade(menu=self.widgetManager.get(self.Widgets.Menu, "WIN_SYS"))
self.topLevel.config(menu=self.menuBar)
if startWindow is not None:
self.startWindow = startWindow
gui.trace("startWindow parameter: %s", startWindow)
# pack it all in & make sure it's drawn
self.appWindow.pack(fill=BOTH)
if self.useSettings:
self.loadSettings(self.settingsFile)
self.topLevel.update_idletasks()
# check geom is set and set a minimum size, also positions the window if necessary
if not self.topLevel.locationSet:
self.setLocation('CENTER')
if not hasattr(self.topLevel, 'ms'):
self.setMinSize()
if self.splashConfig is not None:
time.sleep(3)
splash.destroy()
# user hasn't specified anything
if self.startWindow is None:
if not self.topLevel.displayed:
gui.trace("topLevel has been manually hidden - not showing in go()")
else:
gui.trace("Showing topLevel")
self._bringToFront()
self.topLevel.deiconify()
else:
gui.trace("hiding main window")
self.hide()
sw = self.widgetManager.get(self.Widgets.SubWindow, startWindow)
if sw.blocking:
raise Exception("Unable to start appjar with a blocking subWindow")
self.showSubWindow(startWindow)
# required to make the gui reopen after minimising
if self.GET_PLATFORM() == self.MAC:
    self.topLevel.createcommand('tk::mac::ReopenApplication', self._macReveal)
# start the call back & flash loops
self._poll()
self._flash()
# register start-up function
if self.topLevel.startFunction is not None:
self.topLevel.after_idle(self.topLevel.startFunction)
# start the main loop
try:
self.topLevel.mainloop()
except (KeyboardInterrupt, SystemExit):
gui.trace("appJar stopped through ^c or exit()")
self.stop()
except Exception as e:
self.exception(e)
self.stop()
def setStartFunction(self, func):
f = self.MAKE_FUNC(func, "start")
self.topLevel.startFunction = f
startFunction = property(fset=setStartFunction)
def _macReveal(self):
""" internal function to deiconify GUIs on mac """
if self.topLevel.state() != "withdrawn":
self.topLevel.deiconify()
for k, v in self.widgetManager.group(self.Widgets.SubWindow).items():
if v.state() == "normal":
self.showSubWindow(k)
def setStopFunction(self, function):
""" Set a function to call when the GUI is quit. Must return True or False """
tl = self._getTopLevel()
tl.stopFunction = function
# link to exit item in topMenu
# only if in root
if self._getContainerProperty('type') != self.Widgets.SubWindow:
tl.createcommand('exit', self.stop)
stopFunction = property(fset=setStopFunction)
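# Stop-function sketch (illustrative names, assuming appJar's yesNoBox dialog
# helper): the registered function vetoes the close by returning False;
# stop() only proceeds when it returns True:
#   def checkStop():
#       return app.yesNoBox("Confirm", "Really quit?")
#   app.stopFunction = checkStop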
def setSetting(self, name, value):
""" adds a setting to the settings file """
self.externalSettings[name] = value
def getSetting(self, name, default=None):
""" gets a setting form the settings file """
try: return self.externalSettings[name]
except: return default
def saveSettings(self, fileName="appJar.ini"):
""" saves the current settings to a file
called automatically by stop() of settings were loaded at start """
self._loadConfigParser()
if not ConfigParser:
self.error("Unable to save config file - no configparser")
return
settings = ConfigParser()
settings.optionxform = str
settings.add_section('GEOM')
geom = self.topLevel.geometry()
ms = self.topLevel.minsize()
ms = "%s,%s" % (ms[0], ms[1])
settings.set('GEOM', 'geometry', geom)
gui.trace("Save geom as: %s", geom)
settings.set('GEOM', 'minsize', ms)
settings.set('GEOM', "fullscreen", str(self.topLevel.attributes('-fullscreen')))
settings.set('GEOM', "state", str(self.topLevel.state()))
# get toolbar setting
if self.hasTb:
gui.trace("Saving toolbar settings")
settings.add_section("TOOLBAR")
settings.set("TOOLBAR", "pinned", str(self.tbPinned))
# get container settings
for k, v in self.widgetManager.group(self.Widgets.ToggleFrame).items():
gui.trace("Saving toggle %s", k)
if "TOGGLES" not in settings.sections(): settings.add_section("TOGGLES")
settings.set("TOGGLES", k, str(v.isShowing()))
for k, v in self.widgetManager.group(self.Widgets.TabbedFrame).items():
gui.trace("Saving tab %s", k)
if "TABS" not in settings.sections(): settings.add_section("TABS")
settings.set("TABS", k, str(v.getSelectedTab()))
for k, v in self.widgetManager.group(self.Widgets.PagedWindow).items():
gui.trace("Saving page %s", k)
if "PAGES" not in settings.sections(): settings.add_section("PAGES")
settings.set("PAGES", k, str(v.getPageNumber()))
for k, v in self.widgetManager.group(self.Widgets.SubWindow).items():
if "SUBWINDOWS" not in settings.sections(): settings.add_section("SUBWINDOWS")
if v.shown:
v.update()
settings.set("SUBWINDOWS", k, "True")
settings.add_section(k)
settings.set(k, "geometry", v.geometry())
ms = v.minsize()
settings.set(k, 'minsize', "%s,%s" % (ms[0], ms[1]))
settings.set(k, "state", v.state())
gui.trace("Saving subWindow %s: geom=%s, state=%s, minsize=%s", k, v.geometry(), v.state(), ms)
else:
settings.set("SUBWINDOWS", k, "False")
gui.trace("Skipping subwindow: %s", k)
for k, v in self.externalSettings.items():
if "EXTERNAL" not in settings.sections(): settings.add_section("EXTERNAL")
settings.set("EXTERNAL", k, str(v))
# pane positions?
# sub windows geom & visibility
# scrollpane x & y positions
# language
# ttk
# debug level
with open(fileName, 'w') as theFile:
settings.write(theFile)
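# For illustration only - a file written by saveSettings() is plain
# ConfigParser INI, roughly like this (values depend on app state):
#   [GEOM]
#   geometry = 400x300+50+50
#   minsize = 134,36
#   fullscreen = False
#   state = normal
#   [TOOLBAR]
#   pinned = True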
def loadSettings(self, fileName="appJar.ini", useSettings=True):
""" loads setting from a settings file, and adjusts the GUI to match
called by go() function, if user has requested settings """
self._loadConfigParser()
if not ConfigParser:
self.error("Unable to save config file - no configparser")
return
self.useSettings = useSettings
settings = ConfigParser()
settings.optionxform = str
settings.read(fileName)
if settings.has_option("GEOM", "geometry"):
geom = settings.get("GEOM", "geometry")
if not self.topLevel.ignoreSettings:
size, loc = gui.SPLIT_GEOM(geom)
gui.trace("Setting topLevel geom: %s as size: %s, loc: %s", geom, size, loc)
if size[0] > 1:
self.setSize(*size)
if loc[0] != -1:
self.setLocation(*loc)
else:
gui.trace("Ignoring topLevel geom: %s", geom)
# not finished
if settings.has_option("GEOM", "fullscreen"):
fs = settings.getboolean('GEOM', "fullscreen")
gui.trace("Set fullscreen to: %s", fs)
if fs: self.setFullscreen()
else: self.exitFullscreen()
if settings.has_option("GEOM", "minsize"):
self.topLevel.ms = settings.get('GEOM', "minsize").split(",")
self._getTopLevel().minsize(self.topLevel.ms[0], self.topLevel.ms[1])
gui.trace("Set minsize to: %s", self.topLevel.ms)
if settings.has_option("GEOM", "state"):
state = settings.get('GEOM', "state")
if state in ["withdrawn", "zoomed"]:
self._getTopLevel().state(state)
if settings.has_option("TOOLBAR", "pinned") and self.hasTb:
tb = settings.getboolean("TOOLBAR", "pinned")
self.setToolbarPinned(tb)
gui.trace("Set toolbar to: %s", tb)
if "TOGGLES" in settings.sections():
for k in settings.options("TOGGLES"):
try:
if self.getToggleFrameState(k) != settings.getboolean("TOGGLES", k):
self.toggleToggleFrame(k)
except ItemLookupError:
gui.error("Settings error, invalid TOGGLES name: %s - discarding", k)
if "TABS" in settings.sections():
for k in settings.options("TABS"):
try:
self.setTabbedFrameSelectedTab(k, settings.get("TABS", k))
except ItemLookupError:
gui.error("Settings error, invalid TABS name: %s - discarding", k)
if "PAGES" in settings.sections():
for k in settings.options("PAGES"):
try:
self.setPagedWindowPage(k, settings.getint("PAGES", k))
except ItemLookupError:
gui.error("Settings error, invalid PAGES name: %s - discarding", k)
if "SUBWINDOWS" in settings.sections():
for k in settings.options("SUBWINDOWS"):
if settings.getboolean("SUBWINDOWS", k):
gui.trace("Loading settings for %s", k)
try:
tl = self.widgetManager.get(self.Widgets.SubWindow, k)
# process the geom settings
if settings.has_option(k, "geometry"):
geom = settings.get(k, "geometry")
size, loc = gui.SPLIT_GEOM(geom)
if size[0] > 1:
gui.trace("Setting size: %s", size)
tl.geometry("%sx%s" % (size[0], size[1]))
tl.shown = True
else:
gui.trace("Skipping size: %s", size)
if loc[0] > -1:
gui.trace("Setting location: %s", loc)
self.setSubWindowLocation(k, *loc)
else:
gui.trace("Skipping location: %s", loc)
else:
gui.trace("No location found")
if settings.has_option(k, "minsize"):
ms = settings.get(k, "minsize").split(",")
self.setMinSize(tl, ms)
# set the state - if there's no startWindow
if self.startWindow is None:
try:
tl.state(settings.get(k, "state"))
gui.trace("Set state=%s", tl.state())
except: pass # no state found
except ItemLookupError:
gui.error("Settings error, invalid SUBWINDOWS name: %s - discarding.", k)
else:
gui.trace("Skipping settings for %s", k)
if "EXTERNAL" in settings.sections():
for k in settings.options("EXTERNAL"):
self.externalSettings[k] = settings.get("EXTERNAL", k)
def stop(self, event=None):
""" Closes the GUI. If a stop function is set, will only close the GUI if True """
theFunc = self._getTopLevel().stopFunction
if theFunc is None or theFunc():
if self.useSettings:
self.saveSettings(self.settingsFile)
# stop the after loops
self.alive = False
self.topLevel.after_cancel(self.pollId)
self.topLevel.after_cancel(self.flashId)
if self.preloadAnimatedImageId:
self.topLevel.after_cancel(self.preloadAnimatedImageId)
if self.processQueueId:
self.topLevel.after_cancel(self.processQueueId)
# stop any animations
for key in self.widgetManager.group(self.Widgets.AnimationID):
self.topLevel.after_cancel(self.widgetManager.get(self.Widgets.AnimationID, key))
# stop any maps
for key in self.widgetManager.group(self.Widgets.Map):
self.widgetManager.get(self.Widgets.Map, key).stopUpdates()
# stop any sounds, ignore error when not on Windows
try:
self.stopSound()
except:
pass
self.topLevel.quit()
if not self.fastStop: self.topLevel.destroy()
self.__class__.instantiated = False
gui.info("--- GUI stopped ---")
def setFastStop(self, fast=True):
self._fastStop = fast
def getFastStop(self):
return self._fastStop
fastStop = property(getFastStop, setFastStop)
#####################################
# Functions for configuring polling events
#####################################
def setPollTime(self, time):
""" Set a frequency for executing queued functions
events will fire in order of being added, after sleeping for time """
self.pollTime = time
def registerEvent(self, func):
""" Queue a function, to be executed every poll time """
self.events.append(func)
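# Polling sketch (illustrative names): registered events fire in order,
# every pollTime milliseconds, on the GUI's own thread:
#   def updateClock():
#       app.setLabel("clock", time.strftime("%H:%M:%S"))
#   app.setPollTime(250)
#   app.registerEvent(updateClock)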
def after(self, delay_ms, callback=None, *args):
""" wrapper for topLevel after function
schedules the callback function to happen in x seconds
returns an ID, allowing the event to be cancelled """
return self.topLevel.after(delay_ms, callback, *args)
def afterIdle(self, callback, *args):
""" wrapper for topLevel after_idle function
schedules the callback function to happen in x seconds
returns an ID, allowing the event to be cancelled """
return self.after_idle(callback, *args)
def after_idle(self, callback, *args):
""" wrapper for topLevel after_idle function
schedules the callback function to happen in x seconds
returns an ID, allowing the event to be cancelled """
return self.topLevel.after_idle(callback, *args)
def afterCancel(self, afterId):
""" wrapper for topLevel after_cancel function
tries to cancel the specified callback """
return self.after_cancel(afterId)
def after_cancel(self, afterId):
""" wrapper for topLevel after_cancel function
tries to cancel the specified callback """
return self.topLevel.after_cancel(afterId)
def queueFunction(self, func, *args, **kwargs):
""" adds the specified function & arguments to the event queue
Functions in the event queue are actioned by the gui's main thread
:param func: the function to call
:param *args: any number of ordered arguments
:param **kwargs: any number of named arguments
:raises Full: if unable to add the function to the queue
"""
self._loadThreading()
if Queue is False:
gui.warn("Unable to queueFunction - threading not possible.")
else:
self.eventQueue.put((5, func, args, kwargs), block=False)
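# Threading sketch (illustrative names): background threads must not touch
# widgets directly - they queue updates for the GUI thread instead:
#   def worker():
#       result = doSlowWork()        # hypothetical long-running helper
#       app.queueFunction(app.setLabel, "status", result)
#   app.thread(worker)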
def queuePriorityFunction(self, func, *args, **kwargs):
""" queues the function with a higher priority - not working yet """
self._loadThreading()
if Queue is False:
gui.warn("Unable to queueFunction - threading not possible.")
else:
self.eventQueue.put((1, func, args, kwargs), block=False)
def _processEventQueue(self):
""" internal function to process events in the event queue
put there by queue function """
if not self.alive: return
if not self.eventQueue.empty():
priority, func, args, kwargs = self.eventQueue.get()
gui.trace("FUNCTION: %s(%s)", func, args)
func(*args, **kwargs)
self.processQueueId = self.after(self.EVENT_SPEED, self._processEventQueue)
def thread(self, func, *args, **kwargs):
""" will run the supplied function in a separate thread
param func: the function to run
"""
self._loadThreading()
if Queue is False:
gui.warn("Unable to queueFunction - threading not possible.")
else:
t = Thread(group=None, target=func, name=None, args=args, kwargs=kwargs)
t.daemon = True
t.start()
def callback(self, *args, **kwargs):
"""Shortner for threadCallback."""
return self.threadCallback(*args, **kwargs)
def threadCallback(self, func, callback, *args, **kwargs):
"""Run a given method in a new thread with passed arguments.
When func completes call the callback with the result.
:param func: Method that returns the result.
:param callback: Method that receives the result.
:param args: Positional arguments for func.
:param kwargs: Keyword args for func.
"""
def innerThread(func, callback, *args, **kwargs):
result = func(*args, **kwargs)
self.queueFunction(callback, result)
if not callable(func) or not callable(callback):
gui.error("Function (or callback) method isn't callable!")
return
self.thread(innerThread, func, callback, *args, **kwargs)
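# threadCallback sketch (illustrative names): func runs in a new thread and
# its return value is delivered to callback via the event queue:
#   def slowSum(a, b):
#       return a + b
#   def showResult(result):
#       app.setLabel("status", str(result))
#   app.threadCallback(slowSum, showResult, 2, 3)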
# internal function, called by 'after' function, after sleeping
def _poll(self):
""" internal function, called by 'after' function, after sleeping """
if not self.alive: return
# run any registered actions
for e in self.events:
# execute the event
e()
self.pollId = self.topLevel.after(self.pollTime, self._poll)
def _windowEvent(self, event):
""" called whenever the GUI updates - does nothing """
new_width = self.topLevel.winfo_width()
new_height = self.topLevel.winfo_height()
# gui.trace("Window resized: %sx%s", new_width, new_height)
def enableEnter(self, func):
""" Binds <Return> to the specified function - all widgets """
self.bindKey("<Return>", func)
def disableEnter(self):
""" unbinds <Return> from all widgets """
self.unbindKey("<Return>")
def _enterWrapper(self, func):
if func is None:
self.disableEnter()
else:
self.enableEnter(func)
enterKey = property(fset=_enterWrapper)
def bindKeys(self, keys, func):
""" bind the specified keys, to the specified function, for all widgets """
for key in keys:
self.bindKey(key, func)
def bindKey(self, key, func):
""" bind the specified key, to the specified function, for all widgets """
# for now discard the Event...
myF = self.MAKE_FUNC(func, key)
self._getTopLevel().bind(key, myF)
def unbindKeys(self, keys):
""" unbinds the specified keys from whatever functions they are bound to """
for key in keys:
self.unbindKey(key)
def unbindKey(self, key):
""" unbinds the specified key from whatever functions it is bound to """
self._getTopLevel().unbind(key)
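# Key-binding sketch (illustrative names): MAKE_FUNC passes the key back to
# the bound function, so one handler can serve several bindings:
#   def keyPress(key):
#       print("pressed:", key)
#   app.bindKeys(["<Up>", "<Down>"], keyPress)
#   app.unbindKey("<Up>")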
def _isMouseInWidget(self, w):
""" helper - returns True if the mouse is in the specified widget """
l_x = w.winfo_rootx()
l_y = w.winfo_rooty()
if l_x <= w.winfo_pointerx() <= l_x + \
w.winfo_width() and l_y <= w.winfo_pointery() <= l_y + w.winfo_height():
return True
else:
return False
# function to give a clicked widget the keyboard focus
def _grabFocus(self, e):
""" gives the specified widget the focus """
e.widget.focus_set()
#####################################
# FUNCTIONS for configuring GUI settings
#####################################
def setSize(self, geom, height=None, ignoreSettings=None):
""" called to update screen geometry
can take a geom string, or a width & height
can override ignoreSettings if desired """
container = self._getTopLevel()
if ignoreSettings is not None:
container.ignoreSettings = ignoreSettings
if geom == "fullscreen":
self.setFullscreen()
elif geom is not None:
if height is not None:
geom=(geom, height)
elif not isinstance(geom, list) and not isinstance(geom, tuple):
geom, loc = gui.SPLIT_GEOM(geom)
size = "%sx%s" % (int(geom[0]), int(geom[1]))
gui.trace("Setting size: %s", size)
# warn the user that their geom is not big enough
dims = gui.GET_DIMS(container)
if geom[0] < dims["b_width"] or geom[1] < dims["b_height"]:
self.warn("Specified dimensions (%s, %s) less than requested dimensions (%s, %s)",
geom[0], geom[1], dims["b_width"], dims["b_height"])
# and set it as the minimum size
if not hasattr(container, 'ms'):
self.setMinSize(container, geom)
self.exitFullscreen()
container.geometry(size)
def getSize(self):
container = self._getTopLevel()
size, loc = gui.SPLIT_GEOM(container.geometry())
return size
size = property(getSize, setSize)
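# Geometry sketch (illustrative): setSize() accepts a geom string, a
# width/height pair, or the special value "fullscreen"; setLocation() takes
# screen coordinates:
#   app.setSize("300x200")
#   app.setSize(300, 200)
#   app.setLocation(100, 50)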
def setMinSize(self, container=None, size=None):
""" sets a minimum size for the specified container - defaults to the whole GUI """
if container is None: container = self.topLevel
if size is None: size = (gui.GET_DIMS(container)["r_width"], gui.GET_DIMS(container)["r_height"])
container.ms = size
container.minsize(size[0], size[1])
gui.trace("Minsize set to: %s", size)
def setLocation(self, x, y=None, ignoreSettings=None, win=None, up=0):
""" called to set the GUI's position on screen """
if win is None:
win = self._getTopLevel()
gui.SET_LOCATION(x, y, ignoreSettings, win, up)
def getLocation(self):
container = self._getTopLevel()
size, loc = gui.SPLIT_GEOM(container.geometry())
return loc
location = property(getLocation, setLocation)
def _bringToFront(self, win=None):
""" called to make sure this window is on top of other windows """
if win is None:
win = self.topLevel
top = self.top
else:
top = win.attributes('-topmost')
if self.platform == self.MAC:
import subprocess
tmpl = 'tell application "System Events" to set frontmost of every process whose unix id is {0} to true'
script = tmpl.format(os.getpid())
subprocess.check_call(['/usr/bin/osascript', '-e', script])
win.after( 0, lambda: win.attributes("-topmost", top))
# val=os.system('''/usr/bin/osascript -e 'tell app "Finder" to set frontmost of process "''' + PY_NAME + '''" to true' ''')
win.lift()
elif self.platform in (self.WINDOWS, self.LINUX):
    win.lift()
def setFullscreen(self, title=None):
""" sets the specified window to be fullscreen
if no title, will set the main GUI """
try:
container = self.widgetManager.get(self.Widgets.SubWindow, title)
except:
container = self._getTopLevel()
if not container.isFullscreen:
container.isFullscreen = True
container.attributes('-fullscreen', True)
container.escapeBindId = container.bind('<Escape>', self.MAKE_FUNC(self.exitFullscreen, container), "+")
def getFullscreen(self, title=None):
if title is None:
container = self._getTopLevel()
else:
container = self.widgetManager.get(self.Widgets.SubWindow, title)
return container.isFullscreen
def setOnTop(self, stay=True):
self._getTopLevel().attributes("-topmost", stay)
gui.trace("Staying on top set to: %s", stay)
def getOnTop(self):
return self._getTopLevel().attributes("-topmost") == 1
top = property(getOnTop, setOnTop)
def _changeFullscreen(self, flag):
if flag: self.setFullscreen()
else: self.exitFullscreen()
fullscreen = property(getFullscreen, _changeFullscreen)
def exitFullscreen(self, container=None):
""" turns off fullscreen mode for the specified window """
if container is None or isinstance(container, STRING):
try:
container = self.widgetManager.get(self.Widgets.SubWindow, container)
except:
container = self._getTopLevel()
if container.isFullscreen:
container.isFullscreen = False
container.attributes('-fullscreen', False)
if container.escapeBindId is not None:
container.unbind('<Escape>', container.escapeBindId)
with PauseLogger():
self._doTitleBar()
return True
else:
return False
def setPadX(self, x=0):
""" set the current container's external grid padding """
self.containerStack[-1]['padx'] = x
def setPadY(self, y=0):
""" set the current container's external grid padding """
self.containerStack[-1]['pady'] = y
def setPadding(self, x, y=None):
""" sets the padding around the border of the current container """
x, y = gui.PARSE_TWO_PARAMS(x, y)
self.containerStack[-1]['padx'] = x
self.containerStack[-1]['pady'] = y
def getPadding(self):
return self._getContainerProperty('padx'), self._getContainerProperty('pady')
padding = property(getPadding, setPadding)
def config(self, **kwargs):
self.configure(**kwargs)
def configure(self, **kwargs):
title = kwargs.pop("title", None)
icon = kwargs.pop("icon", None)
transparency = kwargs.pop("transparency", None)
visible = kwargs.pop("visible", None)
top = kwargs.pop("top", None)
padding = kwargs.pop("padding", None)
inPadding = kwargs.pop("inPadding", None)
guiPadding = kwargs.pop("guiPadding", None)
size = kwargs.pop("size", None)
location = kwargs.pop("location", None)
fullscreen = kwargs.pop("fullscreen", None)
resizable = kwargs.pop("resizable", None)
sticky = kwargs.pop("sticky", None)
stretch = kwargs.pop("stretch", None)
expand = kwargs.pop("expand", None)
row = kwargs.pop("row", None)
fg = kwargs.pop("fg", None)
bg = kwargs.pop("bg", None)
font = kwargs.pop("font", None)
buttonFont = kwargs.pop("buttonFont", None)
labelFont = kwargs.pop("labelFont", None)
inputFont = kwargs.pop("inputFont", None)
ttkTheme = kwargs.pop("ttkTheme", None)
editMenu = kwargs.pop("editMenu", None)
# two possible names
stopFunction = kwargs.pop("stop", kwargs.pop("stopFunction", None))
startFunction = kwargs.pop("start", kwargs.pop("startFunction", None))
fastStop = kwargs.pop("fastStop", None)
enterKey = kwargs.pop("enterKey", None)
logLevel = kwargs.pop("log", kwargs.pop("logLevel", None))
logFile = kwargs.pop("file", kwargs.pop("logFile", None))
language = kwargs.pop("language", None)
for k, v in kwargs.items():
gui.error("Invalid config parameter: %s, %s", k, v)
if title is not None: self.title = title
if icon is not None: self.icon = icon
if transparency is not None: self.transparency = transparency
if visible is not None: self.visible = visible
if top is not None: self.top = top
if padding is not None: self.padding = padding
if inPadding is not None: self.inPadding = inPadding
if guiPadding is not None: self.guiPadding = guiPadding
if size is not None: self.size = size
if location is not None: self.location = location
if fullscreen is not None: self.fullscreen = fullscreen
if resizable is not None: self.resizable = resizable
if sticky is not None: self.sticky = sticky
if expand is not None: self.expand = expand
if stretch is not None: self.stretch = stretch
if row is not None: self.row = row
if fg is not None: self.fg = fg
if bg is not None: self.bg = bg
if font is not None: self.font = font
if labelFont is not None: self.labelFont = labelFont
if buttonFont is not None: self.buttonFont = buttonFont
if inputFont is not None: self.inputFont = inputFont
if ttkTheme is not None: self.ttkTheme = ttkTheme
if editMenu is not None: self.editMenu = editMenu
if stopFunction is not None: self.stopFunction = stopFunction
if startFunction is not None: self.startFunction = startFunction
if fastStop is not None: self.fastStop = fastStop
if enterKey is not None: self.enterKey = enterKey
if logLevel is not None: self.logLevel = logLevel
if logFile is not None: self.logFile = logFile
if language is not None: self.language = language
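# configure() sketch (illustrative, reusing the checkStop sketch above): any
# of the keywords popped above can be combined in a single call, each mapped
# to the matching property:
#   app.configure(title="demo", size=(400, 300), bg="white",
#                 stopFunction=checkStop, logLevel="DEBUG")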
def setGuiPadding(self, x, y=None):
""" sets the padding around the border of the GUI """
x, y = gui.PARSE_TWO_PARAMS(x, y)
self.containerStack[0]['container'].config(padx=x, pady=y)
def getGuiPadding(self):
return int(str(self.containerStack[0]['container'].cget('padx'))), int(str(self.containerStack[0]['container'].cget('pady')))
guiPadding = property(getGuiPadding, setGuiPadding)
# sets the current containers internal padding
def setIPadX(self, x=0):
self.setInPadX(x)
def setIPadY(self, y=0):
self.setInPadY(y)
def setIPadding(self, x, y=None):
self.setInPadding(x, y)
def setInPadX(self, x=0):
self.containerStack[-1]['ipadx'] = x
def setInPadY(self, y=0):
self.containerStack[-1]['ipady'] = y
def setInPadding(self, x, y=None):
x, y = gui.PARSE_TWO_PARAMS(x, y)
self.containerStack[-1]['ipadx'] = x
self.containerStack[-1]['ipady'] = y
def getInPadding(self):
return self._getContainerProperty('ipadx'), self._getContainerProperty('ipady')
inPadding = property(getInPadding, setInPadding)
# set an override sticky for this container
def setSticky(self, sticky):
self.containerStack[-1]['sticky'] = sticky
def getSticky(self):
return self._getContainerProperty('sticky')
# property for setTitle
sticky = property(getSticky, setSticky)
# this tells widgets what to do when GUI is resized
def setStretch(self, exp):
self.setExpand(exp)
def getStretch(self):
return self.getExpand()
stretch = property(getStretch, setStretch)
def getExpand(self):
return self._getContainerProperty('expand')
def setExpand(self, exp):
if exp.lower() == "none":
self.containerStack[-1]['expand'] = "NONE"
elif exp.lower() == "row":
self.containerStack[-1]['expand'] = "ROW"
elif exp.lower() == "column":
self.containerStack[-1]['expand'] = "COLUMN"
else:
self.containerStack[-1]['expand'] = "ALL"
expand = property(getExpand, setExpand)
def RANDOM_COLOUR(self):
return self.getRandomColour()
def getRandomColour(self):
""" generates a random colour """
self._loadRandom()
de=("%02x"%random.randint(0,255))
re=("%02x"%random.randint(0,255))
we=("%02x"%random.randint(0,255))
return "#"+de+re+we
randomColour = property(getRandomColour)
def getFonts(self):
fonts = list(tkFont.families())
fonts.sort()
return fonts
fonts = property(getFonts)
def increaseFont(self):
self.increaseLabelFont()
self.increaseButtonFont()
def decreaseFont(self):
self.decreaseLabelFont()
self.decreaseButtonFont()
def increaseButtonFont(self):
self.setButtonFont(size=self._buttonFont['size'] + 1)
def decreaseButtonFont(self):
self.setButtonFont(size=self._buttonFont['size'] - 1)
def increaseLabelFont(self):
self.setLabelFont(size=self._labelFont['size'] + 1)
def decreaseLabelFont(self):
self.setLabelFont(size=self._labelFont['size'] - 1)
def setFont(self, *args, **kwargs):
self.setInputFont(*args, **kwargs)
self.setLabelFont(*args, **kwargs)
self.setButtonFont(*args, **kwargs)
def getFont(self):
return self._getContainerProperty('labelFont').actual()
font = property(getFont, setFont)
def _fontHelper(self, font, *args, **kwargs):
if len(args) > 0:
if isinstance(args[0], int):
kwargs={'size':args[0]}
elif isinstance(args[0], dict):
kwargs=args[0]
elif isinstance(args[0], tkFont.Font):
gui.trace("%s set to new object", font)
self.containerStack[-1][font]=args[0]
return None
self._getContainerProperty(font).config(**kwargs)
if 'family' in kwargs and kwargs['family'] != self._getContainerProperty(font).actual()['family']:
gui.error("Failed to adjust %s to %s.", font, kwargs['family'])
return kwargs
def setInputFont(self, *args, **kwargs):
self._fontHelper('inputFont', *args, **kwargs)
def getInputFont(self):
return self._getContainerProperty('inputFont').actual()
inputFont = property(getInputFont, setInputFont)
def setButtonFont(self, *args, **kwargs):
self._fontHelper('buttonFont', *args, **kwargs)
def getButtonFont(self):
return self._getContainerProperty('buttonFont').actual()
buttonFont = property(getButtonFont, setButtonFont)
def setLabelFont(self, *args, **kwargs):
kwargs = self._fontHelper('labelFont', *args, **kwargs)
if kwargs is not None:
self.tableFont.config(**kwargs)
# need better way to register font change events on tables
for k, v in self.widgetManager.group(self.Widgets.Table).items():
v.config(font=self.tableFont)
linkArgs = kwargs.copy()
linkArgs['underline'] = True
linkArgs['weight'] = 'bold'
self._linkFont.config(**linkArgs)
def getLabelFont(self):
return self._getContainerProperty('labelFont').actual()
labelFont = property(getLabelFont, setLabelFont)
# need to set a default colour for container
# then populate that field
# then use & update that field accordingly
# all widgets will then need to use it
# and here we update all....
def setFg(self, colour, override=False):
if not self.ttkFlag:
self.containerStack[-1]['fg']=colour
gui.SET_WIDGET_FG(self._getContainerProperty('container'), colour, override)
for child in self._getContainerProperty('container').winfo_children():
if not self._isWidgetContainer(child):
gui.SET_WIDGET_FG(child, colour, override)
else:
gui.trace("In ttk mode - trying to set FG to %s", colour)
self.ttkStyle.configure("TLabel", foreground=colour)
self.ttkStyle.configure("TFrame", foreground=colour)
def getBg(self):
if self._getContainerProperty('type') == self.Widgets.RootPage:
return self.bgLabel.cget("bg")
else:
return self._getContainerProperty('container').cget("bg")
def getFg(self):
return self._getContainerProperty("fg")
fg = property(getFg, setFg)
# self.topLevel = Tk()
# self.appWindow = CanvasDnd, fills all of self.topLevel
# self.tb = Frame, at top of appWindow
# self.container = Frame, at bottom of appWindow => C_ROOT container
# self.bglabel = Label, filling all of container
def setBg(self, colour, override=False, tint=False):
if not self.ttkFlag:
if self._getContainerProperty('type') == self.Widgets.RootPage:
# removed this - it makes the screen do funny stuff
# self.appWindow.config(background=colour)
self.bgLabel.config(background=colour)
self._getContainerProperty('container').config(background=colour)
for child in self._getContainerProperty('container').winfo_children():
if not self._isWidgetContainer(child):
# horrible hack to deal with weird ScrolledText
# winfo_children returns ScrolledText as a Frame
# therefore can't call some functions
# this gets the ScrolledText version
if gui.GET_WIDGET_TYPE(child) == "Frame":
for val in self.widgetManager.group(self.Widgets.TextArea).values():
if str(val) == str(child):
child = val
break
gui.SET_WIDGET_BG(child, colour, override, tint)
else:
gui.trace("In ttk mode - trying to set BG to %s", colour)
self.ttkStyle.configure(".", background=colour)
bg = property(getBg, setBg)
@staticmethod
def _isWidgetContainer(widget):
try:
if widget.isContainer:
return True
except:
pass
return False
def setResizable(self, canResize=True):
self._getTopLevel().isResizable = canResize
if self._getTopLevel().isResizable:
self._getTopLevel().resizable(True, True)
else:
self._getTopLevel().resizable(False, False)
def getResizable(self):
return self._getTopLevel().isResizable
resizable = property(getResizable, setResizable)
def _doTitleBar(self):
if self.platform == self.MAC:
self.warn("Title bar hiding doesn't work on MAC - app may become unresponsive.")
elif self.platform == self.LINUX:
self.warn("Title bar hiding doesn't work on LINUX - app may become unresponsive.")
self._getTopLevel().overrideredirect(not self.hasTitleBar)
def hideTitleBar(self):
self.hasTitleBar = False
self._doTitleBar()
def showTitleBar(self):
self.hasTitleBar = True
self._doTitleBar()
# function to set the window's title
def setTitle(self, title):
self._getTopLevel().title(title)
# function to get the window title
def getTitle(self):
return self._getTopLevel().title()
# property for setTitle
title = property(getTitle, setTitle)
# set an icon
def setIcon(self, image):
self.winIcon = image
container = self._getTopLevel()
if image.endswith('.ico'):
container.wm_iconbitmap(image)
else:
icon = self._getImage(image)
container.iconphoto(True, icon)
def getIcon(self):
return self.winIcon
# property for setTitle
icon = property(getIcon, setIcon)
def _getCanvas(self, param=-1):
if len(self.containerStack) > 1 and self.containerStack[param]['type'] == self.Widgets.SubWindow:
return self.containerStack[param]['container']
elif len(self.containerStack) > 1:
return self._getCanvas(param-1)
else:
return self.topLevel
def _getTopLevel(self):
if len(self.containerStack) > 1 and self._getContainerProperty('type') == self.Widgets.SubWindow:
return self._getContainerProperty('container')
else:
return self.topLevel
# make the window transparent (between 0 & 1)
def setTransparency(self, percentage):
if self.platform == self.LINUX:
self.warn("Transparency not supported on LINUX")
else:
if percentage > 1:
percentage = float(percentage) / 100
self._getTopLevel().attributes("-alpha", percentage)
def getTransparency(self):
return self._getTopLevel().attributes("-alpha") * 100
# property for setTransparency
transparency = property(getTransparency, setTransparency)
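# Transparency sketch (illustrative; no effect on LINUX): values above 1 are
# treated as percentages and divided by 100 before being passed to tk:
#   app.setTransparency(50)      # 50% -> alpha 0.5
#   app.setTransparency(0.75)    # already a fraction, used as-is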
##############################
# functions to deal with tabbing and right clicking
##############################
def _focusNextWindow(self, event):
event.widget.tk_focusNext().focus_set()
nowFocus = self.topLevel.focus_get()
if isinstance(nowFocus, Entry):
nowFocus.select_range(0, END)
return("break")
def _focusLastWindow(self, event):
event.widget.tk_focusPrev().focus_set()
nowFocus = self.topLevel.focus_get()
if isinstance(nowFocus, Entry):
nowFocus.select_range(0, END)
return("break")
# creates relevant bindings on the widget
def _addRightClickMenu(self, widget):
if self.platform in [self.WINDOWS, self.LINUX]:
widget.bind('<Button-3>', self._rightClick)
else:
widget.bind('<Button-2>', self._rightClick)
def _rightClick(self, event, menu="EDIT"):
event.widget.focus()
if menu == "EDIT":
if self._prepareCopyAndPasteMenu(event):
self.widgetManager.get(self.Widgets.Menu, menu).focus_set()
self.widgetManager.get(self.Widgets.Menu, menu).post(event.x_root - 10, event.y_root - 10)
else:
self.widgetManager.get(self.Widgets.Menu, menu).focus_set()
self.widgetManager.get(self.Widgets.Menu, menu).post(event.x_root - 10, event.y_root - 10)
return "break"
#####################################
# FUNCTION to configure widgets
#####################################
def _getItems(self, kind):
if kind in [self.Widgets.FileEntry, self.Widgets.DirectoryEntry]:
return self.widgetManager.group(self.Widgets.Entry)
elif kind == self.Widgets.Page: # no dict of pages - the container manages them...
return self.widgetManager.group(self.Widgets.PagedWindow)
elif kind == self.Widgets.Tab: # no dict of tabs - the container manages them...
return self.widgetManager.group(self.Widgets.TabbedFrame)
elif kind == self.Widgets.Note:
return self.widgetManager.group(self.Widgets.Notebook)
else:
return self.widgetManager.group(kind)
def configureAllWidgets(self, kind, option, value):
items = list(self.widgetManager.group(kind))
self.configureWidgets(kind, items, option, value)
def configureWidgets(self, kind, names, option, value):
if not isinstance(names, list):
self.configureWidget(kind, names, option, value)
else:
for widg in names:
# in case of a 2D array, e.g. buttons
if isinstance(widg, list):
for widg2 in widg:
self.configureWidget(kind, widg2, option, value)
else:
self.configureWidget(kind, widg, option, value)
def getWidget(self, kind, name, val=None):
# if val is set (RadioButtons) - append it
if val is not None: name+= "-" + val
return self.widgetManager.get(kind, name)
def getWidgetProperty(self, kind, name, val, prop):
return self.getWidget(kind, name, val).cget(prop)
def addWidget(self, title, widg, row=None, column=0, colspan=0, rowspan=0):
''' adds a generic widget to the appJar grid manager '''
self.widgetManager.verify(self.Widgets.Widget, title)
self._positionWidget(widg, row, column, colspan, rowspan)
self.widgetManager.add(self.Widgets.Widget, title, widg)
def configureWidget(self, kind, name, option, value, key=None, deprecated=False):
gui.trace("Configuring: %s of %s with %s of %s", name, kind, option, value)
# warn about deprecated functions
if deprecated:
self.warn("Deprecated config function (%s) used for %s -> %s use %s deprecated", option, self.Widgets.name(kind), name, deprecated)
if kind == self.Widgets.RadioButton:
items = self.widgetManager.group(kind)
new_items = []
for k, v in items.items():
if k.startswith(name+"-"):
new_items.append(v)
if len(new_items) > 0:
items = new_items
# stops multiple events...
if option in ['change', 'command']:
items = [items[0]]
else:
raise Exception("No RadioButtons found with that name " + name)
else:
# get the list of items for this type, and validate the widget is in the list
self.widgetManager.check(kind, name)
items = self.widgetManager.group(kind)
items = [items[name]]
# loop through each item, and try to reconfigure it
# this will often fail - widgets have varied config options
for item in items:
try:
if option == 'background':
gui.SET_WIDGET_BG(item, value, True)
elif option == 'foreground':
gui.SET_WIDGET_FG(item, value, True)
elif option == 'disabledforeground':
item.config(disabledforeground=value)
elif option == 'disabledbackground':
item.config(disabledbackground=value)
elif option == 'activeforeground':
item.config(activeforeground=value)
elif option == 'activebackground':
item.config(activebackground=value)
elif option == 'inactiveforeground':
if kind == self.Widgets.TabbedFrame:
item.config(inactiveforeground=value)
else:
self.warn("Error configuring %s: can't set inactiveforeground", name )
elif option == 'inactivebackground':
if kind in [self.Widgets.TabbedFrame, self.Widgets.Table]:
item.config(inactivebackground=value)
else:
self.warn("Error configuring %s: can't set inactivebackground", name)
elif option == 'width':
item.config(width=value)
elif option == 'height':
item.config(height=value)
elif option == 'state':
# make entries readonly - can still copy/paste
if kind == self.Widgets.Entry:
if value == "disabled":
if hasattr(item, 'but'):
item.but.config(state=value)
item.unbind("<Button-1>")
value = "readonly"
elif value == 'normal' and hasattr(item, 'but') and item.cget('state') != 'normal':
item.bind("<Button-1>", item.click_command, "+")
item.but.config(state=value)
item.config(state=value)
elif option == 'relief':
item.config(relief=value)
elif option == 'style':
if self.ttkFlag:
gui.trace("%s configured with ttk style %s", name, value)
item.config(style=value)
else:
self.warn("Error configuring %s: can't set ttk style, not in ttk mode.", name)
elif option in ['align', 'anchor']:
if kind == self.Widgets.Entry or gui.GET_WIDGET_TYPE(item) == 'SelectableLabel':
if value == W: value = LEFT
elif value == E: value = RIGHT
item.config(justify=value)
elif kind == self.Widgets.LabelFrame:
item.config(labelanchor=value)
else:
if value == LEFT: value = "w"
elif value == RIGHT: value = "e"
item.config(anchor=value)
elif option == 'cursor':
item.config(cursor=value)
elif option == 'tooltip':
self._addTooltip(item, value)
elif option == 'disableTooltip':
self._disableTooltip(item)
elif option == 'enableTooltip':
self._enableTooltip(item)
elif option == "focus":
item.focus_set()
if kind == self.Widgets.Entry:
item.icursor(END)
item.xview(END)
# event bindings
elif option == 'over':
self._bindOverEvent(kind, name, item, value, option, key)
elif option == 'drag':
self._bindDragEvent(kind, name, item, value, option, key)
elif option in ['command', "change", "submit"]:
self._bindEvent(kind, name, item, value, option, key)
elif option == 'sticky':
info = {}
# need to reposition the widget in its grid
if self._widgetHasContainer(kind, item):
# pack uses LEFT & RIGHT & BOTH
info["side"] = value
if value.lower() == "both":
info["expand"] = 1
info["side"] = "right"
else:
info["expand"] = 0
else:
# grid uses E+W
if value.lower() == "left":
side = W
elif value.lower() == "right":
side = E
elif value.lower() == "both":
side = W + E
else:
side = value.upper()
info["sticky"] = side
self._repackWidget(item, info)
elif option == 'padding':
if value[1] is None:
item.config(padx=value[0][0], pady=value[0][1])
else:
item.config(padx=value[0], pady=value[1])
elif option == 'ipadding':
if value[1] is None:
item.config(ipadx=value[0][0], ipady=value[0][1])
else:
item.config(ipadx=value[0], ipady=value[1])
elif option == 'rightClick':
self._bindRightClick(item, value)
elif option == 'internalDrop':
self._registerInternalDropTarget(item, value)
elif option == 'internalDrag':
self._registerInternalDragSource(kind, name, item, value)
elif option == 'externalDrop':
self._registerExternalDropTarget(name, item, value[0], value[1])
elif option == 'externalDrag':
self._registerExternalDragSource(name, item, value)
except TclError as e:
self.warn("Error configuring %s: %s", name, str(e))
# generic function for over events
def _validateFunctionList(self, functions, mode):
if isinstance(functions, tuple):
    functions = list(functions)
elif not isinstance(functions, list):
    functions = [functions]
if len(functions) == 1:
functions.append(None)
if len(functions) != 2:
raise Exception("Invalid arguments, set<widget> %s Function requires 1 or 2 functions to be passed in.", mode)
return functions
def _bindOverEvent(self, kind, name, widget, functions, eventType, key=None):
functions = self._validateFunctionList(functions, "Over")
if functions[0] is not None:
widget.bind("<Enter>", self.MAKE_FUNC(functions[0], name), add="+")
if functions[1] is not None:
widget.bind("<Leave>", self.MAKE_FUNC(functions[1], name), add="+")
# generic function for drag events
def _bindDragEvent(self, kind, name, widget, functions, eventType, key=None):
functions = self._validateFunctionList(functions, "Drag")
if kind == self.Widgets.Label:
widget.config(cursor="fleur")
def getLabel(f):
# loop through all labels
items = self.widgetManager.group(kind)
for key, value in items.items():
if self._isMouseInWidget(value):
self.MAKE_FUNC(f,key)()
return
if functions[0] is not None:
widget.bind("<ButtonPress-1>", self.MAKE_FUNC(functions[0], name), add="+")
if functions[1] is not None:
widget.bind("<ButtonRelease-1>", self.MAKE_FUNC(getLabel, functions[1]), add="+")
else:
self.error("Only able to bind drag events to labels")
# generic function for change/submit/events
def _bindEvent(self, kind, name, widget, function, eventType, key=None):
# this will discard the scale value, as default function
# can't handle it
if kind == self.Widgets.Scale:
cmd = self.MAKE_FUNC(function, name)
widget.cmd_id = widget.var.trace('w', cmd)
widget.cmd = cmd
elif kind == self.Widgets.OptionBox:
if widget.kind == "ticks":
vals = self.widgetManager.get(self.Widgets.TickOptionBox, name, group=WidgetManager.VARS)
for o in vals:
cmd = self.MAKE_FUNC(function, name)
vals[o].cmd_id = vals[o].trace('w', cmd)
vals[o].cmd = cmd
else:
cmd = self.MAKE_FUNC(function, name)
# need to trace the variable??
widget.cmd_id = widget.var.trace('w', cmd)
widget.cmd = cmd
elif kind in [self.Widgets.Entry, self.Widgets.FileEntry, self.Widgets.DirectoryEntry]:
if eventType == "change":
# not populated by change/submit
if key is None:
key = name
cmd = self.MAKE_FUNC(function, key)
# get Entry variable
var = self.widgetManager.get(self.Widgets.Entry, name, group=WidgetManager.VARS)
var.cmd_id = var.trace('w', cmd)
var.cmd = cmd
else:
# not populated by change/submit
if key is None:
key = name
sbm = self.MAKE_FUNC(function, key)
widget.sbm_id = widget.bind('<Return>', sbm)
widget.sbm = sbm
elif kind == self.Widgets.TextArea:
if eventType == "change":
# get Entry variable
cmd = self.MAKE_FUNC(function, name)
widget.bindChangeEvent(cmd)
elif kind == self.Widgets.Button:
if eventType == "change":
self.warn("Error configuring %s : can't set a change function", name)
else:
widget.config(command=self.MAKE_FUNC(function, name))
widget.bind('<Return>', self.MAKE_FUNC(function, name))
# make labels clickable, add a cursor, and change the look
elif kind == self.Widgets.Label or kind == self.Widgets.Image:
if eventType in ["command", "submit"]:
if self.platform == self.MAC:
widget.config(cursor="pointinghand")
elif self.platform in [self.WINDOWS, self.LINUX]:
widget.config(cursor="hand2")
cmd = self.MAKE_FUNC(function, name)
widget.bind("<Button-1>", cmd, add="+")
widget.cmd = cmd
# these look good, but break when dialogs take focus
#up = widget.cget("relief").lower()
# down="sunken"
# make it look like it's pressed
#widget.bind("<Button-1>",lambda e: widget.config(relief=down), add="+")
#widget.bind("<ButtonRelease-1>",lambda e: widget.config(relief=up))
elif eventType == "change":
self.warn("Error configuring %s : can't set a change function", name)
elif kind == self.Widgets.ListBox:
cmd = self.MAKE_FUNC(function, name)
widget.bind('<<ListboxSelect>>', cmd)
widget.cmd = cmd
elif kind in [self.Widgets.RadioButton]:
cmd = self.MAKE_FUNC(function, name)
# get rb variable
var = self.widgetManager.get(self.Widgets.RadioButton, name, group=WidgetManager.VARS)
# only allow one trace to be bound
# users are more likely to call multiple binds on radios
# because they all share one var
if hasattr(var, "cmd_id"):
var.trace_vdelete('w', var.cmd_id)
var.cmd_id = var.trace('w', cmd)
var.cmd = cmd
elif kind == self.Widgets.Properties:
cmd = self.MAKE_FUNC(function, name)
widget.setChangeFunction(cmd)
elif kind == self.Widgets.FrameStack:
cmd = self.MAKE_FUNC(function, name)
widget.setChangeFunction(cmd)
elif kind == self.Widgets.SpinBox:
widget.cmd = self.MAKE_FUNC(function, name)
widget.cmd_id = widget.var.trace("w", widget.cmd)
else:
if kind not in [self.Widgets.CheckBox]:
self.warn("Unmanaged binding of %s to %s", eventType, name)
cmd = self.MAKE_FUNC(function, name)
widget.config(command=cmd)
widget.cmd = cmd
# dynamic way to create the configuration functions
def _buildConfigFuncs(self):
# loop through all the available widgets
# and make all the below functons for each one
for v in self.Widgets.funcs():
k = self.Widgets.get(v)
exec( "def set" + v +
"Bg(self, name, val): self.configureWidgets(" +
str(k) + ", name, 'background', val)")
exec("gui.set" + v + "Bg=set" + v + "Bg")
exec( "def set" + v +
"Fg(self, name, val): self.configureWidgets(" +
str(k) + ", name, 'foreground', val)")
exec("gui.set" + v + "Fg=set" + v + "Fg")
exec( "def set" + v +
"DisabledFg(self, name, val): self.configureWidgets(" +
str(k) + ", name, 'disabledforeground', val)")
exec("gui.set" + v + "DisabledFg=set" + v + "DisabledFg")
exec( "def set" + v +
"DisabledBg(self, name, val): self.configureWidgets(" +
str(k) + ", name, 'disabledbackground', val)")
exec("gui.set" + v + "DisabledBg=set" + v + "DisabledBg")
exec( "def set" + v +
"ActiveFg(self, name, val): self.configureWidgets(" +
str(k) + ", name, 'activeforeground', val)")
exec("gui.set" + v + "ActiveFg=set" + v + "ActiveFg")
exec( "def set" + v +
"ActiveBg(self, name, val): self.configureWidgets(" +
str(k) + ", name, 'activebackground', val)")
exec("gui.set" + v + "ActiveBg=set" + v + "ActiveBg")
exec( "def set" + v +
"InactiveFg(self, name, val): self.configureWidgets(" +
str(k) + ", name, 'inactiveforeground', val)")
exec("gui.set" + v + "InactiveFg=set" + v + "InactiveFg")
exec( "def set" + v +
"InactiveBg(self, name, val): self.configureWidgets(" +
str(k) + ", name, 'inactivebackground', val)")
exec("gui.set" + v + "InactiveBg=set" + v + "InactiveBg")
exec( "def set" + v +
"Width(self, name, val): self.configureWidgets(" +
str(k) + ", name, 'width', val)")
exec("gui.set" + v + "Width=set" + v + "Width")
exec( "def set" + v +
"Height(self, name, val): self.configureWidgets(" +
str(k) + ", name, 'height', val)")
exec("gui.set" + v + "Height=set" + v + "Height")
exec( "def set" + v +
"State(self, name, val): self.configureWidgets(" +
str(k) + ", name, 'state', val)")
exec("gui.set" + v + "State=set" + v + "State")
exec( "def set" + v +
"Padding(self, name, x, y=None): self.configureWidgets(" +
str(k) + ", name, 'padding', [x, y])")
exec("gui.set" + v + "Padding=set" + v + "Padding")
exec( "def set" + v +
"IPadding(self, name, x, y=None): self.configureWidgets(" +
str(k) + ", name, 'ipadding', [x, y])")
exec("gui.set" + v + "IPadding=set" + v + "IPadding")
exec( "def set" + v +
"InPadding(self, name, x, y=None): self.configureWidgets(" +
str(k) + ", name, 'ipadding', [x, y])")
exec("gui.set" + v + "InPadding=set" + v + "InPadding")
# drag and drop stuff
exec( "def set" + v +
"DropTarget(self, name, function=None, replace=True): self.configureWidgets(" +
str(k) + ", name, 'externalDrop', [function, replace])")
exec("gui.set" + v + "DropTarget=set" + v + "DropTarget")
exec( "def set" + v +
"DragSource(self, name, function=None): self.configureWidgets(" +
str(k) + ", name, 'externalDrag', function)")
exec("gui.set" + v + "DragSource=set" + v + "DragSource")
exec( "def register" + v +
"Draggable(self, name, function=None): self.configureWidgets(" +
str(k) + ", name, 'internalDrag', function)")
exec("gui.register" + v + "Draggable=register" + v + "Draggable")
exec( "def register" + v +
"Droppable(self, name, function=None): self.configureWidgets(" +
str(k) + ", name, 'internalDrop', function)")
exec("gui.register" + v + "Droppable=register" + v + "Droppable")
exec( "def set" + v +
"Style(self, name, val): self.configureWidget(" +
str(k) + ", name, 'style', val)")
exec("gui.set" + v + "Style=set" + v + "Style")
# might not all be necessary, could make exclusion list
exec( "def set" + v +
"Relief(self, name, val): self.configureWidget(" +
str(k) + ", name, 'relief', val)")
exec("gui.set" + v + "Relief=set" + v + "Relief")
exec( "def set" + v +
"Align(self, name, val): self.configureWidget(" +
str(k) + ", name, 'align', val)")
exec("gui.set" + v + "Align=set" + v + "Align")
exec( "def set" + v +
"Anchor(self, name, val): self.configureWidget(" +
str(k) + ", name, 'anchor', val)")
exec("gui.set" + v + "Anchor=set" + v + "Anchor")
exec( "def set" + v +
"Tooltip(self, name, val): self.configureWidget(" +
str(k) + ", name, 'tooltip', val)")
exec("gui.set" + v + "Tooltip=set" + v + "Tooltip")
exec( "def disable" + v +
"Tooltip(self, name): self.configureWidget(" +
str(k) + ", name, 'disableTooltip', None)")
exec("gui.disable" + v + "Tooltip=disable" + v + "Tooltip")
exec( "def enable" + v +
"Tooltip(self, name): self.configureWidget(" +
str(k) + ", name, 'enableTooltip', None)")
exec("gui.enable" + v + "Tooltip=enable" + v + "Tooltip")
# function setters
exec( "def set" + v +
"ChangeFunction(self, name, val): self.configureWidget(" +
str(k) + ", name, 'change', val)")
exec("gui.set" + v + "ChangeFunction=set" + v + "ChangeFunction")
exec( "def set" + v +
"SubmitFunction(self, name, val): self.configureWidget(" +
str(k) + ", name, 'submit', val)")
exec("gui.set" + v + "SubmitFunction=set" + v + "SubmitFunction")
exec( "def set" + v +
"DragFunction(self, name, val): self.configureWidget(" +
str(k) + ", name, 'drag', val)")
exec("gui.set" + v + "DragFunction=set" + v + "DragFunction")
exec( "def set" + v +
"OverFunction(self, name, val): self.configureWidget(" +
str(k) + ", name, 'over', val)")
exec("gui.set" + v + "OverFunction=set" + v + "OverFunction")
# http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/cursors.html
exec( "def set" + v +
"Cursor(self, name, val): self.configureWidget(" +
str(k) + ", name, 'cursor', val)")
exec("gui.set" + v + "Cursor=set" + v + "Cursor")
exec( "def set" + v +
"Focus(self, name): self.configureWidget(" +
str(k) + ", name, 'focus', None)")
exec("gui.set" + v + "Focus=set" + v + "Focus")
# change the stickyness
exec( "def set" + v +
"Sticky(self, name, pos): self.configureWidget(" +
str(k) + ", name, 'sticky', pos)")
exec("gui.set" + v + "Sticky=set" + v + "Sticky")
# add right click
exec( "def set" + v +
"RightClick(self, name, menu): self.configureWidget(" +
str(k) + ", name, 'rightClick', menu)")
exec("gui.set" + v + "RightClick=set" + v + "RightClick")
# functions to manage widgets
exec( "def show" + v +
"(self, name): self.showWidgetType(" +
str(k) + ", name)")
exec("gui.show" + v + "=show" + v)
exec( "def hide" + v +
"(self, name): self.hideWidgetType(" +
str(k) + ", name)")
exec("gui.hide" + v + "=hide" + v)
exec( "def remove" + v +
"(self, name): self.removeWidgetType(" +
str(k) + ", name)")
exec("gui.remove" + v + "=remove" + v)
# convenience functions for enable/disable
# might not all be necessary, could make exclusion list
exec( "def enable" + v +
"(self, name): self.configureWidget(" +
str(k) + ", name, 'state', 'normal')")
exec("gui.enable" + v + "=enable" + v)
exec( "def disable" + v +
"(self, name): self.configureWidget(" +
str(k) + ", name, 'state', 'disabled')")
exec("gui.disable" + v + "=disable" + v)
# group functions
exec( "def set" + v +
"Widths(self, names, val): self.configureWidgets(" +
str(k) + ", names, 'width', val)")
exec("gui.set" + v + "Widths=set" + v + "Widths")
exec( "def setAll" + v +
"Widths(self, val): self.configureAllWidgets(" +
str(k) + ", 'width', val)")
exec("gui.setAll" + v + "Widths=setAll" + v + "Widths")
exec( "def set" + v +
"Heights(self, names, val): self.configureWidgets(" +
str(k) + ", names, 'height', val)")
exec("gui.set" + v + "Heights=set" + v + "Heights")
exec( "def setAll" + v +
"Heights(self, val): self.configureAllWidgets(" +
str(k) + ", 'height', val)")
exec("gui.setAll" + v + "Heights=setAll" + v + "Heights")
exec( "def get" + v +
"Widget(self, name, val=None): return self.getWidget(" +
str(k) + ", name, val)")
exec("gui.get" + v + "Widget=get" + v + "Widget")
exec( "def get" + v +
"Bg(self, name, val=None): return self.getWidgetProperty(" +
str(k) + ", name, val, 'bg')")
exec("gui.get" + v + "Bg=get" + v + "Bg")
#####################################
# FUNCTION to hide/show/remove widgets
#####################################
    def _widgetHasContainer(self, kind, item):
        return kind in (
            self.Widgets.Scale,
            self.Widgets.Entry,
            self.Widgets.SpinBox,
            self.Widgets.OptionBox,
            self.Widgets.Label) and item.inContainer
def hideWidgetType(self, kind, name):
item = self.widgetManager.get(kind, name)
if self._widgetHasContainer(kind, item):
gui.trace("Hiding widget in container: %s", name)
widget = item.master
if hasattr(widget, "inContainer") and widget.inContainer:
gui.trace("Have container in container")
widget = widget.master
try: self.widgetManager.get(self.Widgets.FrameLabel, name).hidden = True
except: pass
else:
gui.trace("Hiding widget: %s", name)
            widget = item
            if kind in [self.Widgets.RadioButton]:
                for rb in item:
                    if rb.text == name:
                        widget = rb
if "in" in widget.grid_info():
gui.trace("Widget hidden: %s", name)
widget.grid_remove()
else:
gui.trace("Hiding failed - %s not showing", name)
def showWidgetType(self, kind, name):
item = self.widgetManager.get(kind, name)
if self._widgetHasContainer(kind, item):
gui.trace("Showing widget in container: %s", name)
widget = item.master
if hasattr(widget, "inContainer") and widget.inContainer:
gui.trace("Have container in container")
widget = widget.master
try: self.widgetManager.get(self.Widgets.FrameLabel, name).hidden = False
except: pass
else:
msg = "Showing widget"
widget = item
# only show the widget, if it's not already showing
if "in" not in widget.grid_info():
gui.trace("Widget shown: %s", name)
widget.grid()
# self._updateLabelBoxes(name, widget.grid_info()['column'])
else:
gui.trace("Showing failed - %s already showing", name)
def removeWidgetType(self, kind, name):
item = self.widgetManager.get(kind, name)
# if it's a flasher, remove it
if item in self.widgetManager.group(self.Widgets.FlashLabel):
gui.trace("Remove flash label: %s", name)
self.widgetManager.remove(self.Widgets.FlashLabel, item)
if len(self.widgetManager.group(self.Widgets.FlashLabel)) == 0:
self.doFlash = False
# animated images...
if self._widgetHasContainer(kind, item):
gui.trace("Remove widget (%s) in container: %s", kind, name)
parent = item.master
# is it a container in a labelBox?
# if so - remove & destroy the labelBox
if hasattr(parent, "inContainer") and parent.inContainer:
gui.trace("Container in container")
labParent = parent.master
self.widgetManager.remove(self.Widgets.FrameBox, labParent)
self.widgetManager.remove(self.Widgets.Label, name)
self.widgetManager.remove(self.Widgets.FrameLabel, name)
labParent.grid_forget()
labParent.destroy()
# otherwise destroy this container & a label if we have one
else:
parent.grid_forget()
parent.destroy()
try:
self.widgetManager.remove(self.Widgets.Label, name)
self.widgetManager.remove(self.Widgets.FrameLabel, name)
except: pass
self.widgetManager.remove(self.Widgets.FrameBox, parent)
else:
gui.trace("Remove widget: %s", name)
item.grid_forget()
item.destroy()
# finally remove the widget - this will also remove the variable
self.widgetManager.remove(kind, name)
def removeAllWidgets(self, current=False):
if current: containerData = self.containerStack[-1]
else: containerData = self.containerStack[0]
container = containerData['container']
for child in container.winfo_children():
child.destroy()
self._configBg(container)
# reset the grid measurements
for i in range(Grid.grid_size(container)[0]):
container.columnconfigure(i, minsize=0, weight=0, pad=0)
for i in range(Grid.grid_size(container)[1]):
container.rowconfigure(i, minsize=0, weight=0, pad=0)
containerData = self._prepContainer(containerData["title"], containerData["type"], containerData["container"], 0, 1)
self.containerStack[0] = containerData
self._initVars(reset=True)
self.setSize(None)
#####################################
# FUNCTION for managing commands
#####################################
@staticmethod
def MAKE_FUNC(funcName, param):
''' function to automate lambdas '''
# make sure we get a function
if not callable(funcName) and not hasattr(funcName, '__call__'):
raise Exception("Invalid function: " + str(funcName))
# check if the function requires arguments
argsList = getArgs(funcName)
# if no args, or 1 arg in a bound function
noArgs = len(argsList[0])==0 or (len(argsList[0])==1 and inspect.ismethod(funcName))
# if no args/varargs/kwargs then don't give the param
if noArgs and argsList[1] is None and argsList[2] is None:
return lambda *args: funcName()
else:
return lambda *args: funcName(param)
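    # Minimal sketch of how MAKE_FUNC is used (callback names are hypothetical):
    # it wraps a user callback so tkinter can invoke it with its own event args,
    # passing the widget name through only when the callback takes a parameter.
    #
    #   def pressed(btn): print(btn)
    #   gui.MAKE_FUNC(pressed, "b1")("<event>")   # calls pressed("b1")
    #
    #   def beep(): print("beep")
    #   gui.MAKE_FUNC(beep, "b1")()               # calls beep() - no param passed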
def _checkFunc(self, names, funcs):
singleFunc = None
if funcs is None:
return None
elif callable(funcs):
singleFunc = funcs
elif len(names) != len(funcs):
raise Exception("List sizes don't match")
return singleFunc
#####################################
# FUNCTIONS to position a widget
#####################################
def getRow(self):
return self._getContainerProperty('emptyRow')
def gr(self):
return self.getRow()
def setRow(self, row):
self.containerStack[-1]['emptyRow'] = row
row = property(getRow, setRow)
def _repackWidget(self, widget, params):
if widget.winfo_manager() == "grid":
ginfo = widget.grid_info()
ginfo.update(params)
widget.grid(ginfo)
elif widget.winfo_manager() == "pack":
pinfo = widget.pack_info()
pinfo.update(params)
widget.pack(pinfo)
else:
raise Exception("Unknown geometry manager: " + widget.winfo_manager())
# convenience function to set RCS, referencing the current container's
# settings
def _getRCS(self, row, column, colspan, rowspan):
        if row in [-1, 'previous', 'p', 'pr']:
row = self._getContainerProperty('emptyRow') - 1
else:
# this is the default,
if row is None or row in ['next', 'n']:
row = self._getContainerProperty('emptyRow')
self.containerStack[-1]['emptyRow'] = row + 1
if column >= self._getContainerProperty('colCount'):
self.containerStack[-1]['colCount'] = column + 1
# if column == 0 and colspan == 0 and self._getContainerProperty('colCount') > 1:
# colspan = self._getContainerProperty('colCount')
return row, column, colspan, rowspan
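    # Positioning contract, sketched with illustrative values:
    #   _getRCS(None, 0, 0, 0)  -> places on the container's next empty row, then bumps emptyRow
    #   _getRCS('p', 0, 0, 0)   -> reuses the previous row (emptyRow - 1), emptyRow unchanged
    #   any column at/beyond the current colCount widens the container's colCount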
@staticmethod
def GET_WIDGET_TYPE(widget):
return widget.__class__.__name__
@staticmethod
def SET_WIDGET_FG(widget, fg, external=False):
widgType = gui.GET_WIDGET_TYPE(widget)
gui.trace("SET_WIDGET_FG: %s - %s", widgType, fg)
# only configure these widgets if external
if widgType == "Link":
if external:
widget.fg = fg
widget.overFg = gui.TINT(widget, fg)
widget.config(foreground=fg)
elif widgType in ["Entry", "AutoCompleteEntry"]:
if external:
widget.oldFg = fg
if not widget.showingDefault:
widget.config(foreground=fg)
elif widgType in ["Spinbox", "AjText", "AjScrolledText", "Button"]:
if external:
widget.config(fg=fg)
elif widgType == "OptionMenu":
if external:
widget.config(fg=fg)
widget["menu"].config(fg=fg)
# handle flash labels
elif widgType == "Label":
widget.config(fg=fg)
widget.origFg=fg
try: widget.config(bg=widget.origBg)
except: pass # not a flash label
# deal with generic groupers
elif widgType in ["Frame", "LabelFrame", "PanedFrame", "Pane", "ajFrame"]:
for child in widget.winfo_children():
gui.SET_WIDGET_FG(child, fg, external)
# deal with specific containers
elif widgType == "LabelBox":
try:
if not widget.isValidation:
gui.SET_WIDGET_FG(widget.theLabel, fg, external)
except Exception as e:
gui.SET_WIDGET_FG(widget.theLabel, fg, external)
gui.SET_WIDGET_FG(widget.theWidget, fg, external)
elif widgType == "ButtonBox":
gui.SET_WIDGET_FG(widget.theWidget, fg, external)
gui.SET_WIDGET_FG(widget.theButton, fg, external)
elif widgType == "WidgetBox":
for child in widget.theWidgets:
gui.SET_WIDGET_FG(child, fg, external)
elif widgType == "ListBoxContainer":
if external:
gui.SET_WIDGET_FG(widget.lb, fg, external)
# skip these widgets
elif widgType in ["PieChart", "MicroBitSimulator", "Scrollbar"]:
pass
# always try these widgets
else:
try:
widget.config(fg=fg)
except Exception as e:
pass
@staticmethod
def TINT(widget, colour):
col = []
for a, b in enumerate(widget.winfo_rgb(colour)):
t = int(min(max(0, b / 256 + (255 - b / 256) * .3), 255))
t = str(hex(t))[2:]
if len(t) == 1:
t = '0' + t
elif len(t) == 0:
t = '00'
col.append(t)
if int(col[0], 16) > 210 and int(col[1], 16) > 210 and int(col[2], 16) > 210:
if gui.GET_PLATFORM() == gui.LINUX:
return "#c3c3c3"
else:
return "systemHighlight"
else:
return "#" + "".join(col)
# convenience method to set a widget's bg
@staticmethod
def SET_WIDGET_BG(widget, bg, external=False, tint=False):
if bg is None: # ignore empty colours
return
widgType = gui.GET_WIDGET_TYPE(widget)
isDarwin = gui.GET_PLATFORM() == gui.MAC
isLinux = gui.GET_PLATFORM() == gui.LINUX
gui.trace("Config %s BG to %s", widgType, bg)
# these have a highlight border to remove
hideBorders = [ "Text", "AjText",
"ScrolledText", "AjScrolledText",
"Scale", "AjScale",
"OptionMenu",
"Entry", "AutoCompleteEntry",
"Radiobutton", "Checkbutton",
"Button"]
# these shouldn't have their BG coloured by default
noBg = [ "Button",
"Scale", "AjScale",
"Spinbox", "Listbox", "OptionMenu",
"SplitMeter", "DualMeter", "Meter",
"Entry", "AutoCompleteEntry",
"Text", "AjText",
"ScrolledText", "AjScrolledText",
"ToggleFrame"]
# remove the highlight borders
if widgType in hideBorders:
if widgType == "Entry" and widget.isValidation:
pass
elif widgType == "OptionMenu":
widget["menu"].config(borderwidth=0)
widget.config(highlightbackground=bg)
if isDarwin:
widget.config(background=bg)
elif widgType in ["Radiobutton", "Checkbutton"]:
widget.config(activebackground=bg, highlightbackground=bg)
else:
widget.config(highlightbackground=bg)
# do some fancy tinting
if external or tint:
if widgType in ["Button", "Scale", "AjScale"]:
widget.config(activebackground=gui.TINT(widget, bg))
elif widgType in ["Entry", "Text", "AjText", "ScrolledText", "AjScrolledText", "AutoCompleteEntry", "Spinbox"]:
widget.config(selectbackground=gui.TINT(widget, bg))
widget.config(highlightcolor=gui.TINT(widget, bg))
if widgType in ["Text", "AjText", "ScrolledText", "AjScrolledText"]:
widget.config(inactiveselectbackground=gui.TINT(widget, bg))
elif widgType == "Spinbox":
widget.config(buttonbackground=bg)
elif widgType == "Listbox":
widget.config(selectbackground=gui.TINT(widget, bg))
elif widgType == "OptionMenu":
widget.config(activebackground=gui.TINT(widget, bg))
widget["menu"].config(activebackground=gui.TINT(widget, bg))
elif widgType in ["Radiobutton", "Checkbutton"]:
widget.config(activebackground=gui.TINT(widget, bg))
# if this is forced - change everything
if external:
widget.config(bg=bg)
if widgType == "OptionMenu":
widget["menu"].config(bg=bg)
# otherwise only colour un-excluded widgets
elif widgType not in noBg:
widget.config(bg=bg)
# deal with flash labels
if widgType == "Label":
widget.origBg=bg
try: widget.config(fg=widget.origFg)
except: pass # not a flash label
# now do any of the below containers
if widgType in ["LabelFrame", "PanedFrame", "Pane", "ajFrame"]:
for child in widget.winfo_children():
gui.SET_WIDGET_BG(child, bg, external, tint)
elif widgType == "LabelBox": # widget with label, in frame
if widget.theLabel is not None:
gui.SET_WIDGET_BG(widget.theLabel, bg, external, tint)
gui.SET_WIDGET_BG(widget.theWidget, bg, external, tint)
elif widgType == "ButtonBox": # widget with button, in frame
gui.SET_WIDGET_BG(widget.theWidget, bg, external, tint)
gui.SET_WIDGET_BG(widget.theButton, bg, external, tint)
elif widgType == "ListBoxContainer": # list box container
gui.SET_WIDGET_BG(widget.lb, bg, external, tint)
elif widgType == "WidgetBox": # group of buttons or labels
for widg in widget.theWidgets:
gui.SET_WIDGET_BG(widg, bg, external, tint)
def _getContainerProperty(self, prop=None):
if prop is not None:
return self.containerStack[-1][prop]
else:
return self.containerStack[-1]
def _getContainerBg(self):
if not self.ttkFlag:
return self.getContainer()["bg"]
else:
return None
def _getContainerFg(self):
try:
return self._getContainerProperty('fg')
except:
return "#000000"
# two important things here:
# grid - sticky: position of widget in its space (side or fill)
# row/columns configure - weight: how to grow with GUI
    def _positionWidget(self, widget, row, column=0, colspan=0, rowspan=0, sticky=W + E):
# allow item to be added to container
container = self.getContainer()
if not self.ttkFlag:
gui.SET_WIDGET_FG(widget, self._getContainerFg())
gui.SET_WIDGET_BG(widget, self._getContainerBg())
# alpha paned window placement
if self._getContainerProperty('type') == self.Widgets.PanedFrame:
container.add(widget)
self.containerStack[-1]['widgets'] = True
return
# else, add to grid
row, column, colspan, rowspan = self._getRCS(row, column, colspan, rowspan)
# build a dictionary for the named params
iX = self._getContainerProperty('ipadx')
iY = self._getContainerProperty('ipady')
cX = self._getContainerProperty('padx')
cY = self._getContainerProperty('pady')
params = {
"row": row,
"column": column,
"ipadx": iX,
"ipady": iY,
"padx": cX,
"pady": cY}
# if we have a column span, apply it
if colspan != 0:
params["columnspan"] = colspan
# if we have a rowspan, apply it
if rowspan != 0:
params["rowspan"] = rowspan
# 1) if param has sticky, use that
# 2) if container has sticky - override
# 3) else, none
if self._getContainerProperty("sticky") is not None:
params["sticky"] = self._getContainerProperty("sticky")
elif sticky is not None:
params["sticky"] = sticky
else:
pass
# make colspanned widgets expand to fill height of cell
if rowspan != 0:
if "sticky" in params:
if "n" not in params["sticky"]:
params["sticky"] += "n"
if "s" not in params["sticky"]:
params["sticky"] += "s"
else:
params["sticky"] = "ns"
# expand that dictionary out as we pass it as a value
widget.grid(**params)
self.containerStack[-1]['widgets'] = True
# if we're in a PANEDFRAME - we need to set parent...
if self._getContainerProperty('type') == self.Widgets.Pane:
self.containerStack[-2]['widgets'] = True
# configure the row/column to expand equally
if self._getContainerProperty('expand') in ["ALL", "COLUMN"]:
Grid.columnconfigure(container, column, weight=1)
else:
Grid.columnconfigure(container, column, weight=0)
if self._getContainerProperty('expand') in ["ALL", "ROW"]:
Grid.rowconfigure(container, row, weight=1)
else:
Grid.rowconfigure(container, row, weight=0)
# self._getContainerProperty('container').columnconfigure(0, weight=1)
# self._getContainerProperty('container').rowconfigure(0, weight=1)
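    # Sketch of the expand behaviour above: with expand="ALL" (the default set in
    # _prepContainer) every used row & column gets weight=1, so widgets stretch as
    # the window grows; "COLUMN"/"ROW" limit that to one axis, and anything else
    # pins both weights to 0 so the grid keeps its natural size.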
#####################################
# FUNCTION to manage containers
#####################################
# prepares a new empty container dict
def _prepContainer(self, cTitle, cType, container, row, col, sticky=None):
containerData = {'type': cType,
'title': cTitle,
'container': container,
'emptyRow': row,
'colCount': col,
'sticky': sticky,
'padx': 0,
'pady': 0,
'ipadx': 0,
'ipady': 0,
'expand': "ALL",
'widgets': False,
'inputFont': self._inputFont,
'labelFont': self._labelFont,
'buttonFont': self._buttonFont,
"fg": self._getContainerFg()}
return containerData
# adds the container to the container stack - makes this the current working container
def _addContainer(self, cTitle, cType, container, row, col, sticky=None):
containerData = self._prepContainer(cTitle, cType, container, row, col, sticky)
self.containerStack.append(containerData)
    def openFrameStack(self, title):
        return self._openContainer(self.Widgets.FrameStack, title)
    def openSubFrame(self, frameTitle, frameNumber):
        return self._openContainer(self.Widgets.SubFrame, frameTitle+"__"+str(frameNumber))
    def openRootPage(self, title):
        return self._openContainer(self.Widgets.RootPage, title)
    def openLabelFrame(self, title):
        return self._openContainer(self.Widgets.LabelFrame, title)
    def openFrame(self, title):
        try: return self._openContainer(self.Widgets.Frame, title)
        except: return self._openContainer(self.Widgets.SubFrame, title)
    def openToggleFrame(self, title):
        return self._openContainer(self.Widgets.ToggleFrame, title)
    def openPagedWindow(self, title):
        return self._openContainer(self.Widgets.PagedWindow, title)
    def openPage(self, windowTitle, pageNumber):
        return self._openContainer(self.Widgets.Page, windowTitle+"__"+str(pageNumber))
    def openTabbedFrame(self, title):
        return self._openContainer(self.Widgets.TabbedFrame, title)
    def openTab(self, frameTitle, tabTitle):
        return self._openContainer(self.Widgets.Tab, frameTitle+"__"+tabTitle)
    def openNotebook(self, title):
        return self._openContainer(self.Widgets.Notebook, title)
    def openNote(self, frameTitle, tabTitle):
        return self._openContainer(self.Widgets.Note, frameTitle+"__"+tabTitle)
    def openPanedFrame(self, title):
        return self._openContainer(self.Widgets.PanedFrame, title)
    def openPane(self, title):
        return self._openContainer(self.Widgets.Pane, title)
    def openSubWindow(self, title):
        return self._openContainer(self.Widgets.SubWindow, title)
    def openScrollPane(self, title):
        return self._openContainer(self.Widgets.ScrollPane, title)
# function to reload the specified container
def _openContainer(self, kind, title):
# get the cached container config for this container
kind = self.Widgets.name(kind)
cName = kind + "__" + title
try:
cConf = self.widgetManager.get(self.Widgets.ContainerLog, cName)
except KeyError:
raise Exception("Attempted to open invalid " + kind + ": " + str(title))
        self.containerStack.append(cConf)
        return cConf['container']
# returns the current working container
def getContainer(self):
container = self._getContainerProperty('container')
if self._getContainerProperty('type') == self.Widgets.ScrollPane:
return container.interior
elif self._getContainerProperty('type') == self.Widgets.PagedWindow:
return container.getPage()
elif self._getContainerProperty('type') == self.Widgets.ToggleFrame:
return container.getContainer()
elif self._getContainerProperty('type') == self.Widgets.SubWindow:
return container.canvasPane
else:
return container
# if possible, removes the current container
def _removeContainer(self):
if len(self.containerStack) == 1:
raise Exception("Can't remove container, already in root window.")
else:
container = self.containerStack.pop()
if not container['widgets']:
self.warn("Closing empty container: %s", container['title'])
# store the container so that it can be re-opened later
name = self.Widgets.name(container["type"]) + "__" + container["title"]
try:
self.widgetManager.add(self.Widgets.ContainerLog, name, container)
except:
pass # we'll ignore, as that means we already added it...
return container
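    # Container lifecycle, sketched: start*() pushes a config dict onto
    # containerStack (via _addContainer), stop*() pops it here and caches it in
    # the ContainerLog, and open*() re-pushes the cached dict so widgets can be
    # added to an existing container later, e.g. (names hypothetical):
    #
    #   app.startLabelFrame("LF")   # push
    #   app.stopLabelFrame()        # pop + cache as "LabelFrame__LF"
    #   app.openLabelFrame("LF")    # re-push the cached config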
# functions to start the various containers
def startContainer(self, fType, title, row=None, column=0, colspan=0, rowspan=0, sticky=None, name=None):
if name is None: name = title
if fType == self.Widgets.LabelFrame:
# first, make a LabelFrame, and position it correctly
self.widgetManager.verify(self.Widgets.LabelFrame, title)
if not self.ttkFlag:
container = LabelFrame(self.getContainer(), text=name, relief="groove")
container.config(background=self._getContainerBg(), font=self._getContainerProperty('labelFont'))
else:
container = ttk.LabelFrame(self.getContainer(), text=name, relief="groove")
container.DEFAULT_TEXT = name
container.isContainer = True
self.setPadX(5)
self.setPadY(5)
self._positionWidget(container, row, column, colspan, rowspan, "nsew")
self.widgetManager.add(self.Widgets.LabelFrame, title, container)
# now, add to top of stack
self._addContainer(title, self.Widgets.LabelFrame, container, 0, 1, sticky)
return container
elif fType == self.Widgets.Canvas:
# first, make a canvas, and position it correctly
self.widgetManager.verify(self.Widgets.Canvas, title)
container = Canvas(self.getContainer())
container.isContainer = True
self._positionWidget(container, row, column, colspan, rowspan, "nsew")
self.widgetManager.add(self.Widgets.Canvas, title, container)
# now, add to top of stack
self._addContainer(title, self.Widgets.Canvas, container, 0, 1, "")
return container
elif fType == self.Widgets.TabbedFrame:
self.widgetManager.verify(self.Widgets.TabbedFrame, title)
tabbedFrame = self._tabbedFrameMaker(self.getContainer(), self.ttkFlag, font=self._getContainerProperty('labelFont'))
if not self.ttkFlag:
tabbedFrame.config(bg=self._getContainerBg())
# tabbedFrame.isContainer = True
self._positionWidget(
tabbedFrame,
row,
column,
colspan,
rowspan,
sticky=sticky)
self.widgetManager.add(self.Widgets.TabbedFrame, title, tabbedFrame)
# now, add to top of stack
self._addContainer(title, self.Widgets.TabbedFrame, tabbedFrame, 0, 1, sticky)
return tabbedFrame
        elif fType == self.Widgets.Tab:
            # add to top of stack
            self.containerStack[-1]['widgets'] = True
            tabTitle = self._getContainerProperty('title') + "__" + title
            tabPane = self._getContainerProperty('container').addTab(title)
            self._addContainer(tabTitle, self.Widgets.Tab, tabPane, 0, 1, sticky)
            return tabPane
elif fType == self.Widgets.Notebook:
if not self.ttkFlag:
raise Exception("Cannot create a ttk Notebook, unless ttk is enabled.")
self.widgetManager.verify(self.Widgets.Notebook, title)
notebook = ttk.Notebook(self.getContainer())
# tabbedFrame.isContainer = True
self._positionWidget(
notebook,
row,
column,
colspan,
rowspan,
sticky=sticky)
self.widgetManager.add(self.Widgets.Notebook, title, notebook)
# now, add to top of stack
self._addContainer(title, self.Widgets.Notebook, notebook, 0, 1, sticky)
return notebook
        elif fType == self.Widgets.Note:
            # add to top of stack
            self.containerStack[-1]['widgets'] = True
            noteTitle = self._getContainerProperty('title') + "__" + title
            frame = ttk.Frame(self._getContainerProperty('container'))
            self._getContainerProperty('container').add(frame, text=title)
            self._addContainer(noteTitle, self.Widgets.Note, frame, 0, 1, sticky)
            return frame
elif fType == self.Widgets.PanedFrame:
# if we previously put a frame for widgets
# remove it
if self._getContainerProperty('type') == self.Widgets.Pane:
self.stopContainer()
# now, add the new pane
self.widgetManager.verify(self.Widgets.PanedFrame, title)
pane = PanedWindow(
self.getContainer(),
showhandle=True,
sashrelief="groove",
bg=self._getContainerBg())
pane.isContainer = True
self._positionWidget(
pane, row, column, colspan, rowspan, sticky=sticky)
self.widgetManager.add(self.Widgets.PanedFrame, title, pane)
# now, add to top of stack
self._addContainer(title, self.Widgets.PanedFrame, pane, 0, 1, sticky)
# now, add a frame to the pane
self.startContainer(self.Widgets.Pane, title)
return pane
elif fType == self.Widgets.Pane:
# create a frame, and add it to the pane
pane = Pane(self.getContainer(), bg=self._getContainerBg())
pane.isContainer = True
self._getContainerProperty('container').add(pane)
self.widgetManager.add(self.Widgets.Pane, title, pane)
# now, add to top of stack
self._addContainer(title, self.Widgets.Pane, pane, 0, 1, sticky)
return pane
elif fType == self.Widgets.ScrollPane:
self.widgetManager.verify(self.Widgets.ScrollPane, title)
            # name is used to disable scrollbars
            if name not in ["horizontal", "vertical", ""]:
                gui.warn("ScrollPane %s: invalid disabled value, must be 'horizontal', 'vertical' or ''", title)
scrollPane = ScrollPane(self.getContainer(), disabled=name)
if not self.ttkFlag:
scrollPane.config(bg=self._getContainerBg())
scrollPane.isContainer = True
self._positionWidget(
scrollPane,
row,
column,
colspan,
rowspan,
sticky=sticky)
self.widgetManager.add(self.Widgets.ScrollPane, title, scrollPane)
# now, add to top of stack
self._addContainer(title, self.Widgets.ScrollPane, scrollPane, 0, 1, sticky)
return scrollPane
elif fType == self.Widgets.ToggleFrame:
self.widgetManager.verify(self.Widgets.ToggleFrame, title)
toggleFrame = ToggleFrame(self.getContainer(), title=title, bg=self._getContainerBg())
toggleFrame.configure(font=self._getContainerProperty('labelFont'))
toggleFrame.isContainer = True
self._positionWidget(
toggleFrame,
row,
column,
colspan,
rowspan,
sticky=sticky)
self._addContainer(title, self.Widgets.ToggleFrame, toggleFrame, 0, 1, "nw")
self.widgetManager.add(self.Widgets.ToggleFrame, title, toggleFrame)
return toggleFrame
elif fType == self.Widgets.PagedWindow:
# create the paged window
pagedWindow = PagedWindow(self.getContainer(), title=title, bg=self._getContainerBg(), width=200, height=400, buttonFont=self._getContainerProperty('buttonFont'), titleFont=self._getContainerProperty('labelFont'))
# bind events
self.topLevel.bind("<Left>", pagedWindow.showPrev)
self.topLevel.bind("<Control-Left>", pagedWindow.showFirst)
self.topLevel.bind("<Right>", pagedWindow.showNext)
self.topLevel.bind("<Control-Right>", pagedWindow.showLast)
# register it as a container
pagedWindow.isContainer = True
self._positionWidget(pagedWindow, row, column, colspan, rowspan, sticky=sticky)
self._addContainer(title, self.Widgets.PagedWindow, pagedWindow, 0, 1, "nw")
self.widgetManager.add(self.Widgets.PagedWindow, title, pagedWindow)
return pagedWindow
elif fType == self.Widgets.Page:
page = self._getContainerProperty('container').addPage()
page.isContainer = True
self._addContainer(title, self.Widgets.Page, page, 0, 1, sticky)
self.containerStack[-1]['expand'] = "None"
return page
elif fType == self.Widgets.FrameStack:
# create the paged window
frameStack = FrameStack(self.getContainer(), bg=self._getContainerBg())
self.widgetManager.add(self.Widgets.FrameStack, title, frameStack)
# register it as a container
frameStack.isContainer = True
self._positionWidget(frameStack, row, column, colspan, rowspan, sticky=sticky)
self._addContainer(title, self.Widgets.FrameStack, frameStack, 0, 1, "news")
return frameStack
elif fType == self.Widgets.Frame:
# first, make a Frame, and position it correctly
self.widgetManager.verify(self.Widgets.Frame, title)
container = self._makeAjFrame()(self.getContainer())
container.isContainer = True
container.config(background=self._getContainerBg())
self._positionWidget( container, row, column, colspan, rowspan, "nsew")
self.widgetManager.add(self.Widgets.Frame, title, container)
# now, add to top of stack
self._addContainer(title, self.Widgets.Frame, container, 0, 1, sticky)
return container
elif fType == self.Widgets.SubFrame:
subFrame = self._getContainerProperty('container').addFrame()
subFrame.isContainer = True
self._addContainer(title, self.Widgets.SubFrame, subFrame, 0, 1, "news")
self.widgetManager.add(self.Widgets.Frame, title, subFrame)
return subFrame
else:
raise Exception("Unknown container: " + fType)
#####################################
# Notebooks
#####################################
@contextmanager
def notebook(self, title, row=None, column=0, colspan=0, rowspan=0, sticky="NSEW", **kwargs):
try:
note = self.startNotebook(title, row, column, colspan, rowspan, sticky)
except ItemLookupError:
note = self.openNotebook(title)
self.configure(**kwargs)
try: yield note
finally: self.stopNotebook()
def startNotebook(self, title, row=None, column=0, colspan=0, rowspan=0, sticky="NSEW"):
return self.startContainer(self.Widgets.Notebook, title, row, column, colspan, rowspan, sticky)
def stopNotebook(self):
# auto close the existing TAB - keep it?
if self._getContainerProperty('type') == self.Widgets.Note:
self.warn("You didn't STOP the previous NOTE")
self.stopContainer()
self.stopContainer()
@contextmanager
def note(self, title, tabTitle=None, **kwargs):
if tabTitle is None:
note = self.startNote(title)
else:
            note = self.openNote(title, tabTitle)
self.configure(**kwargs)
try: yield note
finally: self.stopNote()
    def startNote(self, title):
        # auto close the previous NOTE - keep it?
        if self._getContainerProperty('type') == self.Widgets.Note:
            self.warn("You didn't STOP the previous NOTE")
            self.stopContainer()
        elif self._getContainerProperty('type') != self.Widgets.Notebook:
            raise Exception(
                "Can't add a Note to the current container: ", self._getContainerProperty('type'))
        return self.startContainer(self.Widgets.Note, title)
def stopNote(self):
if self._getContainerProperty('type') != self.Widgets.Note:
raise Exception("Can't stop a NOTE, currently in:",
self._getContainerProperty('type'))
self.stopContainer()
"""
def startCanvas(self, title, row=None, column=0, colspan=0, rowspan=0, sticky="news"):
return self.startContainer(self.Widgets.Canvas, title)
def stopCanvas(self):
if self._getContainerProperty('type') != self.Widgets.Canvas:
raise Exception("Can't stop a CANVAS, currently in:", self._getContainerProperty('type'))
self.stopContainer()
@contextmanager
def canvas(self, title, row=None, column=0, colspan=0, rowspan=0, sticky="NSEW"):
try:
canvas = self.startCanvas(title, row, column, colspan, rowspan, sticky)
except ItemLookupError:
canvas = self.openCanvas(title)
try: yield canvas
finally: self.stopCanvas()
"""
#####################################
# Tabbed Frames
#####################################
#################################
# TabbedFrame Class
#################################
def _tabbedFrameMaker(self, master, useTtk=False, **kwargs):
global OrderedDict
if OrderedDict is None:
from collections import OrderedDict
class TabContainer(frameBase, object):
def __init__(self, master, **kwargs):
super(TabContainer, self).__init__(master, **kwargs)
Frame(self, borderwidth=0, height=2, highlightthickness=0, bg='darkGray').pack(side=TOP, expand=True, fill=X)
Frame(self, borderwidth=0, width=2, highlightthickness=0, bg='darkGray').pack(side=LEFT, fill=Y, expand=0)
class Tab(labelBase, object):
def __init__(self, master, func, text, **kwargs):
super(Tab, self).__init__(master, text=text, **kwargs)
self.disabled = False
self.DEFAULT_TEXT = text
self.hidden = False
self.bind("<Button-1>", lambda *args: func(text))
self.border = Frame(master, borderwidth=0, width=2, highlightthickness=0, bg='darkGray')
def rename(self, newName):
# use the DEFAULT_TEXT if necessary
if newName is None: newName = self.DEFAULT_TEXT
self.config(text=newName)
def hide(self):
self.hidden = True
self.border.pack_forget()
self.pack_forget()
def display(self, fill=False):
self.border.pack_forget()
self.pack_forget()
if not self.hidden:
if fill: self.pack(side=LEFT, ipady=4, ipadx=4, expand=True, fill=BOTH)
else: self.pack(side=LEFT, ipady=4, ipadx=4)
self.border.pack(side=LEFT, fill=Y, expand=0)
class TabbedFrame(frameBase, object):
def __init__(self, master, fill=False, changeOnFocus=True, font=None, **kwargs):
# main frame & tabContainer inherit BG colour
super(TabbedFrame, self).__init__(master, **kwargs)
self.fill = fill
self.selectedTab = None
self.changeOnFocus = changeOnFocus
self.changeEvent = None
# layout the grid
Grid.columnconfigure(self, 0, weight=1)
Grid.rowconfigure(self, 1, weight=1)
# create two containers
self.tabContainer = TabContainer(self, **kwargs)
self.panes = FrameStack(self)
# now grid minimised or stretched
if self.fill: self.tabContainer.grid(row=0, sticky=W + E)
else: self.tabContainer.grid(row=0, sticky=W)
self.panes.grid(row=1, sticky="NESW")
self.EMPTY_PANE = self.panes.addFrame()
                # main store dictionary: name = [tab, pane]
self.widgetStore = OrderedDict()
# looks
self.tabFont = font
if gui.GET_PLATFORM() == gui.MAC: self.inactiveCursor="pointinghand"
elif gui.GET_PLATFORM() in [gui.WINDOWS, gui.LINUX]: self.inactiveCursor="hand2"
# selected tab & all panes
self.activeFg = "#000000"
self.activeBg = "#F6F6F6"
# other tabs
self.inactiveFg = "#000000"
self.inactiveBg = "#DADADA"
# disabled tabs
self.disabledFg = "gray"
self.disabledBg = "darkGray"
if useTtk:
self.ttkStyle = ttk.Style()
self.ttkStyle.configure("ActiveTab.TLabel", foreground=self.activeFg, background=self.activeBg)
self.ttkStyle.configure("InactiveTab.TLabel", foreground=self.inactiveFg, background=self.inactiveBg)
self.ttkStyle.configure("DisabledTab.TLabel", foreground=self.disabledFg, background=self.disabledBg)
self.ttkStyle.configure("DisabledTab.TFrame", background=self.disabledBg)
self.EMPTY_PANE.config(style="DisabledTab.TFrame")
else:
self.EMPTY_PANE.config(bg=self.disabledBg)
def config(self, cnf=None, **kw):
self.configure(cnf, **kw)
def configure(self, cnf=None, **kw):
kw = gui.CLEAN_CONFIG_DICTIONARY(**kw)
if "activeforeground" in kw: self.activeFg = kw.pop("activeforeground")
if "activebackground" in kw: self.activeBg = kw.pop("activebackground")
if "fg" in kw: self.inactiveFg = kw.pop("fg")
if "inactivebackground" in kw: self.inactiveBg = kw.pop("inactivebackground")
if "inactiveforeground" in kw: self.inactiveFg = kw.pop("inactiveforeground")
if "disabledforeground" in kw: self.disabledFg = kw.pop("disabledforeground")
if "disabledbackground" in kw: self.disabledBg = kw.pop("disabledbackground")
if "bg" in kw: self.tabContainer.configure(bg=kw["bg"])
if "font" in kw: self.tabFont.config(kw.pop("font"))
if "command" in kw: self.changeEvent = kw.pop("command")
# just in case
if not useTtk:
self.EMPTY_PANE.config(bg=self.disabledBg)
else:
self.ttkStyle.configure("ActiveTab.TLabel", foreground=self.activeFg, background=self.activeBg)
self.ttkStyle.configure("InactiveTab.TLabel", foreground=self.inactiveFg, background=self.inactiveBg)
self.ttkStyle.configure("DisabledTab.TLabel", foreground=self.disabledFg, background=self.disabledBg)
self.ttkStyle.configure("DisabledTab.TFrame", background=self.disabledBg)
# update tabs if we have any
self._configTabs()
# propagate any left over confs
super(TabbedFrame, self).config(cnf, **kw)
def hideTab(self, title):
if title not in self.widgetStore.keys(): raise ItemLookupError("Invalid tab name: " + title)
self.widgetStore[title][0].hide()
if self.selectedTab == title:
self.selectedTab = None
self._findNewTab()
self._configTabs()
def deleteTab(self, title):
self.hideTab(title)
tab = self.widgetStore[title][0]
tab.border.destroy()
tab.destroy()
pane = self.widgetStore[title][1]
pane.grid_forget()
pane.destroy()
del self.widgetStore[title]
def showTab(self, title):
if title not in self.widgetStore.keys(): raise ItemLookupError("Invalid tab name: " + title)
self.widgetStore[title][0].hidden = False
self.expandTabs(self.fill)
                if self.selectedTab is None:
self.changeTab(title)
def disableAllTabs(self, disabled=True):
for tab in self.widgetStore.keys():
self.disableTab(tab, disabled, refresh=False)
self._configTabs()
if disabled:
self.selectedTab = None
self.EMPTY_PANE.lift()
def disableTab(self, tabName, disabled=True, refresh=True):
if tabName not in self.widgetStore.keys(): raise ItemLookupError("Invalid tab name: " + tabName)
tab = self.widgetStore[tabName][0]
tab.disabled = disabled
if not disabled and not tab.hidden and self.selectedTab is None:
self.selectedTab = tabName
elif disabled and self.selectedTab == tabName:
self.selectedTab = None
if refresh: self._findNewTab()
if refresh:
self._configTabs()
def addTab(self, text, **kwargs):
# check for duplicates
if text in self.widgetStore: raise ItemLookupError("Duplicate tabName: " + text)
tab = Tab(self.tabContainer, text=text, func=self.changeTab, font=self.tabFont, **kwargs)
tab.display(self.fill)
# create the pane
pane = self.panes.addFrame()
if not useTtk:
pane.config(bg=self.activeBg)
# log the first tab as the selected tab
if self.selectedTab is None:
self.selectedTab = text
# log the widgets
self.widgetStore[text] = [tab, pane]
self._configTabs()
return pane
def getTab(self, title):
if title not in self.widgetStore.keys(): raise ItemLookupError("Invalid tab name: " + title)
else: return self.widgetStore[title][1]
def expandTabs(self, fill=True):
self.fill = fill
                # update the tabContainer
self.tabContainer.grid_forget()
if self.fill: self.tabContainer.grid(row=0, sticky=W + E)
else: self.tabContainer.grid(row=0, sticky=W)
for key in list(self.widgetStore.keys()):
tab = self.widgetStore[key][0]
tab.display(self.fill)
def renameTab(self, tabName, newName=None):
if tabName not in self.widgetStore.keys():
raise ItemLookupError("Invalid tab name: " + tabName)
self.widgetStore[tabName][0].rename(newName)
def changeTab(self, tabName):
if tabName not in self.widgetStore.keys(): raise ItemLookupError("Invalid tab name: " + tabName)
# stop if already selected or disabled
if self.selectedTab == tabName or self.widgetStore[tabName][0].disabled or self.widgetStore[tabName][0].hidden:
return
self.selectedTab = tabName
self._configTabs()
if self.changeEvent is not None: self.changeEvent()
def getSelectedTab(self):
return self.selectedTab
def setFont(self, **kwargs):
self.tabFont.config(**kwargs)
def _findNewTab(self):
for key in list(self.widgetStore.keys()):
if not self.widgetStore[key][0].disabled and not self.widgetStore[key][0].hidden:
self.changeTab(key)
return
# if we're here - all tabs are disabled
self.selectedTab = None
self.EMPTY_PANE.lift()
def _configTabs(self):
for key in list(self.widgetStore.keys()):
if self.widgetStore[key][0].disabled:
if not useTtk:
self.widgetStore[key][0].config(bg=self.disabledBg, fg=self.disabledFg, cursor="")
else:
self.widgetStore[key][0].config(style="DisabledTab.TLabel", cursor="")
else:
if key == self.selectedTab:
if not useTtk:
self.widgetStore[key][0].config(bg=self.widgetStore[key][1].cget('bg'), fg=self.activeFg, cursor="")
else:
self.widgetStore[key][0].config(style="SelectedTab.TLabel", cursor="")
self.widgetStore[key][1].lift()
else:
if not useTtk:
self.widgetStore[key][0].config(bg=self.inactiveBg, fg=self.inactiveFg, cursor=self.inactiveCursor)
else:
self.widgetStore[key][0].config(style="InactiveTab.TLabel", cursor=self.inactiveCursor)
return TabbedFrame(master, **kwargs)
@contextmanager
def tabbedFrame(self, title, row=None, column=0, colspan=0, rowspan=0, sticky="NSEW", **kwargs):
try:
tabs = self.startTabbedFrame(title, row, column, colspan, rowspan, sticky)
except ItemLookupError:
tabs = self.openTabbedFrame(title)
self.configure(**kwargs)
try: yield tabs
finally: self.stopTabbedFrame()
def startTabbedFrame(self, title, row=None, column=0, colspan=0, rowspan=0, sticky="NSEW"):
return self.startContainer(self.Widgets.TabbedFrame, title, row, column, colspan, rowspan, sticky)
def stopTabbedFrame(self):
# auto close the existing TAB - keep it?
if self._getContainerProperty('type') == self.Widgets.Tab:
self.warn("You didn't STOP the previous TAB")
self.stopContainer()
self.stopContainer()
def setTabbedFrameTabExpand(self, title, expand=True):
nb = self.widgetManager.get(self.Widgets.TabbedFrame, title)
nb.expandTabs(expand)
def setTabbedFrameSelectedTab(self, title, tab):
nb = self.widgetManager.get(self.Widgets.TabbedFrame, title)
try:
nb.changeTab(tab)
except KeyError:
raise ItemLookupError("Invalid tab name: " + str(tab))
def setTabbedFrameDisabledTab(self, title, tab, disabled=True):
nb = self.widgetManager.get(self.Widgets.TabbedFrame, title)
nb.disableTab(tab, disabled)
def setTabbedFrameDisableAllTabs(self, title, disabled=True):
nb = self.widgetManager.get(self.Widgets.TabbedFrame, title)
nb.disableAllTabs(disabled)
def deleteTabbedFrameTab(self, title, tab):
nb = self.widgetManager.get(self.Widgets.TabbedFrame, title)
self.cleanseWidgets(nb.getTab(tab))
nb.deleteTab(tab)
def showTabbedFrameTab(self, title, tab):
nb = self.widgetManager.get(self.Widgets.TabbedFrame, title)
nb.showTab(tab)
def hideTabbedFrameTab(self, title, tab):
nb = self.widgetManager.get(self.Widgets.TabbedFrame, title)
nb.hideTab(tab)
def setTabText(self, title, tab, newText=None):
nb = self.widgetManager.get(self.Widgets.TabbedFrame, title)
nb.renameTab(tab, newText)
def setTabFont(self, title, **kwargs):
nb = self.widgetManager.get(self.Widgets.TabbedFrame, title)
nb.setFont(**kwargs)
def setTabBg(self, title, tab, colour):
nb = self.widgetManager.get(self.Widgets.TabbedFrame, title)
tab = nb.getTab(tab)
gui.SET_WIDGET_BG(tab, colour)
# tab.config(bg=colour)
#gui.SET_WIDGET_BG(tab, colour)
for child in tab.winfo_children():
gui.SET_WIDGET_BG(child, colour)
@contextmanager
def tab(self, title, tabTitle=None, **kwargs):
if tabTitle is None:
try:
tab = self.startTab(title)
except ItemLookupError:
if self._getContainerProperty('type') != self.Widgets.TabbedFrame:
raise Exception("Can't open a Tab in the current container: ", self._getContainerProperty('type'))
else:
tabTitle = self._getContainerProperty('title')
tab = self.openTab(tabTitle, title)
else:
tab = self.openTab(title, tabTitle)
self.configure(**kwargs)
try: yield tab
finally: self.stopTab()
def startTab(self, title):
# auto close the previous TAB - keep it?
if self._getContainerProperty('type') == self.Widgets.Tab:
self.warn("You didn't STOP the previous TAB")
self.stopContainer()
elif self._getContainerProperty('type') != self.Widgets.TabbedFrame:
raise Exception("Can't add a Tab to the current container: ", self._getContainerProperty('type'))
        return self.startContainer(self.Widgets.Tab, title)
def getTabbedFrameSelectedTab(self, title):
nb = self.widgetManager.get(self.Widgets.TabbedFrame, title)
return nb.getSelectedTab()
def stopTab(self):
if self._getContainerProperty('type') != self.Widgets.Tab:
raise Exception("Can't stop a TAB, currently in:",
self._getContainerProperty('type'))
self.stopContainer()
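    # Illustrative usage of the tabbedFrame/tab context managers (titles are
    # hypothetical); the finally-clauses above guarantee the matching stop*() calls:
    #
    #   with app.tabbedFrame("Options"):
    #       with app.tab("General"):
    #           app.addLabel("l1", "general settings")
    #       with app.tab("Advanced"):
    #           app.addLabel("l2", "advanced settings")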
#####################################
# Simple Tables
#####################################
def _getDbTables(self, db):
''' query the specified database, and get a list of table names '''
self._importSqlite3()
if not sqlite3:
self.error("Unable to load DB tables - can't load sqlite3")
return []
query = "SELECT DISTINCT tbl_name FROM sqlite_master ORDER BY tbl_name COLLATE NOCASE"
data = []
with sqlite3.connect(db) as conn:
cursor = conn.cursor()
cursor.execute(query)
for row in cursor:
data.append(row[0])
return data
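    # e.g. _getDbTables("my.db") might return ["customers", "orders"] (illustrative).
    # Note: table names returned here are later concatenated straight into
    # "SELECT * from <table>" queries, so they are trusted rather than escaped.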
def replaceDbTable(self, title, db, table):
grid = self.widgetManager.get(self.Widgets.Table, title)
grid.db = db
grid.dbTable = table
self._importSqlite3()
if not sqlite3:
self.error("Unable to load DB data - can't load sqlite3")
return
with sqlite3.connect(db) as conn:
cursor = conn.cursor()
dataQuery = 'SELECT * from ' + table
# select all data
cursor.execute(dataQuery)
self.setTableHeaders(title, cursor)
self.replaceAllTableRows(title, cursor)
self.topLevel.update_idletasks()
def disableTableEntry(self, title, entryPos, disabled=True):
grid = self.widgetManager.get(self.Widgets.Table, title)
grid.disableEntry(entryPos, disabled=disabled)
def refreshDbTable(self, title):
grid = self.widgetManager.get(self.Widgets.Table, title)
self._importSqlite3()
if not sqlite3:
self.error("Unable to load DB data - can't load sqlite3")
return
with sqlite3.connect(grid.db) as conn:
cursor = conn.cursor()
dataQuery = 'SELECT * from ' + grid.dbTable
# select all data
cursor.execute(dataQuery)
self.replaceAllTableRows(title, cursor)
def refreshDbOptionBox(self, title, selected=None):
opt = self.widgetManager.get(self.Widgets.OptionBox, title)
data = self._getDbTables(opt.db)
self.changeOptionBox(title, data)
if selected is not None:
self.setOptionBox(title, selected)
def table(self, title, value=None, *args, **kwargs):
""" simpleGUI - adds, sets & gets tables all in one go """
widgKind = self.Widgets.Table
kind = kwargs.pop("kind", 'normal')
action=kwargs.pop('action', None)
addRow=kwargs.pop('addRow', None)
actionHeading=kwargs.pop('actionHeading', "Action")
actionButton=kwargs.pop('actionButton', "Press")
addButton=kwargs.pop('addButton', "Add")
showMenu=kwargs.pop('showMenu', False)
try: self.widgetManager.verify(widgKind, title)
except: # widget exists
if value is not None: self.replaceAllTableRows(title, value)
table = self.getTableEntries(title)
else: # new widget
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
if kind == 'normal':
table = self.addTable(title, value, *args,
action=action, addRow=addRow, actionHeading=actionHeading, actionButton=actionButton,
addButton=addButton, showMenu=showMenu, **kwargs
)
else:
table = self.addDbTable(title, value, *args,
action=action, addRow=addRow, actionHeading=actionHeading, actionButton=actionButton,
addButton=addButton, showMenu=showMenu, **kwargs
)
if len(kwargs) > 0:
self._configWidget(title, widgKind, **kwargs)
return table
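    # Minimal sketch of the one-shot table() helper (data is illustrative; the
    # first row is typically treated as the headers):
    #
    #   app.table("t1", [["name", "age"], ["Fred", 45]])   # first call: creates the table
    #   app.table("t1", [["name", "age"], ["Sue", 32]])    # later calls: replace all rows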
    def addDbTable(self, title, value, table=None, row=None, column=0, colspan=0, rowspan=0,
                action=None, addRow=None, actionHeading="Action", actionButton="Press",
                addButton="Add", showMenu=False, border="solid", **kwargs):
''' creates a new Table, displaying the specified database & table '''
self._importSqlite3()
if not sqlite3:
self.error("Unable to load DB data - can't load sqlite3")
return
with sqlite3.connect(value) as conn:
cursor = conn.cursor()
dataQuery = 'SELECT * from ' + table
# select all data
cursor.execute(dataQuery)
grid = self.addTable(title, cursor, row, column, colspan, rowspan,
action, addRow, actionHeading, actionButton,
addButton, showMenu, border=border
)
grid.db = value
grid.dbTable = table
return grid
def addTable(self, title, data, row=None, column=0, colspan=0, rowspan=0, action=None, addRow=None,
actionHeading="Action", actionButton="Press", addButton="Add", showMenu=False, border="solid", **kwargs):
''' creates a new table, displaying the specified data '''
self.widgetManager.verify(self.Widgets.Table, title)
if not self.ttkFlag:
grid = SimpleTable(self.getContainer(), title, data,
action, addRow,
actionHeading, actionButton, addButton,
showMenu, buttonFont=self._getContainerProperty('buttonFont'),
font=self.tableFont, background=self._getContainerBg(),
queueFunction=self.queueFunction, border=border
)
else:
grid = SimpleTable(self.getContainer(), title, data,
action, addRow,
actionHeading, actionButton, addButton,
showMenu, buttonFont=self._getContainerProperty('buttonFont'),
queueFunction=self.queueFunction, border=border
)
self._positionWidget(grid, row, column, colspan, rowspan, N+E+S+W)
self.widgetManager.add(self.Widgets.Table, title, grid)
return grid
def getTableEntries(self, title):
return self.widgetManager.get(self.Widgets.Table, title).getEntries()
def getTableSelectedCells(self, title):
return self.widgetManager.get(self.Widgets.Table, title).getSelectedCells()
def selectTableRow(self, title, row, highlight=None):
grid = self.widgetManager.get(self.Widgets.Table, title)
grid.selectRow(row, highlight)
def selectTableColumn(self, title, col, highlight=None):
grid = self.widgetManager.get(self.Widgets.Table, title)
grid.selectColumn(col, highlight)
def addTableRow(self, title, data):
''' adds a new row of data to the specified table '''
grid = self.widgetManager.get(self.Widgets.Table, title)
grid.addRow(data)
def addTableRows(self, title, data):
''' adds multiple rows of data to the specified table '''
grid = self.widgetManager.get(self.Widgets.Table, title)
grid.addRows(data, scroll=True)
def addTableColumn(self, title, columnNumber, data):
''' adds a new column of data, in the specified position, to the specified table '''
grid = self.widgetManager.get(self.Widgets.Table, title)
grid.addColumn(columnNumber, data)
def deleteTableColumn(self, title, columnNumber):
''' deletes the specified column from the specified table '''
grid = self.widgetManager.get(self.Widgets.Table, title)
grid.deleteColumn(columnNumber)
def setTableHeaders(self, title, data):
''' change the headers in the specified table '''
grid = self.widgetManager.get(self.Widgets.Table, title)
grid.setHeaders(data)
def deleteTableRow(self, title, rowNum):
grid = self.widgetManager.get(self.Widgets.Table, title)
grid.deleteRow(rowNum)
def deleteAllTableRows(self, title):
grid = self.widgetManager.get(self.Widgets.Table, title)
grid.deleteAllRows()
def sortTable(self, title, columnNumber, descending=False):
grid = self.widgetManager.get(self.Widgets.Table, title)
grid.sort(columnNumber, descending)
def getTableRowCount(self, title):
grid = self.widgetManager.get(self.Widgets.Table, title)
return grid.getRowCount()
def getTableRow(self, title, rowNumber):
grid = self.widgetManager.get(self.Widgets.Table, title)
return grid.getRow(rowNumber)
def confTable(self, title, field, value):
grid = self.widgetManager.get(self.Widgets.Table, title)
kw = {field:value}
grid.config(**kw)
def replaceTableRow(self, title, rowNum, data):
grid = self.widgetManager.get(self.Widgets.Table, title)
grid.replaceRow(rowNum, data)
def replaceAllTableRows(self, title, data):
grid = self.widgetManager.get(self.Widgets.Table, title)
grid.deleteAllRows()
grid.addRows(data, scroll=False)
# temporary deprecated functions
def addGrid(self, title, data, row=None, column=0, colspan=0, rowspan=0, action=None, addRow=None,
actionHeading="Action", actionButton="Press", addButton="Add", showMenu=False):
''' DEPRECATED - adds a new grid widget with the specified data '''
gui.warn("Deprecated - grids renamed to tables")
return self.addTable(title, data, row, column, colspan, rowspan, action, addRow, actionHeading, actionButton, addButton, showMenu)
def addDbGrid(self, title, db, table, row=None, column=0, colspan=0, rowspan=0, action=None, addRow=None,
actionHeading="Action", actionButton="Press", addButton="Add", showMenu=False):
''' DEPRECATED - adds a new table widget, with the specified database and table '''
gui.warn("Deprecated - grids renamed to tables")
return self.addDbTable(title, db, table, row, column, colspan, rowspan, action, addRow, actionHeading, actionButton, addButton, showMenu)
def replaceDbGrid(self, title, db, table):
gui.warn("Deprecated - grids renamed to tables")
return self.replaceDbTable(title, db, table)
def refreshDbGrid(self, title):
gui.warn("Deprecated - grids renamed to tables")
return self.refreshDbTable(title)
def selectGridRow(self, title, row, highlight=None):
gui.warn("Deprecated - grids renamed to tables")
return self.selectTableRow(title, row, highlight)
def getGridEntries(self, title):
gui.warn("Deprecated - grids renamed to tables")
return self.getTableEntries(title)
def getGridSelectedCells(self, title):
gui.warn("Deprecated - grids renamed to tables")
return self.getTableSelectedCells(title)
def selectGridColumn(self, title, col, highlight=None):
return self.selectTableColumn(title, col, highlight)
def addGridRow(self, title, data):
''' DEPRECATED - adds a row of data to the specified grid '''
return self.addTableRow(title, data)
def addGridRows(self, title, data):
''' DEPRECATED - adds new rows of data to the specified grid '''
return self.addTableRows(title, data)
def addGridColumn(self, title, columnNumber, data):
''' DEPRECATED - adds a column of data to the specified grid '''
return self.addTableColumn(title, columnNumber, data)
def deleteGridColumn(self, title, columnNumber):
return self.deleteTableColumn(title, columnNumber)
def setGridHeaders(self, title, data):
return self.setTableHeaders(title, data)
def deleteGridRow(self, title, rowNum):
return self.deleteTableRow(title, rowNum)
def deleteAllGridRows(self, title):
return self.deleteAllTableRows(title)
def sortGrid(self, title, columnNumber, descending=False):
return self.sortTable(title, columnNumber, descending)
def getGridRowCount(self, title):
return self.getTableRowCount(title)
def getGridRow(self, title, rowNumber):
return self.getTableRow(title, rowNumber)
def confGrid(self, title, field, value):
return self.confTable(title, field, value)
def replaceGridRow(self, title, rowNum, data):
return self.replaceTableRow(title, rowNum, data)
def replaceAllGridRows(self, title, data):
return self.replaceAllTableRows(title, data)
#####################################
# Paned Frames
#####################################
@contextmanager
def panedFrame(self, title, row=None, column=0, colspan=0, rowspan=0, sticky="NSEW", **kwargs):
reOpen = False
try:
pane = self.startPanedFrame(title, row, column, colspan, rowspan, sticky)
except ItemLookupError:
reOpen = True
pane = self.openPane(title)
self.configure(**kwargs)
try: yield pane
finally:
if reOpen:
self.stopContainer()
else:
self.stopPanedFrame()
def startPanedFrame(self, title, row=None, column=0, colspan=0, rowspan=0, sticky="NSEW"):
        return self.startContainer(self.Widgets.PanedFrame, title, row, column, colspan, rowspan, sticky)
def stopPanedFrame(self):
if self._getContainerProperty('type') == self.Widgets.Pane:
self.stopContainer()
if self._getContainerProperty('type') != self.Widgets.PanedFrame:
raise Exception("Can't stop a PANEDFRAME, currently in:",
self._getContainerProperty('type'))
self.stopContainer()
@contextmanager
def panedFrameVertical(self, title, row=None, column=0, colspan=0, rowspan=0, sticky="NSEW", **kwargs):
reOpen = False
try:
pane = self.startPanedFrameVertical(title, row, column, colspan, rowspan, sticky)
except ItemLookupError:
reOpen = True
pane = self.openPane(title)
self.configure(**kwargs)
try: yield pane
finally:
if reOpen:
self.stopContainer()
else:
self.stopPanedFrame()
    def startPanedFrameVertical(self, title, row=None, column=0, colspan=0, rowspan=0, sticky="NSEW"):
        pane = self.startPanedFrame(title, row, column, colspan, rowspan, sticky)
        self.setPanedFrameVertical(title)
        return pane
# make a PanedFrame align vertically
def setPanedFrameVertical(self, window):
pane = self.widgetManager.get(self.Widgets.PanedFrame, window)
pane.config(orient=VERTICAL)
#####################################
# Label Frames
#####################################
@contextmanager
def labelFrame(self, title, row=None, column=0, colspan=0, rowspan=0, sticky=W, hideTitle=False, **kwargs):
name = kwargs.pop("label", kwargs.pop("name", None))
try:
lf = self.startLabelFrame(title, row, column, colspan, rowspan, sticky, hideTitle, name)
except ItemLookupError:
lf = self.openLabelFrame(title)
self.configure(**kwargs)
try: yield lf
finally: self.stopLabelFrame()
# sticky is alignment inside frame
# frame will be added as other widgets
def startLabelFrame(self, title, row=None, column=0, colspan=0, rowspan=0, sticky=W, hideTitle=False, label=None, name=None):
if label is not None: name = label
if hideTitle: name = ''
lf = self.startContainer(self.Widgets.LabelFrame, title, row, column, colspan, rowspan, sticky, name)
return lf
def stopLabelFrame(self):
if self._getContainerProperty('type') != self.Widgets.LabelFrame:
raise Exception("Can't stop a LABELFRAME, currently in:",
self._getContainerProperty('type'))
self.stopContainer()
# function to set position of title for label frame
def setLabelFrameTitle(self, title, newTitle):
frame = self.widgetManager.get(self.Widgets.LabelFrame, title)
frame.config(text=newTitle)
#####################################
# Toggle Frames
#####################################
@contextmanager
def toggleFrame(self, title, row=None, column=0, colspan=0, rowspan=0, **kwargs):
try:
tog = self.startToggleFrame(title, row, column, colspan, rowspan)
except ItemLookupError:
tog = self.openToggleFrame(title)
self.configure(**kwargs)
try: yield tog
finally: self.stopToggleFrame()
###### TOGGLE FRAMES #######
def startToggleFrame(self, title, row=None, column=0, colspan=0, rowspan=0):
return self.startContainer(self.Widgets.ToggleFrame, title, row, column, colspan, rowspan, sticky="new")
def stopToggleFrame(self):
if self._getContainerProperty('type') != self.Widgets.ToggleFrame:
raise Exception("Can't stop a TOGGLEFRAME, currently in:",
self._getContainerProperty('type'))
self._getContainerProperty('container').stop()
self.stopContainer()
def toggleToggleFrame(self, title):
toggle = self.widgetManager.get(self.Widgets.ToggleFrame, title)
toggle.toggle()
def setToggleFrameText(self, title, newText):
toggle = self.widgetManager.get(self.Widgets.ToggleFrame, title)
toggle.config(text=newText)
def getToggleFrameState(self, title):
toggle = self.widgetManager.get(self.Widgets.ToggleFrame, title)
return toggle.isShowing()
#####################################
# Paged Windows
#####################################
@contextmanager
def pagedWindow(self, title, row=None, column=0, colspan=0, rowspan=0, **kwargs):
try:
pw = self.startPagedWindow(title, row, column, colspan, rowspan)
except ItemLookupError:
pw = self.openPagedWindow(title)
self.configure(**kwargs)
try: yield pw
finally: self.stopPagedWindow()
###### PAGED WINDOWS #######
def startPagedWindow(self, title, row=None, column=0, colspan=0, rowspan=0):
        return self.startContainer(self.Widgets.PagedWindow, title, row, column, colspan, rowspan, sticky="nsew")
def setPagedWindowPage(self, title, page):
pager = self.widgetManager.get(self.Widgets.PagedWindow, title)
pager.showPage(page)
def setPagedWindowButtonsTop(self, title, top=True):
pager = self.widgetManager.get(self.Widgets.PagedWindow, title)
pager.setNavPositionTop(top)
def setPagedWindowButtons(self, title, buttons):
pager = self.widgetManager.get(self.Widgets.PagedWindow, title)
if not isinstance(buttons, list) or len(buttons) != 2:
            raise Exception(
                "You must provide a list of two strings for setPagedWindowButtons()")
pager.setPrevButton(buttons[0])
pager.setNextButton(buttons[1])
def setPagedWindowFunction(self, title, func):
pager = self.widgetManager.get(self.Widgets.PagedWindow, title)
command = self.MAKE_FUNC(func, title)
pager.registerPageChangeEvent(command)
def getPagedWindowPageNumber(self, title):
pager = self.widgetManager.get(self.Widgets.PagedWindow, title)
return pager.getPageNumber()
def showPagedWindowPageNumber(self, title, show=True):
pager = self.widgetManager.get(self.Widgets.PagedWindow, title)
pager.showPageNumber(show)
def showPagedWindowTitle(self, title, show=True):
pager = self.widgetManager.get(self.Widgets.PagedWindow, title)
pager.showTitle(show)
def setPagedWindowTitle(self, title, pageTitle):
pager = self.widgetManager.get(self.Widgets.PagedWindow, title)
pager.setTitle(pageTitle)
@contextmanager
def page(self, windowTitle=None, pageNumber=None, sticky="nw", **kwargs):
if windowTitle is None:
pg = self.startPage(sticky)
else:
pg = self.openPage(windowTitle, pageNumber)
self.configure(**kwargs)
try: yield pg
finally: self.stopPage()
def startPage(self, sticky="nw"):
if self._getContainerProperty('type') == self.Widgets.Page:
self.warn("You didn't STOP the previous PAGE")
self.stopPage()
elif self._getContainerProperty('type') != self.Widgets.PagedWindow:
raise Exception("Can't start a PAGE, currently in:",
self._getContainerProperty('type'))
self.containerStack[-1]['widgets'] = True
# generate a page title
pageNum = self._getContainerProperty('container').frameStack.getNumFrames() + 1
pageTitle = self._getContainerProperty('title') + "__" + str(pageNum)
return self.startContainer(self.Widgets.Page, pageTitle, row=None, column=None, colspan=None, rowspan=None, sticky=sticky)
def stopPage(self):
if self._getContainerProperty('type') == self.Widgets.Page:
self.stopContainer()
else:
raise Exception("Can't stop PAGE, currently in:",
self._getContainerProperty('type'))
def stopPagedWindow(self):
if self._getContainerProperty('type') == self.Widgets.Page:
self.warn("You didn't STOP the previous PAGE")
self.stopPage()
if self._getContainerProperty('type') != self.Widgets.PagedWindow:
raise Exception("Can't stop a PAGEDWINDOW, currently in:",
self._getContainerProperty('type'))
self._getContainerProperty('container').stopPagedWindow()
self.stopContainer()
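# A minimal usage sketch for paged windows (illustrative only; assumes an
# appJar gui instance named `app`):
#
#   with app.pagedWindow("Wizard"):
#       with app.page():
#           app.checkBox("Step 1 done")
#       with app.page():
#           app.checkBox("Step 2 done")
#   app.setPagedWindowFunction("Wizard", lambda t: print("page changed"))
#   app.setPagedWindowPage("Wizard", 2)  # jump to the second page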
#####################################
# Scrolled Panes
#####################################
@contextmanager
def scrollPane(self, title, row=None, column=0, colspan=0, rowspan=0, sticky="NSEW", **kwargs):
disabled = kwargs.pop("disabled", "")
try:
sp = self.startScrollPane(title, row, column, colspan, rowspan, sticky, disabled)
except ItemLookupError:
sp = self.openScrollPane(title)
self.configure(**kwargs)
try: yield sp
finally: self.stopScrollPane()
def startScrollPane(self, title, row=None, column=0, colspan=0, rowspan=0, sticky="NSEW", disabled=""):
return self.startContainer(self.Widgets.ScrollPane, title, row, column, colspan, rowspan, sticky, disabled)
# functions to stop the various containers
def stopContainer(self): self._removeContainer()
def stopScrollPane(self):
if self._getContainerProperty('type') != self.Widgets.ScrollPane:
raise Exception("Can't stop a SCROLLPANE, currently in:",
self._getContainerProperty('type'))
self.stopContainer()
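# A minimal usage sketch for scroll panes (illustrative only; assumes an
# appJar gui instance named `app`):
#
#   with app.scrollPane("SP"):
#       for i in range(50):
#           app.checkBox("box_" + str(i))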
def stopAllPanedFrames(self):
while True:
try:
self.stopPanedFrame()
except:
break
#####################################
# Frames
#####################################
@contextmanager
def frame(self, title=None, row=None, column=0, colspan=0, rowspan=0, sticky="NSEW", **kwargs):
if title is None: # new subFrame
fr = self.startFrame(title, row, column, colspan, rowspan, sticky)
else:
frameNumber = kwargs.pop('frameNumber', None)
try:
if frameNumber is not None: fr = self.openSubFrame(title, frameNumber)
else: fr = self.openFrame(title)
except: # no widget
fr = self.startFrame(title, row, column, colspan, rowspan, sticky)
self.configure(**kwargs)
try: yield fr
finally: self.stopFrame()
def startFrame(self, title=None, row=None, column=0, colspan=0, rowspan=0, sticky="NSEW"):
frameType = self.Widgets.Frame
if self._getContainerProperty('type') == self.Widgets.FrameStack:
# generate a frame title
frameNum = self._getContainerProperty('container').getNumFrames()
title = self._getContainerProperty('title') + "__" + str(frameNum)
gui.trace("Adding new subFrame: %s", title)
self.containerStack[-1]['widgets'] = True
frameType = self.Widgets.SubFrame
else:
if title is None:
raise Exception("All frames must have a title")
gui.trace("Adding new frame: %s", title)
return self.startContainer(frameType, title, row, column, colspan, rowspan, sticky)
def stopFrame(self):
if self._getContainerProperty('type') not in [self.Widgets.Frame, self.Widgets.SubFrame]:
raise Exception("Can't stop a FRAME, currently in:",
self._getContainerProperty('type'))
self.stopContainer()
def raiseFrame(self, title):
''' will bring the named frame in front of any others '''
gui.trace("Raising frame: %s", title)
self.widgetManager.get(self.Widgets.Frame, title).lift()
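# A minimal usage sketch for plain frames (illustrative only; assumes an
# appJar gui instance named `app`):
#
#   with app.frame("LEFT", row=0, column=0):
#       app.checkBox("Option A")
#   with app.frame("RIGHT", row=0, column=1):
#       app.checkBox("Option B")
#   app.raiseFrame("LEFT")  # only useful when frames share a grid cell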
#####################################
# FrameStack
#####################################
@contextmanager
def frameStack(self, title, row=None, column=0, colspan=0, rowspan=0, sticky="NSEW", **kwargs):
change = kwargs.pop("change", None)
start = kwargs.pop("start", -1)
try:
fr = self.startFrameStack(title, row, column, colspan, rowspan, sticky, change=change, start=start)
except ItemLookupError:
fr = self.openFrameStack(title)
self.configure(**kwargs)
try: yield fr
finally:
self.stopFrameStack()
def startFrameStack(self, title, row=None, column=0, colspan=0, rowspan=0, sticky="news", change=None, start=-1):
fs = self.startContainer(self.Widgets.FrameStack, title, row, column, colspan, rowspan, sticky)
fs.setChangeFunction(change)
fs.setStartFrame(start)
return fs
def stopFrameStack(self):
if self._getContainerProperty('type') != self.Widgets.FrameStack:
raise Exception("Can't stop a FRAMESTACK, currently in:",
self._getContainerProperty('type'))
self.stopContainer()
def setStartFrame(self, title, num):
self.widgetManager.get(self.Widgets.FrameStack, title).setStartFrame(num)
def nextFrame(self, title, callFunction=True):
self.widgetManager.get(self.Widgets.FrameStack, title).showNextFrame(callFunction)
def prevFrame(self, title, callFunction=True):
self.widgetManager.get(self.Widgets.FrameStack, title).showPrevFrame(callFunction)
def firstFrame(self, title, callFunction=True):
self.widgetManager.get(self.Widgets.FrameStack, title).showFirstFrame(callFunction)
def lastFrame(self, title, callFunction=True):
self.widgetManager.get(self.Widgets.FrameStack, title).showLastFrame(callFunction)
def selectFrame(self, title, num, callFunction=True):
if type(num) in (list, tuple): num = num[0]
num = int(num)
self.widgetManager.get(self.Widgets.FrameStack, title).showFrame(num, callFunction)
def countFrames(self, title):
return self.widgetManager.get(self.Widgets.FrameStack, title).getNumFrames()
def getCurrentFrame(self, title):
return self.widgetManager.get(self.Widgets.FrameStack, title).getCurrentFrame()
def getPreviousFrame(self, title):
return self.widgetManager.get(self.Widgets.FrameStack, title).getPreviousFrame()
def frameStackAtStart(self, title):
return self.widgetManager.get(self.Widgets.FrameStack, title).atStart()
def frameStackAtEnd(self, title):
return self.widgetManager.get(self.Widgets.FrameStack, title).atEnd()
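# A minimal usage sketch for frame stacks (illustrative only; assumes an
# appJar gui instance named `app`):
#
#   with app.frameStack("Pages", start=0):
#       with app.frame():   # sub-frames get auto-generated titles
#           app.checkBox("First")
#       with app.frame():
#           app.checkBox("Second")
#   app.nextFrame("Pages")
#   print(app.getCurrentFrame("Pages"))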
#####################################
# SubWindows
#####################################
@contextmanager
def subWindow(self, name, title=None, modal=False, blocking=False, transient=False, grouped=True, **kwargs):
visible = kwargs.pop("visible", None)
try:
sw = self.startSubWindow(name, title, modal, blocking, transient, grouped)
except ItemLookupError:
sw = self.openSubWindow(name)
try:
yield sw
finally:
self.configure(**kwargs)
self.stopSubWindow()
if visible is True: self.showSubWindow(name)
def startSubWindow(self, name, title=None, modal=False, blocking=False, transient=False, grouped=True):
self.widgetManager.verify(self.Widgets.SubWindow, name)
gui.trace("Starting subWindow %s", name)
top = SubWindow(self, self.topLevel, name, title=title, stopFunc = self.confirmHideSubWindow,
modal=modal, blocking=blocking, transient=transient, grouped=grouped)
self.widgetManager.add(self.Widgets.SubWindow, name, top)
# now, add to top of stack
self._addContainer(name, self.Widgets.SubWindow, top, 0, 1, "")
# add an icon if required
if self.winIcon is not None:
self.setIcon(self.winIcon)
return top
def stopSubWindow(self):
container = self.containerStack[-1]
if container['type'] == self.Widgets.SubWindow:
if not hasattr(container["container"], 'ms'):
self.setMinSize(container["container"])
self.stopContainer()
else:
raise Exception("Can't stop a SUBWINDOW, currently in:",
self._getContainerProperty('type'))
def setSubWindowLocation(self, title, x, y):
self.widgetManager.get(self.Widgets.SubWindow, title).setLocation(x, y)
def showAllSubWindows(self):
for sub in self.widgetManager.group(self.Widgets.SubWindow):
self.showSubWindow(sub)
# functions to show/hide/destroy SubWindows
def showSubWindow(self, title, hide=False, follow=False):
tl = self.widgetManager.get(self.Widgets.SubWindow, title)
if hide:
self.hideAllSubWindows()
gui.trace("Showing subWindow %s", title)
tl.show()
self._bringToFront(tl)
tl.block()
return tl
def hideAllSubWindows(self, useStopFunction=False):
for sub in self.widgetManager.group(self.Widgets.SubWindow):
self.hideSubWindow(sub, useStopFunction)
def hideSubWindow(self, title, useStopFunction=False):
self.widgetManager.get(self.Widgets.SubWindow, title).hide(useStopFunction)
def confirmHideSubWindow(self, title):
self.hideSubWindow(title, True)
def destroySubWindow(self, title):
tl = self.widgetManager.get(self.Widgets.SubWindow, title)
tl.prepDestroy()
# get rid of all the kids!
self.cleanseWidgets(tl)
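# A minimal usage sketch for sub-windows (illustrative only; assumes an
# appJar gui instance named `app`):
#
#   with app.subWindow("prefs", title="Preferences", modal=True):
#       app.checkBox("Auto-save", True)
#   app.showSubWindow("prefs")   # later: app.hideSubWindow("prefs")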
#####################################
# END containers
#####################################
# function to destroy widget & all children
# will also attempt to remove all trace from config dictionaries
def cleanseWidgets(self, widget):
# make sure we've cleansed any children first
for child in widget.winfo_children():
self.cleanseWidgets(child)
widgType = gui.GET_WIDGET_TYPE(widget)
if hasattr(widget, 'APPJAR_TYPE'):
widgType = widget.APPJAR_TYPE
gui.trace("Cleansing: %s", self.Widgets.name(widgType))
if widgType not in [self.Widgets.Tab, self.Widgets.Page]:
if not self.widgetManager.destroyWidget(widgType, widget):
self.warn("Unable to destroy %s, during cleanse - destroy returned False", widgType)
else:
self.trace("Skipped %s, cleansed by parent", widgType)
elif widgType in ('CanvasDnd', 'ValidationLabel'):
self.trace("Skipped %s, cleansed by parent", widgType)
else:
self.warn("Unable to destroy %s, during cleanse - no match", gui.GET_WIDGET_TYPE(widget))
# functions to hide & show the main window
def hide(self, btn=None):
self._getTopLevel().displayed = False
self._getTopLevel().withdraw()
def show(self, btn=None):
self._getTopLevel().displayed = True
self._getTopLevel().deiconify()
def setVisible(self, visible=True):
if visible: self.show()
else: self.hide()
def getVisible(self):
return self.topLevel.displayed
visible = property(getVisible, setVisible)
#####################################
# warn when bad functions called...
#####################################
def __getattr__(self, name):
def handlerFunction(*args, **kwargs):
self.warn("Unknown function: <%s> Check your spelling, do you need more camelCase?", name)
return handlerFunction
def __setattr__(self, name, value):
# would this create a new attribute?
if self.built and not hasattr(self, name):
raise AttributeError("Creating new attributes is not allowed!")
super(gui, self).__setattr__(name, value)
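# Illustrative behaviour of the two guards above (not executed here):
#
#   app.addLabell("oops")  # misspelt call - __getattr__ logs a warning instead of crashing
#   app.newAttr = 1        # raises AttributeError once the gui is built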
#####################################
# LabelBox Functions
#####################################
# this will build a frame, with a label on the left hand side
def _getLabelBox(self, title, **kwargs):
self.widgetManager.verify(self.Widgets.Label, title)
label = kwargs.pop('label', title)
if label is True: label = title
font = kwargs.pop('font', self._getContainerProperty('labelFont'))
# first, make a frame
frame = self._makeLabelBox()(self.getContainer())
if not self.ttkFlag:
frame.config(background=self._getContainerBg())
self.widgetManager.log(self.Widgets.FrameBox, frame)
# next make the label
if self.ttkFlag:
lab = ttk.Label(frame)
else:
lab = Label(frame, background=self._getContainerBg())
frame.theLabel = lab
lab.hidden = False
lab.inContainer = True
lab.config(
text=label,
anchor=W,
justify=LEFT,
font=font
)
if not self.ttkFlag:
lab.config(background=self._getContainerBg())
lab.DEFAULT_TEXT = label
self.widgetManager.add(self.Widgets.Label, title, lab)
self.widgetManager.add(self.Widgets.FrameLabel, title, lab)
# now put the label in the frame
lab.pack(side=LEFT, fill=Y)
return frame
# this is where we add the widget to the frame built above
def _packLabelBox(self, frame, widget):
widget.pack(side=LEFT, fill=BOTH, expand=True)
widget.inContainer = True
frame.theWidget = widget
#widget.grid( row=0, column=1, sticky=W+E )
#Grid.columnconfigure(frame, 1, weight=1)
#Grid.rowconfigure(frame, 0, weight=1)
# function to resize labels, if they are hidden or shown
# not using this for two reasons:
# - doesn't really work when font size changes
# - breaks when things are in containers
def _updateLabelBoxes(self, title, column):
if len(title) >= self.labWidth.get(column, -1):
self.labWidth[column] = len(title)
# loop through other labels and resize
for na, wi in self.widgetManager.group(self.Widgets.FrameLabel).items():
col = wi.master.grid_info().get("column", wi.master.master.grid_info().get("column", -1))
if int(col) == column:
wi.config(width=self.labWidth[column])
#####################################
# FUNCTION for check boxes
#####################################
def tick(self, title, value=None, *args, **kwargs):
""" simpleGUI - shortner for checkBox() """
return self.checkBox(title, value, *args, **kwargs)
def check(self, title, value=None, *args, **kwargs):
""" simpleGUI - shortner for checkBox() """
return self.checkBox(title, value, *args, **kwargs)
def checkBox(self, title, value=None, *args, **kwargs):
""" adds, sets & gets checkBoxes all in one go """
widgKind = self.Widgets.CheckBox
callFunction = kwargs.pop("callFunction", True)
try: self.widgetManager.verify(widgKind, title)
except: #widget exists
if value is not None: self.setCheckBox(title, ticked=value, callFunction=callFunction)
check = self.getCheckBox(title)
else: # new widget
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
check = self._checkBoxMaker(title, *args, **kwargs)
if value is not None: self.setCheckBox(title, value)
if len(kwargs) > 0:
self._configWidget(title, widgKind, **kwargs)
return check
def _checkBoxMaker(self, title, value=None, kind="cb", row=None, column=0, colspan=0, rowspan=0, **kwargs):
""" internal wrapper to hide kwargs from original add functions """
name = kwargs.pop("name", kwargs.pop('label', None))
return self.addCheckBox(title, row, column, colspan, rowspan, name)
def addCheckBox(self, title, row=None, column=0, colspan=0, rowspan=0, name=None):
''' adds a new check box, at the specified position '''
self.widgetManager.verify(self.Widgets.CheckBox, title)
var = IntVar(self.topLevel)
if name is None:
name = title
if not self.ttkFlag:
cb = Checkbutton(self.getContainer(), text=name, variable=var)
cb.config(
font=self._getContainerProperty('labelFont'),
background=self._getContainerBg(),
activebackground=self._getContainerBg(),
anchor=W)
else:
cb = ttk.Checkbutton(self.getContainer(), text=name, variable=var)
cb.DEFAULT_TEXT = name
cb.bind("<Button-1>", self._grabFocus)
self.widgetManager.add(self.Widgets.CheckBox, title, cb)
self.widgetManager.add(self.Widgets.CheckBox, title, var, group=WidgetManager.VARS)
self._positionWidget(cb, row, column, colspan, rowspan, EW)
return cb
def addNamedCheckBox(self, name, title, row=None, column=0, colspan=0, rowspan=0):
''' adds a new check box, at the specified position, with the name as the text '''
return self.addCheckBox(title, row, column, colspan, rowspan, name)
def getCheckBox(self, title):
bVar = self.widgetManager.get(self.Widgets.CheckBox, title, group=WidgetManager.VARS)
if bVar.get() == 1:
return True
else:
return False
def getAllCheckBoxes(self):
cbs = {}
for k in self.widgetManager.group(self.Widgets.CheckBox):
cbs[k] = self.getCheckBox(k)
return cbs
def setCheckBox(self, title, ticked=True, callFunction=True):
cb = self.widgetManager.get(self.Widgets.CheckBox, title)
bVar = self.widgetManager.get(self.Widgets.CheckBox, title, group=WidgetManager.VARS)
bVar.set(ticked)
if ticked:
if not self.ttkFlag:
cb.select()
else:
cb.state(['selected'])
else:
if not self.ttkFlag:
cb.deselect()
else:
cb.state(['!selected'])
# now call function
if callFunction:
if hasattr(cb, 'cmd'):
cb.cmd()
def clearAllCheckBoxes(self, callFunction=False):
for cb in self.widgetManager.group(self.Widgets.CheckBox):
self.setCheckBox(cb, ticked=False, callFunction=callFunction)
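# A minimal usage sketch for check boxes (illustrative only; assumes an
# appJar gui instance named `app`):
#
#   app.checkBox("Verbose", True)       # first call adds the widget, ticked
#   app.checkBox("Verbose", False)      # second call updates the existing one
#   print(app.getCheckBox("Verbose"))   # -> False
#   print(app.getAllCheckBoxes())       # -> {'Verbose': False}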
#####################################
# FUNCTION for scales
#####################################
def slider(self, title, value=None, *args, **kwargs):
""" simpleGUI - alternative for scale() """
return self.scale(title, value, *args, **kwargs)
def scale(self, title, value=None, *args, **kwargs):
""" simpleGUI - adds, sets & gets scales all in one go """
widgKind = self.Widgets.Scale
vert = kwargs.pop("direction", "horizontal").lower() == "vertical"
increment = kwargs.pop("increment", None)
interval = kwargs.pop("interval", None)
show = kwargs.pop("show", False)
_range = kwargs.pop("range", None)
callFunction = kwargs.pop("callFunction", True)
try: self.widgetManager.verify(widgKind, title)
except: # widget exists
scale = self.getScale(title)
else: # new widget
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
scale = self._scaleMaker(title, *args, **kwargs)
if _range is not None: self.setScaleRange(title, _range[0], _range[1])
if vert: self.setScaleVertical(title)
if increment is not None: self.setScaleIncrement(title, increment)
if interval is not None: self.showScaleIntervals(title, interval)
if show: self.showScaleValue(title)
if value is not None: self.setScale(title, value, callFunction)
if len(kwargs) > 0:
self._configWidget(title, widgKind, **kwargs)
return scale
def _buildScale(self, title, frame):
self.widgetManager.verify(self.Widgets.Scale, title)
var = DoubleVar(self.topLevel)
if not self.ttkFlag:
scale = self._makeAjScale()(frame, increment=10, variable=var, repeatinterval=10, orient=HORIZONTAL, font=self._getContainerProperty('inputFont'))
scale.config(digits=1, showvalue=False, highlightthickness=1)
else:
scale = self._makeAjScale()(frame, increment=10, variable=var, orient=HORIZONTAL)
scale.bind("<Button-1>", self._grabFocus, "+")
scale.var = var
scale.inContainer = False
self.widgetManager.add(self.Widgets.Scale, title, scale)
return scale
def _scaleMaker(self, title, row=None, column=0, colspan=0, rowspan=0, **kwargs):
return self.addScale(title, row, column, colspan, rowspan)
def addScale(self, title, row=None, column=0, colspan=0, rowspan=0):
''' adds a slidable scale at the specified position '''
scale = self._buildScale(title, self.getContainer())
self._positionWidget(scale, row, column, colspan, rowspan)
return scale
def addLabelScale(self, title, row=None, column=0, colspan=0, rowspan=0, label=True):
''' adds a slidable scale, with a label showing the title at the specified position '''
frame = self._getLabelBox(title, label=label)
scale = self._buildScale(title, frame)
self._packLabelBox(frame, scale)
self._positionWidget(frame, row, column, colspan, rowspan)
return scale
def getScale(self, title):
sc = self.widgetManager.get(self.Widgets.Scale, title)
return sc.get()
def getAllScales(self):
scales = {}
for k in self.widgetManager.group(self.Widgets.Scale):
scales[k] = self.getScale(k)
return scales
def setScale(self, title, pos, callFunction=True):
sc = self.widgetManager.get(self.Widgets.Scale, title)
with PauseCallFunction(callFunction, sc):
sc.set(pos)
def clearAllScales(self, callFunction=False):
for sc in self.widgetManager.group(self.Widgets.Scale):
self.setScale(sc, self.widgetManager.get(self.Widgets.Scale, sc).cget("from"), callFunction=callFunction)
def setScaleIncrement(self, title, increment):
sc = self.widgetManager.get(self.Widgets.Scale, title)
sc.increment = increment
def setScaleLength(self, title, length):
if not self.ttkFlag:
sc = self.widgetManager.get(self.Widgets.Scale, title)
sc.config(sliderlength=length)
else:
self.warn("ttk: setScaleLength() not supported: %s", title)
# this will make the scale show interval numbers
# set to 0 to remove
def showScaleIntervals(self, title, intervals):
if not self.ttkFlag:
sc = self.widgetManager.get(self.Widgets.Scale, title)
sc.config(tickinterval=intervals)
else:
self.warn("ttk: showScaleIntervals() not supported: %s", title)
# this will make the scale show its value
def showScaleValue(self, title, show=True):
if not self.ttkFlag:
sc = self.widgetManager.get(self.Widgets.Scale, title)
sc.config(showvalue=show)
else:
self.warn("ttk: showScaleValue() not supported: %s", title)
# change the orientation (Hor or Vert)
def setScaleVertical(self, title):
sc = self.widgetManager.get(self.Widgets.Scale, title)
sc.config(orient=VERTICAL)
def setScaleHorizontal(self, title):
sc = self.widgetManager.get(self.Widgets.Scale, title)
sc.config(orient=HORIZONTAL)
def setScaleRange(self, title, start, end, curr=None):
if curr is None:
curr = start
sc = self.widgetManager.get(self.Widgets.Scale, title)
sc.config(from_=start, to=end)
self.setScale(title, curr)
# set the increment as 10%
try:
res = sc.cget("resolution")
diff = int((((end - start)/res)/10)+0.99) # add 0.99 to round up...
sc.increment = diff
except:
pass # resolution not supported in ttk
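# A minimal usage sketch for scales (illustrative only; assumes an appJar
# gui instance named `app`):
#
#   app.scale("Volume", range=(0, 100), value=25, show=True, increment=5)
#   print(app.getScale("Volume"))  # -> 25.0
#   app.setScaleVertical("Volume")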
#####################################
# FUNCTION for optionMenus
#####################################
def combo(self, title, value=None, *args, **kwargs):
""" shortner for optionBox() """
return self.optionBox(title, value, *args, **kwargs)
def option(self, title, value=None, *args, **kwargs):
""" simpleGUI - shortner for optionBox() """
return self.optionBox(title, value, *args, **kwargs)
def optionBox(self, title, value=None, *args, **kwargs):
""" simpleGUI - adds, sets & gets optionBoxes all in one go """
widgKind = self.Widgets.OptionBox
kind = kwargs.pop("kind", "standard").lower().strip()
label = kwargs.pop("label", False)
callFunction = kwargs.pop("callFunction", True)
override = kwargs.pop("override", False)
checked = kwargs.pop("checked", True)
selected = kwargs.pop("selected", None)
disabled = kwargs.pop("disabled", "-")
try: self.widgetManager.verify(self.Widgets.OptionBox, title)
except: # widget exists
if value is not None: self.setOptionBox(title, index=value, value=checked, callFunction=callFunction, override=override)
opt = self.getOptionBox(title)
else: # new widget
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
if kind == "ticks":
if label: opt = self.addLabelTickOptionBox(title, value, *args, label=label, disabled=disabled, **kwargs)
else: opt = self.addTickOptionBox(title, value, *args, disabled=disabled, **kwargs)
else:
if label: opt = self.addLabelOptionBox(title, value, *args, label=label, disabled=disabled, **kwargs)
else: opt = self.addOptionBox(title, value, *args, disabled=disabled, **kwargs)
if selected is not None: self.setOptionBox(title, selected)
if len(kwargs) > 0:
self._configWidget(title, widgKind, **kwargs)
return opt
def addDbOptionBox(self, title, db, row=None, column=0, colspan=0, rowspan=0, **kwargs):
''' adds an option box, with a list of tables from the specified database '''
data = self._getDbTables(db)
opt = self.option(title, data, row, column, colspan, rowspan, **kwargs)
opt.db = db
return opt
def _buildOptionBox(self, frame, title, options, kind="normal", disabled='-'):
""" Internal wrapper, used for building OptionBoxes.
It will use the kind to choose either a standard OptionBox or a TickOptionBox.
ref: http://stackoverflow.com/questions/29019760/how-to-create-a-combobox-that-includes-checkbox-for-each-item
:param frame: this should be a container, used as the parent for the OptionBox
:param title: the key used to reference this OptionBox
:param options: a list of values to put in the OptionBox, can be len 0
:param kind: the style of OptionBox: normal or ticks
:returns: the created OptionBox
:raises ItemLookupError: if the title is already in use
"""
self.widgetManager.verify(self.Widgets.OptionBox, title)
# create a string var to hold selected item
var = StringVar(self.topLevel)
self.widgetManager.add(self.Widgets.OptionBox, title, var, group=WidgetManager.VARS)
maxSize, options = self._configOptionBoxList(title, options, kind)
if len(options) > 0 and kind == "normal":
option = ajOption(frame, var, *options)
var.set(options[0])
option.kind = "normal"
elif kind == "ticks":
option = ajOption(frame, variable=var, value="")
self._buildTickOptionBox(title, option, options)
else:
option = ajOption(frame, var, [])
option.kind = "normal"
option.config(
justify=LEFT,
font=self._getContainerProperty('inputFont'),
# background=self._getContainerBg(),
highlightthickness=0,
width=maxSize,
takefocus=1)
option.bind("<Button-1>", self._grabFocus)
# compare on windows & mac
#option.config(highlightthickness=12, bd=0, highlightbackground=self._getContainerBg())
option.var = var
option.maxSize = maxSize
option.inContainer = False
option.options = options
option.disabled = disabled
option.DEFAULT_TEXT=""
if options is not None:
option.DEFAULT_TEXT='\n'.join(str(x) for x in options)
# if self.platform == self.MAC:
# option.config(highlightbackground=self._getContainerBg())
option.bind("<Tab>", self._focusNextWindow)
option.bind("<Shift-Tab>", self._focusLastWindow)
# add a right click menu
self._addRightClickMenu(option)
# disable any separators
self._disableOptionBoxSeparators(option)
# add to array list
self.widgetManager.add(self.Widgets.OptionBox, title, option)
return option
def _buildTickOptionBox(self, title, option, options):
""" Internal wrapper, used for building TickOptionBoxes.
Called by _buildOptionBox & changeOptionBox.
Will add each of the options as a tick box, and use the title as a disabled header.
:param title: the key used to reference this OptionBox
:param option: an existing OptionBox that will be emptied & repopulated
:param options: a list of values to put in the OptionBox, can be len 0
:returns: None - the option param is modified
:raises ItemLookupError: if the title can't be found
"""
# delete any items - either the initial one when created, or any existing ones if changing
option['menu'].delete(0, 'end')
var = self.widgetManager.get(self.Widgets.OptionBox, title, group=WidgetManager.VARS)
var.set(title)
vals = {}
for o in options:
vals[o] = BooleanVar()
option['menu'].add_checkbutton( label=o, onvalue=True, offvalue=False, variable=vals[o])
self.widgetManager.update(self.Widgets.TickOptionBox, title, vals, group=WidgetManager.VARS)
option.kind = "ticks"
def addOptionBox(self, title, options, row=None, column=0, colspan=0, rowspan=0, disabled='-', **kwargs):
""" Adds a new standard OptionBox.
Simply calls internal function _buildOptionBox.
:param title: the key used to reference this OptionBox
:param options: a list of values to put in the OptionBox, can be len 0
:returns: the created OptionBox
:raises ItemLookupError: if the title is already in use
"""
option = self._buildOptionBox(self.getContainer(), title, options, disabled=disabled)
self._positionWidget(option, row, column, colspan, rowspan)
return option
def addLabelOptionBox(self, title, options, row=None, column=0, colspan=0, rowspan=0, disabled="-", **kwargs):
""" Adds a new standard OptionBox, with a Label before it.
Simply calls internal function _buildOptionBox, placing it in a LabelBox.
:param title: the key used to reference this OptionBox and text for the Label
:param options: a list of values to put in the OptionBox, can be len 0
:returns: the created OptionBox (not the LabelBox)
:raises ItemLookupError: if the title is already in use
"""
frame = self._getLabelBox(title, **kwargs)
option = self._buildOptionBox(frame, title, options, disabled=disabled)
self._packLabelBox(frame, option)
self._positionWidget(frame, row, column, colspan, rowspan)
return option
def addTickOptionBox(self, title, options, row=None, column=0, colspan=0, rowspan=0, disabled="-", **kwargs):
""" Adds a new TickOptionBox.
Simply calls internal function _buildOptionBox.
:param title: the key used to reference this TickOptionBox
:param options: a list of values to put in the TickOptionBox, can be len 0
:returns: the created TickOptionBox
:raises ItemLookupError: if the title is already in use
"""
tick = self._buildOptionBox(self.getContainer(), title, options, kind="ticks", disabled=disabled)
self._positionWidget(tick, row, column, colspan, rowspan)
return tick
def addLabelTickOptionBox(self, title, options, row=None, column=0, colspan=0, rowspan=0, disabled="-", **kwargs):
""" Adds a new TickOptionBox, with a Label before it
Simply calls internal function _buildOptionBox, placing it in a LabelBox
:param title: the key used to reference this TickOptionBox, and text for the Label
:param options: a list of values to put in the TickOptionBox, can be len 0
:returns: the created TickOptionBox (not the LabelBox)
:raises ItemLookupError: if the title is already in use
"""
frame = self._getLabelBox(title, **kwargs)
tick = self._buildOptionBox(frame, title, options, kind="ticks", disabled=disabled)
self._packLabelBox(frame, tick)
self._positionWidget(frame, row, column, colspan, rowspan)
return tick
def getOptionBox(self, title):
""" Gets the selected item from the named OptionBox
:param title: the OptionBox to check
:returns: the selected item in an OptionBox or a dictionary of all items and their status for a TickOptionBox
:raises ItemLookupError: if the title can't be found
"""
box = self.widgetManager.get(self.Widgets.OptionBox, title)
if box.kind == "ticks":
val = self.widgetManager.get(self.Widgets.TickOptionBox, title, group=WidgetManager.VARS)
retVal = {}
for k, v in val.items():
retVal[k] = bool(v.get())
return retVal
else:
val = self.widgetManager.get(self.Widgets.OptionBox, title, group=WidgetManager.VARS)
val = val.get().strip()
# set to None if it's a divider
if val.startswith("-") or len(val) == 0:
val = None
return val
def getAllOptionBoxes(self):
""" Convenience function to get the selected items for all OptionBoxes in the GUI.
:returns: a dictionary containing the result of calling getOptionBox for every OptionBox/TickOptionBox in the GUI
"""
boxes = {}
for k in self.widgetManager.group(self.Widgets.OptionBox):
boxes[k] = self.getOptionBox(k)
return boxes
def _disableOptionBoxSeparators(self, box):
""" Loops through all items in box and if they start with a dash, disables them
:param box: the OptionBox to process
:returns: None
"""
for pos, item in enumerate(box.options):
if item.startswith(box.disabled):
box["menu"].entryconfigure(pos, state="disabled")
else:
box["menu"].entryconfigure(pos, state="normal")
def _configOptionBoxList(self, title, options, kind):
""" Tidies up the list provided when an OptionBox is created/changed
:param title: the title for the OptionBox - only used by TickOptionBox to calculate max size
:param options: the list to tidy
:param kind: The kind of option box (normal or ticks)
:returns: a tuple containing the maxSize (width) and tidied list of items
"""
# deal with a dict_keys object - messy!!!!
if not isinstance(options, list):
options = list(options)
# make sure all options are strings
options = [str(i) for i in options]
# check for empty strings, replace first with message, remove rest
found = False
newOptions = []
for pos, item in enumerate(options):
if str(item).strip() == "":
if not found:
newOptions.append("- options -")
found = True
else:
newOptions.append(item)
options = newOptions
# get the longest string length
try:
maxSize = len(str(max(options, key=len)))
except:
try:
maxSize = len(str(max(options)))
except:
maxSize = 0
# increase if ticks
if kind == "ticks":
if len(title) > maxSize:
maxSize = len(title)
# new bug?!? - doesn't fit anymore!
if self.platform == self.MAC:
maxSize += 3
return maxSize, options
def changeOptionBox(self, title, options, index=None, callFunction=False):
""" Changes the entire contents of the named OptionBox
ref: http://www.prasannatech.net/2009/06/tkinter-optionmenu-changing-choices.html
:param title: the OptionBox to change
:param options: the new values to put in the OptionBox
:param index: an optional initial value to select
:param callFunction: whether to generate an event to notify that the widget has changed
:returns: None
:raises ItemLookupError: if the title can't be found
"""
# get the optionBox & associated var
box = self.widgetManager.get(self.Widgets.OptionBox, title)
# tidy up list and get max size
maxSize, options = self._configOptionBoxList(title, options, "normal")
# warn if new options bigger
if maxSize > box.maxSize:
self.warn("The new options are wider then the old ones: %s > %s", maxSize, box.maxSize)
if box.kind == "ticks":
self._buildTickOptionBox(title, box, options)
else:
# delete the current options
box['menu'].delete(0, 'end')
# add the new items
for option in options:
box["menu"].add_command(
label=option, command=lambda temp=option: box.setvar(
box.cget("textvariable"), value=temp))
with PauseCallFunction(callFunction, box):
box.var.set(options[0])
box.options = options
# disable any separators
self._disableOptionBoxSeparators(box)
# select the specified option
self.setOptionBox(title, index, callFunction=False, override=True)
def deleteOptionBox(self, title, index):
""" Deleted the specified item from the named OptionBox
:param title: the OptionBox to change
:param inde: the value to delete - either a numeric index, or the text of an item
:returns: None
:raises ItemLookupError: if the title can't be found
"""
self.widgetManager.check(self.Widgets.OptionBox, title, group=WidgetManager.VARS)
self.setOptionBox(title, index, value=None, override=True)
def renameOptionBoxItem(self, title, item, newName=None, callFunction=False):
""" Changes the text of the specified item in the named OptionBox
:param title: the OptionBox to change
:param item: the item to rename
:param newName: the value to rename it with
:param callFunction: whether to generate an event to notify that the widget has changed
:returns: None
:raises ItemLookupError: if the title can't be found
"""
self.widgetManager.check(self.Widgets.OptionBox, title, group=WidgetManager.VARS)
self.setOptionBox(title, item, value=newName, callFunction=callFunction)
def clearOptionBox(self, title, callFunction=True):
""" Deselects any items selected in the named OptionBox
If a TickOptionBox, all items will be set to False (unticked)
:param title: the OptionBox to change
:param callFunction: whether to generate an event to notify that the widget has changed
:returns: None
:raises ItemLookupError: if the title can't be found
"""
box = self.widgetManager.get(self.Widgets.OptionBox, title)
if box.kind == "ticks":
# loop through each tick, set it to False
ticks = self.widgetManager.get(self.Widgets.TickOptionBox, title, group=WidgetManager.VARS)
for k in ticks:
self.setOptionBox(title, k, False, callFunction=callFunction)
else:
self.setOptionBox(title, 0, callFunction=callFunction, override=True)
def clearAllOptionBoxes(self, callFunction=False):
""" Convenience function to clear all OptionBoxes in the GUI
Will simply call clearOptionBox on each OptionBox/TickOptionBox
:param callFunction: whether to generate an event to notify that the widget has changed
:returns: None
"""
for k in self.widgetManager.group(self.Widgets.OptionBox):
self.clearOptionBox(k, callFunction)
def setOptionBoxDisabledChar(self, title, disabled="-"):
box = self.widgetManager.get(self.Widgets.OptionBox, title)
box.disabled = disabled
self._disableOptionBoxSeparators(box)
def setOptionBox(self, title, index, value=True, callFunction=True, override=False):
""" Main purpose is to select/deselect the item at the specified position
But will also: delete an item if value is set to None or rename an item if value is set to a String
:param title: the OptionBox to change
:param index: the position or value of the item to select/delete
:param value: determines what to do to the item: if set to None, will delete the item, else it sets the items state
:param callFunction: whether to generate an event to notify that the widget has changed
:param override: if set to True, allows a disabled item to be selected
:returns: None
:raises ItemLookupError: if the title can't be found
"""
box = self.widgetManager.get(self.Widgets.OptionBox, title)
if box.kind == "ticks":
gui.trace("Updating tickOptionBox")
ticks = self.widgetManager.get(self.Widgets.TickOptionBox, title, group=WidgetManager.VARS)
if index is None:
gui.trace("Index empty - nothing to update")
return
elif index in ticks:
gui.trace("Updating: %s", index)
tick = ticks[index]
try:
index_num = box.options.index(index)
except:
self.warn("Unknown tick: %s in OptionBox: %s", index, title)
return
with PauseCallFunction(callFunction, tick, useVar=False):
if value is None: # then we need to delete it
gui.trace("Deleting tick: %s from OptionBox %s", index, title)
box['menu'].delete(index_num)
del(box.options[index_num])
self.widgetManager.remove(self.Widgets.TickOptionBox, title, index, group=WidgetManager.VARS)
elif isinstance(value, bool):
gui.trace("Updating tick: %s from OptionBox: %s to: %s", index, title, value)
tick.set(value)
else:
gui.trace("Renaming tick: %s from OptionBox: %s to: %s", index, title, value)
ticks = self.widgetManager.get(self.Widgets.TickOptionBox, title, group=WidgetManager.VARS)
ticks[value] = ticks.pop(index)
box.options[index_num] = value
self.changeOptionBox(title, box.options)
for tick in ticks:
self.widgetManager.get(self.Widgets.TickOptionBox, title, group=WidgetManager.VARS)[tick].set(ticks[tick].get())
else:
if value is None:
self.warn("Unknown tick in deleteOptionBox: %s in OptionBox: %s" , index, title)
else:
self.warn("Unknown tick in setOptionBox: %s in OptionBox: %s", index, title)
else:
gui.trace("Updating regular optionBox: %s at: %s to: %s", title, index, value)
count = len(box.options)
if count > 0:
if index is None:
index = 0
if not isinstance(index, int):
try:
index = box.options.index(index)
except:
if value is None:
self.warn("Unknown option in deleteOptionBox: %s in OptionBox: %s", index, title)
else:
self.warn("Unknown option in setOptionBox: %s in OptionBox: %s", index, title)
return
gui.trace("--> index now: %s", index)
if index < 0 or index > count - 1:
self.warn("Invalid option: %s. Should be between 0 and %s." , count-1, index)
else:
if value is None: # then we can delete it...
gui.trace("Deleting option: %s from OptionBox: %s", index, title)
box['menu'].delete(index)
del(box.options[index])
self.setOptionBox(title, 0, callFunction=False, override=override)
elif isinstance(value, bool):
gui.trace("Updating: OptionBox: %s to: %s", title, index)
with PauseCallFunction(callFunction, box):
if not box['menu'].invoke(index):
if override:
gui.trace("Setting OptionBox: %s to disabled option: %s", title, index)
box["menu"].entryconfigure(index, state="normal")
box['menu'].invoke(index)
box["menu"].entryconfigure(index, state="disabled")
else:
self.warn("Unable to set disabled option: %s in OptionBox %s. Try setting 'override=True'", index, title)
else:
gui.trace("Invoked item: %s", index)
else:
gui.trace("Renaming: %s from OptionBox: %s to: %s", index, title, value)
pos = box.options.index(self.widgetManager.get(self.Widgets.OptionBox, title, group=WidgetManager.VARS).get())
box.options[index] = value
self.changeOptionBox(title, box.options, pos)
else:
self.widgetManager.get(self.Widgets.OptionBox, title, group=WidgetManager.VARS).set("")
self.warn("No items to select from: %s", title)
#####################################
# FUNCTION for GoogleMaps
#####################################
def map(self, title, value=None, *args, **kwargs):
""" simpleGUI - adds, sets & gets maps all in one go """
widgKind = self.Widgets.Map
zoom = kwargs.pop("zoom", None)
size = kwargs.pop("size", None)
terrain = kwargs.pop("terrain", None)
proxy = kwargs.pop("proxy", None)
try: self.widgetManager.verify(widgKind, title)
except: # widget exists
gMap = self.getLabel(title)
else: # new widget
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
gMap = self.addGoogleMap(title, *args, **kwargs)
if value is not None: self.setGoogleMapLocation(title, value)
if zoom is not None: self.setGoogleMapZoom(title, zoom)
if size is not None: self.setGoogleMapSize(title, size)
if terrain is not None: self.setGoogleMapTerrain(title, terrain)
if proxy is not None: self.setGoogleMapProxy(title, proxy)
if len(kwargs) > 0:
self._configWidget(title, widgKind, **kwargs)
return gMap
def addGoogleMap(self, title, row=None, column=0, colspan=0, rowspan=0):
''' adds a GoogleMap widget at the specified position '''
self._loadURL()
self._loadTooltip()
if urlencode is False:
raise Exception("Unable to load GoogleMaps - urlencode library not available")
self.widgetManager.verify(self.Widgets.Map, title)
gMap = GoogleMap(self.getContainer(), self, useTtk = self.ttkFlag, font=self._getContainerProperty('labelFont'))
self._positionWidget(gMap, row, column, colspan, rowspan)
self.widgetManager.add(self.Widgets.Map, title, gMap)
return gMap
def setGoogleMapProxy(self, title, proxyString):
gMap = self.widgetManager.get(self.Widgets.Map, title)
gMap.setProxyString(proxyString)
def setGoogleMapLocation(self, title, location):
self.searchGoogleMap(title, location)
def searchGoogleMap(self, title, location):
gMap = self.widgetManager.get(self.Widgets.Map, title)
gMap.changeLocation(location)
def setGoogleMapTerrain(self, title, terrain):
gMap = self.widgetManager.get(self.Widgets.Map, title)
if terrain not in gMap.TERRAINS:
raise Exception("Invalid terrain. Must be one of " + str(gMap.TERRAINS))
gMap.changeTerrain(terrain)
def setGoogleMapZoom(self, title, mod):
self.zoomGoogleMap(title, mod)
def zoomGoogleMap(self, title, mod):
gMap = self.widgetManager.get(self.Widgets.Map, title)
if mod in ["+", "-"]:
gMap.zoom(mod)
elif isinstance(mod, int) and 0 <= mod <= 22:
gMap.setZoom(mod)
def setGoogleMapSize(self, title, size):
gMap = self.widgetManager.get(self.Widgets.Map, title)
gMap.setSize(size)
def setGoogleMapMarker(self, title, location, size=None, colour=None, label=None, replace=False):
gMap = self.widgetManager.get(self.Widgets.Map, title)
if len(location) == 0:
gMap.removeMarkers()
else:
gMap.addMarker(location, size, colour, label, replace)
def removeGoogleMapMarker(self, title, label):
gMap = self.widgetManager.get(self.Widgets.Map, title)
if len(label) == 0:
gMap.removeMarkers()
else:
gMap.removeMarker(label)
def getGoogleMapZoom(self, title):
return self.widgetManager.get(self.Widgets.Map, title).params["zoom"]
def getGoogleMapTerrain(self, title):
return self.widgetManager.get(self.Widgets.Map, title).params["maptype"].title()
def getGoogleMapLocation(self, title):
return self.widgetManager.get(self.Widgets.Map, title).params["center"]
def getGoogleMapSize(self, title):
return self.widgetManager.get(self.Widgets.Map, title).params["size"]
def saveGoogleMap(self, title, fileLocation):
gMap = self.widgetManager.get(self.Widgets.Map, title)
return gMap.saveTile(fileLocation)
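# A minimal usage sketch for the GoogleMap widget (illustrative only; assumes
# an appJar gui instance named `app`, a network connection, and that the
# size value below is valid for the widget):
#
#   app.map("Map", "Marlow, UK", zoom=15, size="400x400")
#   app.setGoogleMapMarker("Map", "Marlow, UK")
#   app.saveGoogleMap("Map", "map.gif")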
#####################################
# FUNCTION for matplotlib
#####################################
def plot(self, title, *args, **kwargs):
""" simpleGUI - adds, sets & gets plots all in one go """
widgKind = self.Widgets.Plot
t = kwargs.pop("t", None)
s = kwargs.pop("s", None)
try: self.widgetManager.verify(widgKind, title)
except: # widget exists
plot = self.getPlot(title)
else: # new widget
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
plot = self.addPlotFig(title, *args, **kwargs)
if t is not None:
self.updatePlot(title, t, s)
if len(kwargs) > 0:
# ignore for now
# will contain positional args
pass
return plot
def addPlot(self, title, t, s, row=None, column=0, colspan=0, rowspan=0, width=None, height=None):
''' adds a MatPlotLib, with t/s plotted '''
canvas, fig = self._addPlotFig(title, row, column, colspan, rowspan, width, height)
axes = fig.add_subplot(111)
axes.plot(t,s)
canvas.axes = axes
return axes
def addPlotFig(self, title, row=None, column=0, colspan=0, rowspan=0, width=None, height=None):
canvas, fig = self._addPlotFig(title, row, column, colspan, rowspan, width, height)
return fig
def _addPlotFig(self, title, row=None, column=0, colspan=0, rowspan=0, width=None, height=None):
self.widgetManager.verify(self.Widgets.Plot, title)
self._loadMatplotlib()
if FigureCanvasTkAgg is False:
raise Exception("Unable to load MatPlotLib - plots not available")
else:
fig = Figure(tight_layout=True)
if width is not None and height is not None:
fig.set_size_inches(width,height,forward=True)
canvas = FigureCanvasTkAgg(fig, self.getContainer())
canvas._tkcanvas.config(background="#c0c0c0", borderwidth=0, highlightthickness=0)
canvas.fig = fig
canvas.draw()  # FigureCanvasTkAgg.show() was removed in newer matplotlib; draw() is the replacement
# canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
canvas._tkcanvas.pack(side=TOP, fill=BOTH, expand=1)
self._positionWidget(canvas.get_tk_widget(), row, column, colspan, rowspan)
self.widgetManager.add(self.Widgets.Plot, title, canvas)
return canvas, fig
def refreshPlot(self, title):
canvas = self.widgetManager.get(self.Widgets.Plot, title)
canvas.draw()
def updatePlot(self, title, t, s, keepLabels=False):
axes = self.widgetManager.get(self.Widgets.Plot, title).axes
if keepLabels:
xLab = axes.get_xlabel()
yLab = axes.get_ylabel()
pTitle = axes.get_title()
handles, legends = axes.get_legend_handles_labels()
axes.clear()
axes.plot(t, s)
if keepLabels:
axes.set_xlabel(xLab)
axes.set_ylabel(yLab)
axes.set_title(pTitle)
axes.legend(handles, legends)
self.refreshPlot(title)
return axes
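# A minimal usage sketch for matplotlib plots (illustrative only; assumes an
# appJar gui instance named `app` and matplotlib installed):
#
#   import math
#   t = [x / 10.0 for x in range(100)]
#   axes = app.addPlot("Sine", t, [math.sin(x) for x in t])
#   app.updatePlot("Sine", t, [math.cos(x) for x in t])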
#####################################
# FUNCTION to manage Properties Widgets
#####################################
def properties(self, title, value=None, *args, **kwargs):
""" simpleGUI - adds, sets & gets properties all in one go """
widgKind = self.Widgets.Properties
try: self.widgetManager.verify(widgKind, title)
except: # widget exists
# if value is not None:
# need to work out args...
# self.setProperty(title, prop=value)
props = self.getProperties(title)
else: # new widget
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
props = self.addProperties(title, value, *args, **kwargs)
if len(kwargs) > 0:
self._configWidget(title, widgKind, **kwargs)
return props
def addProperties(self, title, values=None, row=None, column=0, colspan=0, rowspan=0, **kwargs):
''' adds a new properties widget, displaying the dictionary of booleans as tick boxes '''
self.widgetManager.verify(self.Widgets.Properties, title)
haveTitle = True
if self._getContainerProperty('type') == self.Widgets.ToggleFrame:
self.containerStack[-1]['sticky'] = "ew"
haveTitle = False
props = Properties(self.getContainer(), title, values, haveTitle,
font=self._getContainerProperty('labelFont'), background=self._getContainerBg())
self._positionWidget(props, row, column, colspan, rowspan)
self.widgetManager.add(self.Widgets.Properties, title, props)
return props
def getProperties(self, title):
props = self.widgetManager.get(self.Widgets.Properties, title)
return props.getProperties()
def getAllProperties(self):
props = {}
for k in self.widgetManager.group(self.Widgets.Properties):
props[k] = self.getProperties(k)
return props
def getProperty(self, title, prop):
props = self.widgetManager.get(self.Widgets.Properties, title)
return props.getProperty(prop)
def setProperty(self, title, prop, value=False, callFunction=True):
props = self.widgetManager.get(self.Widgets.Properties, title)
props.addProperty(prop, value, callFunction=callFunction)
def setProperties(self, title, props, callFunction=True):
p = self.widgetManager.get(self.Widgets.Properties, title)
p.addProperties(props, callFunction=callFunction)
def deleteProperty(self, title, prop):
props = self.widgetManager.get(self.Widgets.Properties, title)
props.addProperty(prop, None, callFunction=False)
def setPropertyText(self, title, prop, newText=None):
props = self.widgetManager.get(self.Widgets.Properties, title)
props.renameProperty(prop, newText)
def clearProperties(self, title, callFunction=True):
props = self.widgetManager.get(self.Widgets.Properties, title)
props.clearProperties(callFunction)
def clearAllProperties(self, callFunction=False):
for k in self.widgetManager.group(self.Widgets.Properties):
self.clearProperties(k, callFunction)
def resetProperties(self, title, callFunction=True):
props = self.widgetManager.get(self.Widgets.Properties, title)
props.resetProperties(callFunction)
def resetAllProperties(self, callFunction=False):
for k in self.widgetManager.group(self.Widgets.Properties):
self.resetProperties(k, callFunction)
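# A minimal usage sketch for the Properties widget (illustrative only;
# assumes an appJar gui instance named `app`):
#
#   app.properties("Settings", {"logging": True, "debug": False})
#   app.setProperty("Settings", "debug", True)
#   print(app.getProperties("Settings"))  # -> {'logging': True, 'debug': True}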
#####################################
# FUNCTION to add spin boxes
#####################################
def spin(self, title, value=None, *args, **kwargs):
""" simpleGUI - shortner for spinBox() """
return self.spinBox(title, value, *args, **kwargs)
def spinBox(self, title, value=None, *args, **kwargs):
""" simpleGUI - adds, sets & gets spinBoxes all in one go """
widgKind = self.Widgets.SpinBox
endValue = kwargs.pop("endValue", None)
pos = kwargs.pop("pos", None)
item = kwargs.pop("item", None)
label = kwargs.pop("label", False)
try: self.widgetManager.verify(widgKind, title)
except: # widget exists
if value is not None: self.setSpinBoxPos(title, value, *args, **kwargs)
spinBox = self.getSpinBox(title)
else: # new widget
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
if endValue is not None:
if label: spinBox = self.addLabelSpinBoxRange(title, fromVal=value, toVal=endValue, *args, label=label, **kwargs)
else: spinBox = self.addSpinBoxRange(title, fromVal=value, toVal=endValue, *args, **kwargs)
else:
if label: spinBox = self.addLabelSpinBox(title, value, *args, label=label, **kwargs)
else: spinBox = self.addSpinBox(title, value, *args, **kwargs)
if pos is not None: self.setSpinBoxPos(title, pos)
if item is not None: self.setSpinBox(title, item)
if len(kwargs) > 0:
self._configWidget(title, widgKind, **kwargs)
return spinBox
def _buildSpinBox(self, frame, title, vals):
self.widgetManager.verify(self.Widgets.SpinBox, title)
if type(vals) not in [list, tuple]:
raise Exception("Can't create SpinBox " + title + ". Invalid values: " + str(vals))
spin = Spinbox(frame)
spin.var = StringVar(self.topLevel)
spin.config(textvariable=spin.var)
spin.inContainer = False
spin.isRange = False
spin.config(font=self._getContainerProperty('inputFont'), highlightthickness=0)
# adds bg colour under spinners
# if self.platform == self.MAC:
# spin.config(highlightbackground=self._getContainerBg())
spin.bind("<Tab>", self._focusNextWindow)
spin.bind("<Shift-Tab>", self._focusLastWindow)
# store the vals in DEFAULT_TEXT
spin.DEFAULT_TEXT=""
if vals is not None:
spin.DEFAULT_TEXT='\n'.join(str(x) for x in vals)
# make sure it's a list
# reverse it, so the spin box functions properly
vals = list(vals)
vals.reverse()
vals = tuple(vals)
spin.config(values=vals)
# prevent invalid entries
if self.validateSpinBox is None:
self.validateSpinBox = (
self.containerStack[0]['container'].register(
self._validateSpinBox), '%P', '%W')
spin.config(validate='all', validatecommand=self.validateSpinBox)
self.widgetManager.add(self.Widgets.SpinBox, title, spin)
return spin
def _addSpinBox(self, title, values, row=None, column=0, colspan=0, rowspan=0):
spin = self._buildSpinBox(self.getContainer(), title, values)
self._positionWidget(spin, row, column, colspan, rowspan)
self.setSpinBoxPos(title, 0)
return spin
def addSpinBox(self, title, values, row=None, column=0, colspan=0, rowspan=0, **kwargs):
''' adds a spinbox, with the specified values '''
return self._addSpinBox(title, values, row, column, colspan, rowspan)
def addLabelSpinBox(self, title, values, row=None, column=0, colspan=0, rowspan=0, **kwargs):
''' adds a spinbox, with the specified values, and a label displaying the title '''
frame = self._getLabelBox(title, **kwargs)
spin = self._buildSpinBox(frame, title, values)
self._packLabelBox(frame, spin)
self._positionWidget(frame, row, column, colspan, rowspan)
self.setSpinBoxPos(title, 0)
return spin
def addSpinBoxRange(self, title, fromVal, toVal, row=None, column=0, colspan=0, rowspan=0, **kwargs):
''' adds a spinbox, with a range of whole numbers '''
vals = list(range(fromVal, toVal + 1))
spin = self._addSpinBox(title, vals, row, column, colspan, rowspan)
spin.isRange = True
return spin
def addLabelSpinBoxRange(self, title, fromVal, toVal, row=None, column=0, colspan=0, rowspan=0, label=True, **kwargs):
''' adds a spinbox, with a range of whole numbers, and a label displaying the title '''
vals = list(range(fromVal, toVal + 1))
spin = self.addLabelSpinBox(title, vals, row, column, colspan, rowspan, label=label)
spin.isRange = True
return spin
def getSpinBox(self, title):
spin = self.widgetManager.get(self.Widgets.SpinBox, title)
return spin.get()
def getAllSpinBoxes(self):
boxes = {}
for k in self.widgetManager.group(self.Widgets.SpinBox):
boxes[k] = self.getSpinBox(k)
return boxes
# validates that an item in the named spinbox starts with the user_input
def _validateSpinBox(self, user_input, widget_name):
spin = self.containerStack[0]['container'].nametowidget(widget_name)
vals = spin.cget("values") # .split()
vals = self._getSpinBoxValsAsList(vals)
for i in vals:
if i.startswith(user_input):
return True
self.containerStack[0]['container'].bell()
return False
# expects a valid spin box widget, and a valid value
def _setSpinBoxVal(self, spin, val, callFunction=True):
# now call function
with PauseCallFunction(callFunction, spin):
spin.var.set(val)
# is it going to be a hash or list??
def _getSpinBoxValsAsList(self, vals):
vals.replace("{", "")
vals.replace("}", "")
# if "{" in vals:
# vals = vals[1:-1]
# vals = vals.split("} {")
# else:
vals = vals.split()
return vals
def setSpinBox(self, title, value, callFunction=True):
spin = self.widgetManager.get(self.Widgets.SpinBox, title)
vals = spin.cget("values") # .split()
vals = self._getSpinBoxValsAsList(vals)
val = str(value)
if val not in vals:
raise Exception( "Invalid value: " + val + ". Not in SpinBox: " +
title + "=" + str(vals))
self._setSpinBoxVal(spin, val, callFunction)
def clearAllSpinBoxes(self, callFunction=False):
for sb in self.widgetManager.group(self.Widgets.SpinBox):
self.setSpinBoxPos(sb, 0, callFunction=callFunction)
def setSpinBoxPos(self, title, pos, callFunction=True):
spin = self.widgetManager.get(self.Widgets.SpinBox, title)
vals = spin.cget("values") # .split()
vals = self._getSpinBoxValsAsList(vals)
pos = int(pos)
if pos < 0 or pos >= len(vals):
raise Exception( "Invalid position: " + str(pos) + ". No position in SpinBox: " +
title + "=" + str(vals))
pos = len(vals) - 1 - pos
val = vals[pos]
self._setSpinBoxVal(spin, val, callFunction)
def changeSpinBox(self, title, vals):
spin = self.widgetManager.get(self.Widgets.SpinBox, title)
if spin.isRange:
self.warn("Can't convert %s RangeSpinBox to SpinBox", title)
else:
vals = list(vals)
vals.reverse()
vals = tuple(vals)
spin.config(values=vals)
self.setSpinBoxPos(title, 0)
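# A minimal usage sketch for spin boxes (illustrative only; assumes an appJar
# gui instance named `app`):
#
#   app.spinBox("Size", ["small", "medium", "large"])
#   app.setSpinBox("Size", "medium")
#   print(app.getSpinBox("Size"))         # -> "medium"
#   app.spinBox("Count", 1, endValue=10)  # whole-number range 1..10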
#####################################
# FUNCTION to add images
#####################################
def image(self, title, value=None, *args, **kwargs):
""" simpleGUI - adds, sets & gets images all in one go """
widgKind = self.Widgets.Image
kind = kwargs.pop("kind", "standard").lower().strip()
speed = kwargs.pop("speed", None)
drop = kwargs.pop("drop", None)
over = kwargs.pop("over", None)
submit = kwargs.pop("submit", None)
_map = kwargs.pop("map", None)
try: self.widgetManager.verify(widgKind, title)
except: # already exists
if value is not None:
if kind == "data":
self.setImageData(title, value, **kwargs)
elif kind == "icon":
gui.warn("Changing image icons not yet supported: %s.", title)
else:
self.setImage(title, value)
image = self.getImage(title)
else: # new widget
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
if kind == "icon":
image = self.addIcon(title, value, *args, **kwargs)
elif kind == "data":
image = self.addImageData(title, value, *args, **kwargs)
else:
image = self.addImage(title, value, *args, **kwargs)
if speed is not None: self.setAnimationSpeed(title, speed)
if over is not None: self.setImageMouseOver(title, over)
if submit is not None:
if _map is not None: self.setImageMap(title, submit, _map)
else: self.setImageSubmitFunction(title, submit)
elif submit is None and _map is not None:
gui.warn("Must specify a submit function when setting an image map: %s", title)
if drop is not None: self.setImageDropTarget(title, drop)
if len(kwargs) > 0:
self._configWidget(title, widgKind, **kwargs)
return image
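# A minimal usage sketch for images (illustrative only; assumes an appJar gui
# instance named `app` and a local file "spinner.gif"):
#
#   app.image("Logo", "spinner.gif", speed=100)  # animated gifs auto-play
#   app.stopAnimation("Logo")
#   app.startAnimation("Logo")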
# looks up label containing image
def _animateImage(self, title, firstTime=False):
if not self.alive: return
try:
lab = self.widgetManager.get(self.Widgets.Image, title)
except ItemLookupError:
# image destroyed...
try: self.widgetManager.remove(self.Widgets.AnimationID, title)
except: pass
return
if not lab.image.animating:
self.widgetManager.remove(self.Widgets.AnimationID, title)
return
if firstTime and lab.image.alreadyAnimated:
return
lab.image.alreadyAnimated = True
try:
if lab.image.cached:
pic = lab.image.pics[lab.image.anim_pos]
else:
pic = PhotoImage(file=lab.image.path,
format="gif - {0}".format(lab.image.anim_pos))
lab.image.pics.append(pic)
lab.image.anim_pos += 1
lab.config(image=pic)
anim_id = self.topLevel.after(int(lab.image.anim_speed), self._animateImage, title)
self.widgetManager.update(self.Widgets.AnimationID, title, anim_id)
except IndexError:
# will be thrown when we reach end of anim images
lab.image.anim_pos = 0
lab.image.cached = True
self._animateImage(title)
except TclError:
# will be thrown when all images cached
lab.image.anim_pos = 0
lab.image.cached = True
self._animateImage(title)
def _preloadAnimatedImage(self, img):
if not self.alive: return
if img.cached:
return
try:
pic = PhotoImage(file=img.path,
format="gif - {0}".format(img.anim_pos))
img.pics.append(pic)
img.anim_pos += 1
self.preloadAnimatedImageId = self.topLevel.after(
0, self._preloadAnimatedImage, img)
# when all frames have been processed
except TclError as e:
# expected - when all images cached
img.anim_pos = 0
img.cached = True
def _configAnimatedImage(self, img):
img.alreadyAnimated = False
img.isAnimated = True
img.pics = []
img.cached = False
img.anim_pos = 0
img.anim_speed = 150
img.animating = True
# simple way to check if image is animated
def _checkIsAnimated(self, name):
if imghdr.what(name) == "gif":
try:
PhotoImage(file=name, format="gif - 1")
return True
except:
pass
return False
def setAnimationSpeed(self, name, speed):
img = self.widgetManager.get(self.Widgets.Image, name).image
if speed < 1:
speed = 1
self.warn("Setting %s speed to 1. Minimum animation speed is 1.", name)
img.anim_speed = int(speed)
def stopAnimation(self, name):
img = self.widgetManager.get(self.Widgets.Image, name).image
img.animating = False
def startAnimation(self, name):
img = self.widgetManager.get(self.Widgets.Image, name).image
if not img.animating:
img.animating = True
anim_id = self.topLevel.after(img.anim_speed, self._animateImage, name)
self.widgetManager.update(self.Widgets.AnimationID, name, anim_id)
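    # Animation control sketch (illustrative only; assumes app = gui() and an
    # animated GIF added via app.image("anim", "anim.gif")):
    #   app.setAnimationSpeed("anim", 100)    # ms delay between frames
    #   app.stopAnimation("anim")
    #   app.startAnimation("anim")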
# function to set an alternative image, when a mouse goes over
def setImageMouseOver(self, title, overImg):
lab = self.widgetManager.get(self.Widgets.Image, title)
# first check over image & cache it
fullPath = self.getImagePath(overImg)
self.topLevel.after(0, self._getImage, fullPath)
leaveImg = lab.image.path
lab.bind("<Leave>", lambda e: self.setImage(title, leaveImg, True))
lab.bind("<Enter>", lambda e: self.setImage(title, fullPath, True))
lab.hasMouseOver = True
# function to set an image location
def setImageLocation(self, location):
if os.path.isdir(location):
self.userImages = location
else:
raise Exception("Invalid image location: " + location)
# get the full path of an image (including image folder)
def getImagePath(self, imagePath):
if imagePath is None:
return None
if self.userImages is not None:
imagePath = os.path.join(self.userImages, imagePath)
absPath = os.path.abspath(imagePath)
return absPath
# function to see if an image has changed
def hasImageChanged(self, originalImage, newImage):
newAbsImage = self.getImagePath(newImage)
if originalImage is None:
return True
# filename has changed
if originalImage.path != newAbsImage:
return True
# modification time has changed
if originalImage.modTime != os.path.getmtime(newAbsImage):
return True
# no changes
return False
    # function to remove image objects from cache
def clearImageCache(self):
self.widgetManager.clear(self.Widgets.ImageCache)
# internal function to build an image function from a string
def _getImageData(self, imageData, fmt="gif"):
if fmt=="png":
self._importPngimagetk()
if PngImageTk is False:
raise Exception("TKINTERPNG library not found, PNG files not supported: imageData")
            if sys.version_info >= (2, 7):
                self.warn("Image processing for .PNGs is slow. .GIF is the recommended format")
                # PngImageTk converts from a file path, not raw data, so PNG
                # imageData was never implemented - raise a clear error here,
                # rather than hitting a NameError further down
                raise Exception("PNG imageData not supported - use GIF data or a PhotoImage object: imageData")
            else:
                raise Exception("PNG images only supported in python 3: imageData")
elif fmt == "gif":
imgObj = PhotoImage(data=imageData)
else:
# expect we already have a PhotoImage object, for example created by PIL
imgObj = imageData
imgObj.path = None
imgObj.modTime = datetime.datetime.now()
imgObj.isAnimated = False
imgObj.animating = False
return imgObj
# internal function to check/build image object
def _getImage(self, imagePath, checkCache=True, addToCache=True):
if imagePath is None:
return None
# get the full image path
imagePath = self.getImagePath(imagePath)
# if we're caching, and we have a non-None entry in the cache - get it...
photo = None
if checkCache and imagePath in self.widgetManager.group(self.Widgets.ImageCache) and self.widgetManager.get(self.Widgets.ImageCache, imagePath) is not None:
photo = self.widgetManager.get(self.Widgets.ImageCache, imagePath)
# if the image hasn't changed, use the cache
if not self.hasImageChanged(photo, imagePath):
pass
# else load a new one
elif os.path.isfile(imagePath):
if os.access(imagePath, os.R_OK):
imgType = imghdr.what(imagePath)
if imgType is None:
raise Exception( "Invalid file: " + imagePath + " is not a valid image")
elif not imagePath.lower().endswith(imgType) and not (
imgType == "jpeg" and imagePath.lower().endswith("jpg")):
# the image has been saved with the wrong extension
raise Exception(
"Invalid image extension: " +
imagePath +
" should be a ." +
imgType)
elif imagePath.lower().endswith('.gif'):
photo = PhotoImage(file=imagePath)
elif imagePath.lower().endswith('.ppm') or imagePath.lower().endswith('.pgm'):
photo = PhotoImage(file=imagePath)
elif imagePath.lower().endswith('jpg') or imagePath.lower().endswith('jpeg'):
self.warn("Image processing for .JPGs is slow. .GIF is the recommended format")
photo = self.convertJpgToBmp(imagePath)
elif imagePath.lower().endswith('.png'):
# known issue here, some PNGs lack IDAT chunks
# also, PNGs seem broken on python<3, maybe around the map
# function used to generate pixel maps
self._importPngimagetk()
if PngImageTk is False:
raise Exception(
"TKINTERPNG library not found, PNG files not supported: " + imagePath)
if sys.version_info >= (2, 7):
self.warn("Image processing for .PNGs is slow. .GIF is the recommended format")
png = PngImageTk(imagePath)
png.convert()
photo = png.image
else:
raise Exception("PNG images only supported in python 3: " + imagePath)
else:
raise Exception("Invalid image type: " + imagePath)
else:
raise Exception("Can't read image: " + imagePath)
else:
raise Exception("Image " + imagePath + " does not exist")
# store the full path to this image
photo.path = imagePath
# store the modification time
photo.modTime = os.path.getmtime(imagePath)
# sort out if it's an animated image
if self._checkIsAnimated(imagePath):
self._configAnimatedImage(photo)
self._preloadAnimatedImage(photo)
else:
photo.isAnimated = False
photo.animating = False
if addToCache:
self.widgetManager.update(self.Widgets.ImageCache, imagePath, photo)
return photo
def getImageDimensions(self, name):
img = self.widgetManager.get(self.Widgets.Image, name).image
return img.width(), img.height()
# force replace the current image, with a new one
def reloadImage(self, name, imageFile):
label = self.widgetManager.get(self.Widgets.Image, name)
image = self._getImage(imageFile, False)
self._populateImage(name, image)
def reloadImageData(self, name, imageData, fmt="gif"):
self.setImageData(name, imageData, fmt)
def setImageData(self, name, imageData, fmt="gif"):
label = self.widgetManager.get(self.Widgets.Image, name)
image = self._getImageData(imageData, fmt=fmt)
self._populateImage(name, image)
# replace the current image, with a new one
def getImage(self, name):
label = self.widgetManager.get(self.Widgets.Image, name)
return label.image.path
def setImage(self, name, imageFile, internal=False):
label = self.widgetManager.get(self.Widgets.Image, name)
imageFile = self.getImagePath(imageFile)
# only set the image if it's different
if label.image.path == imageFile:
self.warn("Not updating %s, %s hasn't changed." , name, imageFile)
return
elif imageFile is None:
return
else:
image = self._getImage(imageFile)
self._populateImage(name, image, internal)
# internal function to update the image in a label
def _populateImage(self, name, image, internal=False):
label = self.widgetManager.get(self.Widgets.Image, name)
label.image.animating = False
label.config(image=image)
label.config(anchor=CENTER, font=self._getContainerProperty('labelFont'))
if not self.ttkFlag:
label.config(background=self._getContainerBg())
label.image = image # keep a reference!
if image.isAnimated:
anim_id = self.topLevel.after(
image.anim_speed + 100,
self._animateImage,
name,
True)
self.widgetManager.update(self.Widgets.AnimationID, name, anim_id)
if not internal and label.hasMouseOver:
leaveImg = label.image.path
label.bind("<Leave>", lambda e: self.setImage(name, leaveImg, True))
# removed - keep the label the same size, and crop images
#h = image.height()
#w = image.width()
#label.config(height=h, width=w)
self.topLevel.update_idletasks()
# function to configure an image map
def setImageMap(self, name, func, coords):
self._setWidgetMap(name, self.Widgets.Image, func, coords)
def _setWidgetMap(self, name, _type, func, coords):
widget = self.widgetManager.get(_type, name)
rectangles = []
if len(coords) > 0:
for k, v in coords.items():
rect = AjRectangle(k, AjPoint(v[0], v[1]), v[2]-v[0], v[3]-v[1])
rectangles.append(rect)
widget.MAP_COORDS = rectangles
widget.MAP_FUNC = func
widget.bind("<Button-1>", lambda e: self._widgetMap(_type, name, e), add="+")
# function called when an image map is clicked
def _widgetMap(self, _type, name, event):
widget = self.widgetManager.get(_type, name)
for rect in widget.MAP_COORDS:
if rect.contains(AjPoint(event.x, event.y)):
widget.MAP_FUNC(rect.name)
return
widget.MAP_FUNC("UNKNOWN: " + str(event.x) + ", " + str(event.y))
def addImage(self, name, imageFile, row=None, column=0, colspan=0, rowspan=0, compound=None):
''' Adds an image at the specified position '''
self.widgetManager.verify(self.Widgets.Image, name)
imgObj = self._getImage(imageFile)
self._addImageObj(name, imgObj, row, column, colspan, rowspan, compound=compound)
self.widgetManager.get(self.Widgets.Image, name).hasMouseOver = False
return imgObj
def addIcon(self, name, iconName, row=None, column=0, colspan=0, rowspan=0, compound=None):
''' adds one of the built-in icons at the specified position '''
icon = os.path.join(self.icon_path, iconName.lower()+".png")
with PauseLogger():
return self.addImage(name, icon, row, column, colspan, rowspan, compound=compound)
def addImageData(self, name, imageData, row=None, column=0, colspan=0, rowspan=0, fmt="gif", compound=None):
''' load image from base-64 encoded GIF
use base64 module to convert binary data to base64 '''
self.widgetManager.verify(self.Widgets.Image, name)
imgObj = self._getImageData(imageData, fmt)
self._addImageObj(name, imgObj, row, column, colspan, rowspan, compound=compound)
self.widgetManager.get(self.Widgets.Image, name).hasMouseOver = False
return imgObj
def _addImageObj(self, name, img, row=None, column=0, colspan=0, rowspan=0, compound=None):
if not self.ttkFlag:
label = Label(self.getContainer())
label.config(background=self._getContainerBg())
else:
label = ttk.Label(self.getContainer())
label.config(anchor=CENTER, font=self._getContainerProperty('labelFont'),image=img)
label.image = img # keep a reference!
if compound is not None:
label.config(text=name, compound=compound)
if img is not None and compound is None and not self.ttkFlag:
h = img.height()
w = img.width()
label.config(height=h, width=w)
self.widgetManager.add(self.Widgets.Image, name, label)
self._positionWidget(label, row, column, colspan, rowspan)
if img.isAnimated:
anim_id = self.topLevel.after(
img.anim_speed, self._animateImage, name, True)
self.widgetManager.update(self.Widgets.AnimationID, name, anim_id)
def setImageSize(self, name, width, height):
img = self.widgetManager.get(self.Widgets.Image, name)
img.config(height=height, width=width)
# def rotateImage(self, name, image):
# img = self.widgetManager.get(self.Widgets.Image, name)
# if +ve then grow, else shrink...
def zoomImage(self, name, x, y=''):
if x <= 0:
self.shrinkImage(name, x * -1, y * -1)
else:
self.growImage(name, x, y)
# get every nth pixel (must be an integer)
# 0 will return an empty image, 1 will return the image, 2 will be 1/2 the
# size ...
def shrinkImage(self, name, x, y=''):
label = self.widgetManager.get(self.Widgets.Image, name)
image = label.image.subsample(x, y)
label.config(image=image)
label.config(anchor=CENTER, font=self._getContainerProperty('labelFont'))
if not self.ttkFlag:
label.config(background=self._getContainerBg())
label.config(width=image.width(), height=image.height())
label.modImage = image # keep a reference!
# get every nth pixel (must be an integer)
# 0 won't work, 1 will return the original size
def growImage(self, name, x, y=''):
label = self.widgetManager.get(self.Widgets.Image, name)
image = label.image.zoom(x, y)
label.config(image=image)
label.config(anchor=CENTER, font=self._getContainerProperty('labelFont'))
if not self.ttkFlag:
label.config(background=self._getContainerBg())
label.config(width=image.width(), height=image.height())
label.modImage = image # keep a reference!
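    # Zoom sketch (illustrative only). zoomImage() keeps every nth pixel when
    # shrinking, and copies each pixel n times when growing - integers only:
    #   app.zoomImage("logo", 2)      # double the size
    #   app.zoomImage("logo", -2)     # halve the size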
def convertJpgToBmp(self, image):
self._loadNanojpeg()
if nanojpeg is False:
raise Exception(
"nanojpeg library not found, unable to display jpeg files: " + image)
elif sys.version_info < (2, 7):
raise Exception(
"JPG images only supported in python 2.7+: " + image)
else:
# read the image into an array of bytes
with open(image, 'rb') as inFile:
buf = array.array(str('B'), inFile.read())
# init the translator, and decode the array of bytes
nanojpeg.njInit()
nanojpeg.njDecode(buf, len(buf))
# determine a file name & type
if nanojpeg.njIsColor():
# fileName = image.split('.jpg', 1)[0] + '.ppm'
param = 6
else:
# fileName = image.split('.jpg', 1)[0] + '.pgm'
# fileName = "test3.pgm"
param = 5
# create a string, starting with the header
val = "P%d\n%d %d\n255\n" % (
param, nanojpeg.njGetWidth(), nanojpeg.njGetHeight())
# append the bytes, converted to chars
val = str(val) + str('').join(map(chr, nanojpeg.njGetImage()))
# release any stuff
nanojpeg.njDone()
photo = PhotoImage(data=val)
return photo
# write the chars to a new file, if python3 we need to encode them first
# with open(fileName, "wb") as outFile:
# if sys.version_info[0] == 2: outFile.write(val)
# else: outFile.write(val.encode('ISO-8859-1'))
#
# return fileName
# function to set a background image
# make sure this is done before everything else, otherwise it will cover
# other widgets
def setBgImage(self, image):
image = self._getImage(image, False, False) # make sure it's not using the cache
# self.containerStack[0]['container'].config(image=image) # window as a
# label doesn't work...
self.bgLabel.config(image=image)
self.containerStack[0]['container'].image = image # keep a reference!
def removeBgImage(self):
self.bgLabel.config(image="")
# self.containerStack[0]['container'].config(image=None) # window as a
# label doesn't work...
# remove the reference - shouldn't be cached
self.containerStack[0]['container'].image = None
    def resizeBgImage(self):
        ''' placeholder - resizing the background image is not yet implemented '''
        if self.containerStack[0]['container'].image is None:
            return
#####################################
# FUNCTION to play sounds
#####################################
# function to set a sound location
def setSoundLocation(self, location):
if os.path.isdir(location):
self.userSounds = location
else:
raise Exception("Invalid sound location: " + location)
# internal function to manage sound availability
def _soundWrap(self, sound, isFile=False, repeat=False, wait=False):
self._loadWinsound()
if self.platform == self.WINDOWS and winsound is not False:
sound = self._translateSound(sound)
if self.userSounds is not None and sound is not None:
sound = os.path.join(self.userSounds, sound)
if isFile:
if os.path.isfile(sound) is False:
raise Exception("Can't find sound: " + sound)
if not sound.lower().endswith('.wav'):
raise Exception("Invalid sound format: " + sound)
kind = winsound.SND_FILENAME
if not wait:
kind = kind | winsound.SND_ASYNC
else:
if sound is None:
kind = winsound.SND_FILENAME
else:
kind = winsound.SND_ALIAS
if not wait:
kind = kind | winsound.SND_ASYNC
if repeat:
kind = kind | winsound.SND_LOOP
winsound.PlaySound(sound, kind)
else:
# sound not available at this time
raise Exception(
"Sound not supported on this platform: " +
platform())
def playSound(self, sound, wait=False):
self._soundWrap(sound, True, False, wait)
def stopSound(self):
self._soundWrap(None)
def loopSound(self, sound):
self._soundWrap(sound, True, True)
def soundError(self):
self._soundWrap("SystemHand")
def soundWarning(self):
self._soundWrap("SystemAsterisk")
def bell(self):
self.containerStack[0]['container'].bell()
def playNote(self, note, duration=200):
self._loadWinsound()
if self.platform == self.WINDOWS and winsound is not False:
try:
if isinstance(note, STRING):
freq = self.NOTES[note.lower()]
else:
freq = note
except KeyError:
raise Exception("Error: cannot play note - " + note)
try:
if isinstance(duration, STRING):
length = self.DURATIONS[duration.upper()]
else:
length = duration
except KeyError:
raise Exception("Error: cannot play duration - " + duration)
try:
winsound.Beep(freq, length)
except RuntimeError:
raise Exception(
"Sound not available on this platform: " +
platform())
else:
# sound not available at this time
raise Exception(
"Sound not supported on this platform: " +
platform())
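    # Sound sketch (illustrative only; Windows-only, needs winsound; assumes a
    # local "ding.wav" and that "c" is a key in self.NOTES):
    #   app.playSound("ding.wav")             # asynchronous
    #   app.playSound("ding.wav", wait=True)  # blocks until finished
    #   app.playNote("c", 400)                # frequency looked up, 400ms beep
    #   app.stopSound()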
#####################################
# FUNCTION for radio buttons
#####################################
def radio(self, title, name=None, *args, **kwargs):
""" simpleGUI - shortner for radioButton() """
return self.radioButton(title, name, *args, **kwargs)
def radioButton(self, title, name=None, *args, **kwargs):
""" simpleGUI - adds, sets & gets radioButtons all in one go """
widgKind = self.Widgets.RadioButton
selected = kwargs.pop("selected", False)
callFunction = kwargs.pop("callFunction", True)
change = kwargs.pop("change", None)
# need slightly different approach, as use two params
if name is None: return self.getRadioButton(title) # no name = get
else:
ident = title + "-" + name
try: self.widgetManager.verify(widgKind, ident)
except:
self.setRadioButton(title, name, callFunction=callFunction)
rb = self.getRadioButton(title)
selected = False
else:
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
rb = self._radioButtonMaker(title, name, *args, **kwargs)
if selected: self.setRadioButton(title, name)
if change is not None: self.setRadioButtonChangeFunction(title, change)
if len(kwargs) > 0:
self._configWidget(ident, widgKind, **kwargs)
return rb
def _radioButtonMaker(self, title, name, row=None, column=0, colspan=0, rowspan=0, **kwargs):
return self.addRadioButton(title, name, row, column, colspan, rowspan)
def addRadioButton(self, title, name, row=None, column=0, colspan=0, rowspan=0):
        ''' adds a radio button, to the group 'title' with the text 'name' '''
ident = title + "-" + name
self.widgetManager.verify(self.Widgets.RadioButton, ident)
var = None
newRb = False
# title - is the grouper
# so, if we already have an entry in n_rbVars - get it
if (title in self.widgetManager.group(self.Widgets.RadioButton, group=WidgetManager.VARS)):
var = self.widgetManager.get(self.Widgets.RadioButton, title, group=WidgetManager.VARS)
else:
# if this is a new grouper - set it all up
var = StringVar(self.topLevel)
self.widgetManager.add(self.Widgets.RadioButton, title, var, group=WidgetManager.VARS)
newRb = True
# finally, create the actual RadioButton
if not self.ttkFlag:
rb = Radiobutton(self.getContainer(), text=name, variable=var, value=name)
rb.config(anchor=W, background=self._getContainerBg(), indicatoron=1,
activebackground=self._getContainerBg(), font=self._getContainerProperty('labelFont')
)
else:
rb = ttk.Radiobutton(self.getContainer(), text=name, variable=var, value=name)
rb.bind("<Button-1>", self._grabFocus)
rb.DEFAULT_TEXT = name
self.widgetManager.add(self.Widgets.RadioButton, ident, rb)
#rb.bind("<Tab>", self._focusNextWindow)
#rb.bind("<Shift-Tab>", self._focusLastWindow)
# and select it, if it's the first item in the list
if newRb:
rb.select() if not self.ttkFlag else rb.invoke()
var.startVal = name # so we can reset it...
self._positionWidget(rb, row, column, colspan, rowspan, EW)
return rb
def getRadioButton(self, title):
var = self.widgetManager.get(self.Widgets.RadioButton, title, group=WidgetManager.VARS)
return var.get()
def getAllRadioButtons(self):
rbs = {}
for k in self.widgetManager.group(self.Widgets.RadioButton, group=WidgetManager.VARS):
rbs[k] = self.getRadioButton(k)
return rbs
def setRadioButton(self, title, value, callFunction=True):
ident = title + "-" + value
self.widgetManager.get(self.Widgets.RadioButton, ident)
# now call function
var = self.widgetManager.get(self.Widgets.RadioButton, title, group=WidgetManager.VARS)
with PauseCallFunction(callFunction, var, False):
var.set(value)
def clearAllRadioButtons(self, callFunction=False):
for rb in self.widgetManager.group(self.Widgets.RadioButton, group=WidgetManager.VARS):
self.setRadioButton(rb, self.widgetManager.get(self.Widgets.RadioButton, rb, group=WidgetManager.VARS).startVal, callFunction=callFunction)
def setRadioTick(self, title, tick=True):
for k, v in self.widgetManager.group(self.Widgets.RadioButton).items():
if k.startswith(title+"-"):
if tick:
v.config(indicatoron=1)
else:
v.config(indicatoron=0)
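    # Radio-button sketch (illustrative only; assumes app = gui()). The first
    # button added to a group becomes the selected default:
    #   app.addRadioButton("drink", "tea")
    #   app.addRadioButton("drink", "coffee")
    #   app.setRadioButton("drink", "coffee")
    #   app.getRadioButton("drink")           # -> "coffee"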
#####################################
# FUNCTION for list box
#####################################
def listbox(self, title, value=None, *args, **kwargs):
""" simpleGUI - shortner for listBox() """
return self.listBox(title, value, *args, **kwargs)
def listBox(self, title, value=None, *args, **kwargs):
""" simpleGUI -- adds, sets & gets listBoxes all in one go """
widgKind = self.Widgets.ListBox
rows = kwargs.pop("rows", None)
multi = kwargs.pop("multi", False)
group = kwargs.pop("group", False)
selected = kwargs.pop("selected", None)
try: self.widgetManager.verify(widgKind, title)
except: # widget exists
if value is not None: self.selectListItem(title, value, *args, **kwargs)
listBox = self.getListBox(title)
else: # new widget
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
listBox = self._listBoxMaker(title, value, *args, **kwargs)
if rows is not None: self.setListBoxRows(title, rows)
if multi: self.setListBoxMulti(title)
if group: self.setListBoxGroup(title)
if selected is not None: self.selectListItemAtPos(title, selected, callFunction=False)
if len(kwargs) > 0:
self._configWidget(title, widgKind, **kwargs)
return listBox
def _listBoxMaker(self, name, values=None, row=None, column=0, colspan=0, rowspan=0, **kwargs):
""" internal wrapper to hide kwargs from original add functions """
return self.addListBox(name, values, row, column, colspan, rowspan)
def addListBox(self, name, values=None, row=None, column=0, colspan=0, rowspan=0):
        ''' adds a list box, with the specified list of values '''
self.widgetManager.verify(self.Widgets.ListBox, name)
container = self.makeListBoxContainer()(self.getContainer())
vscrollbar = AutoScrollbar(container)
hscrollbar = AutoScrollbar(container, orient=HORIZONTAL)
container.lb = Listbox(container,
yscrollcommand=vscrollbar.set,
xscrollcommand=hscrollbar.set)
vscrollbar.grid(row=0, column=1, sticky=N + S)
hscrollbar.grid(row=1, column=0, sticky=E + W)
container.lb.grid(row=0, column=0, sticky=N + S + E + W)
container.grid_rowconfigure(0, weight=1)
container.grid_columnconfigure(0, weight=1)
vscrollbar.config(command=container.lb.yview)
hscrollbar.config(command=container.lb.xview)
container.lb.config(font=self._getContainerProperty('inputFont'))
self.widgetManager.add(self.Widgets.ListBox, name, container.lb)
container.lb.DEFAULT_TEXT=""
if values is not None:
container.lb.DEFAULT_TEXT='\n'.join(str(x) for x in values)
            for item in values:    # don't shadow the 'name' parameter
                container.lb.insert(END, item)
self._positionWidget(container, row, column, colspan, rowspan)
return container.lb
# enable multiple listboxes to be selected at the same time
    def setListBoxGroup(self, name, group=True):
        lb = self.widgetManager.get(self.Widgets.ListBox, name)
        # grouping means several listboxes can hold a selection at once,
        # which needs tk's selection-exporting turned off
        lb.config(exportselection=not group)
# set how many rows to display
def setListBoxRows(self, name, rows):
lb = self.widgetManager.get(self.Widgets.ListBox, name)
lb.config(height=rows)
# make the list single/multi select
# default is single
def setListBoxMulti(self, title, multi=True):
lb = self.widgetManager.get(self.Widgets.ListBox, title)
if multi:
lb.config(selectmode=EXTENDED)
else:
lb.config(selectmode=BROWSE)
# select the specified item in the list
def selectListItem(self, title, item, callFunction=True):
lb = self.widgetManager.get(self.Widgets.ListBox, title)
positions = self._getListPositions(title, item)
if len(positions) > 1 and lb.cget("selectmode") == EXTENDED:
allOk = True
for pos in positions:
if not self.selectListItemAtPos(title, pos, callFunction):
allOk = False
return allOk
elif len(positions) > 1:
gui.warn("Unable to select multiple items for list: %s. Selecting first item: %s", title, item[0])
return self.selectListItemAtPos(title, positions[0], callFunction)
elif len(positions) == 1:
return self.selectListItemAtPos(title, positions[0], callFunction)
else:
gui.warn("Invalid list item(s): %s for list: %s", item, title)
return False
def selectListItemAtPos(self, title, pos, callFunction=False):
lb = self.widgetManager.get(self.Widgets.ListBox, title)
if lb.size() == 0:
gui.warn("No items in list: %s, unable to select item at pos: %s", title, pos)
return False
if pos < 0 or pos > lb.size() - 1:
gui.warn("Invalid list position: %s for list: %s (max: %s)", pos, title, lb.size()-1)
return False
# clear previous selection if we're not multi
if lb.cget("selectmode") != EXTENDED:
lb.selection_clear(0, END)
# show & select this item
lb.see(pos)
lb.activate(pos)
lb.selection_set(pos)
# now call function
if callFunction and hasattr(lb, 'cmd'):
lb.cmd()
self.topLevel.update_idletasks()
return True
# replace the list items in the list box
def updateListBox(self, title, items, select=False, callFunction=True):
self.clearListBox(title, callFunction=callFunction)
self.addListItems(title, items, select=select)
def addListItems(self, title, items, select=True):
''' adds the list of items to the specified list box '''
for i in items:
self.addListItem(title, i, select=select)
def addListItem(self, title, item, pos=None, select=True):
''' add the item to the end of the specified list box '''
lb = self.widgetManager.get(self.Widgets.ListBox, title)
# add it at the end
if pos is None: pos = END
lb.insert(pos, item)
# show & select the newly added item
if select:
# clear any selection
items = lb.curselection()
if len(items) > 0:
lb.selection_clear(items)
self.selectListItemAtPos(title, lb.size() - 1)
    # returns a list of the 0 or more items currently selected in the list box
def getListBox(self, title):
lb = self.widgetManager.get(self.Widgets.ListBox, title)
items = lb.curselection()
values = []
for loop in range(len(items)):
values.append(lb.get(items[loop]))
return values
def getAllListBoxes(self):
boxes = {}
for k in self.widgetManager.group(self.Widgets.ListBox):
boxes[k] = self.getListBox(k)
return boxes
def getAllListItems(self, title):
lb = self.widgetManager.get(self.Widgets.ListBox, title)
items = lb.get(0, END)
return list(items)
def getListBoxPos(self, title):
lb = self.widgetManager.get(self.Widgets.ListBox, title)
# bug in tkinter 1.160 returns these as strings
items = [int(i) for i in lb.curselection()]
return items
def removeListItemAtPos(self, title, pos):
lb = self.widgetManager.get(self.Widgets.ListBox, title)
items = lb.get(0, END)
if pos >= len(items):
raise Exception("Invalid position: " + str(pos))
lb.delete(pos)
# show & select this item
if pos >= lb.size():
pos -= 1
self.selectListItemAtPos(title, pos)
# remove a specific item from the listBox
# will only remove the first item that matches the String
def removeListItem(self, title, item):
lb = self.widgetManager.get(self.Widgets.ListBox, title)
positions = self._getListPositions(title, item)
if len(positions) > 0:
lb.delete(positions[0])
# show & select this item
if positions[0] >= lb.size():
positions[0] -= 1
self.selectListItemAtPos(title, positions[0])
def setListItemAtPos(self, title, pos, newVal):
lb = self.widgetManager.get(self.Widgets.ListBox, title)
lb.delete(pos)
lb.insert(pos, newVal)
def setListItem(self, title, item, newVal, first=False):
for pos in self._getListPositions(title, item):
self.setListItemAtPos(title, pos, newVal)
if first:
break
# functions to config
def setListItemAtPosBg(self, title, pos, col):
lb = self.widgetManager.get(self.Widgets.ListBox, title)
lb.itemconfig(pos, bg=col)
def setListItemAtPosFg(self, title, pos, col):
lb = self.widgetManager.get(self.Widgets.ListBox, title)
lb.itemconfig(pos, fg=col)
def _getListPositions(self, title, item):
lb = self.widgetManager.get(self.Widgets.ListBox, title)
if not isinstance(item, list):
item = [item]
vals = lb.get(0, END)
positions = []
for pos, val in enumerate(vals):
if val in item:
positions.append(pos)
return positions
def setListItemBg(self, title, item, col):
for pos in self._getListPositions(title, item):
self.setListItemAtPosBg(title, pos, col)
def setListItemFg(self, title, item, col):
for pos in self._getListPositions(title, item):
self.setListItemAtPosFg(title, pos, col)
def clearListBox(self, title, callFunction=True):
lb = self.widgetManager.get(self.Widgets.ListBox, title)
lb.selection_clear(0, END)
lb.delete(0, END) # clear
if callFunction and hasattr(lb, 'cmd'):
lb.cmd()
def clearAllListBoxes(self, callFunction=False):
for lb in self.widgetManager.group(self.Widgets.ListBox):
self.clearListBox(lb, callFunction)
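    # List-box sketch (illustrative only; assumes app = gui()):
    #   app.addListBox("langs", ["python", "tcl", "ruby"])
    #   app.setListBoxMulti("langs")              # allow multiple selections
    #   app.selectListItem("langs", "python")
    #   app.getListBox("langs")                   # -> ["python"]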
#####################################
# FUNCTION for buttons
#####################################
def button(self, title, value=None, *args, **kwargs):
""" simpleGUI - adds, sets & gets buttons all in one go """
widgKind = self.Widgets.Button
image = kwargs.pop("image", None)
icon = kwargs.pop("icon", None)
name = kwargs.pop("label", kwargs.pop("name", None))
try: self.widgetManager.verify(self.Widgets.Button, title)
except: # widget exists
if value is not None: self.setButton(title, value)
button = self.getButton(title)
else: # new widget
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
if image is not None: button = self._buttonMaker(title, value, "image", image, *args, **kwargs)
elif icon is not None: button = self._buttonMaker(title, value, "icon", icon, *args, **kwargs)
elif name is not None: button = self._buttonMaker(title, value, "named", name, *args, **kwargs)
else: button = self._buttonMaker(title, value, "button", None, *args, **kwargs)
if len(kwargs) > 0:
self._configWidget(title, widgKind, **kwargs)
return button
def _buttonMaker(self, title, func, kind, extra=None, row=None, column=0, colspan=0, rowspan=0, *args, **kwargs):
""" internal wrapper to hide kwargs from original add functions """
align = kwargs.pop("align", None)
if kind == "button": return self.addButton(title, func, row, column, colspan, rowspan)
elif kind == "named": return self.addNamedButton(extra, title, func, row, column, colspan, rowspan)
elif kind == "image": return self.addImageButton(title, func, extra, row, column, colspan, rowspan, align=align)
elif kind == "icon": return self.addIconButton(title, func, extra, row, column, colspan, rowspan, align=align)
def _configWidget(self, title, kind, **kwargs):
widget = self.widgetManager.get(kind, title)
# remove any unwanted keys
for key in ["row", "column", "colspan", "rowspan", "label", "name"]:
kwargs.pop(key, None)
# ignore these for now as well
for key in ["pad", "inpad"]:
val = kwargs.pop(key, None)
if val is not None:
gui.error("Invalid argument for %s %s - %s:%s", self.Widgets.name(kind), title, key, val)
tooltip = kwargs.pop("tip", kwargs.pop("tooltip", None))
change = kwargs.pop("change", None)
submit = kwargs.pop("submit", None)
over = kwargs.pop("over", None)
drag = kwargs.pop("drag", None)
drop = kwargs.pop("drop", None)
right = kwargs.pop("right", None)
focus = kwargs.pop('focus', False)
_font = kwargs.pop('font', None)
if tooltip is not None: self._addTooltip(widget, tooltip, None)
if focus: widget.focus_set()
if change is not None: self._bindEvent(kind, title, widget, change, "change", key=None)
if submit is not None: self._bindEvent(kind, title, widget, submit, "submit", key=None)
if over is not None: self._bindOverEvent(kind, title, widget, over, None, None)
if drag is not None: self._bindDragEvent(kind, title, widget, drag, None, None)
if drop is not None: self._registerExternalDropTarget(title, widget, drop)
if right is not None: self._bindRightClick(widget, right)
# allow fonts to be passed in as either a dictionary or a single integer or a font object
if _font is not None:
if isinstance(_font, tkFont.Font):
widget.config(font=_font)
else:
if not isinstance(_font, dict): # assume int
_font = {"size":_font}
custFont = tkFont.Font(**_font)
widget.config(font=custFont)
# now pass the kwargs to the config function, ignore any baddies
errorMsg = ""
while True:
try: widget.config(**kwargs)
except TclError as e:
try:
                    key = str(e).split()[2][2:-1]
                    errorMsg = "".join([errorMsg, key, ":", str(kwargs.pop(key)), ", "])
except:
gui.error("Invalid argument for %s %s: %s", self.Widgets.name(kind), title, e)
break
else:
break
if len(errorMsg) > 0:
gui.error("Invalid arguments for %s %s - %s", self.Widgets.name(kind), title, errorMsg)
def _buildButton(self, title, func, frame, name=None):
if name is None:
name = title
if isinstance(title, list):
raise Exception("Can't add a button using a list of names: " + str(title) + " - you should use .addButtons()")
self.widgetManager.verify(self.Widgets.Button, title)
if not self.ttkFlag:
but = Button(frame, text=name)
but.config(font=self._getContainerProperty('buttonFont'))
if self.platform in [self.MAC, self.LINUX]:
but.config(highlightbackground=self._getContainerBg())
else:
but = ttk.Button(frame, text=name)
but.DEFAULT_TEXT = name
if func is not None:
command = self.MAKE_FUNC(func, title)
but.config(command=command)
#but.bind("<Tab>", self._focusNextWindow)
#but.bind("<Shift-Tab>", self._focusLastWindow)
self.widgetManager.add(self.Widgets.Button, title, but)
return but
def addNamedButton(self, name, title, func, row=None, column=0, colspan=0, rowspan=0):
''' adds a button, displaying the name as its text '''
but = self._buildButton(title, func, self.getContainer(), name)
self._positionWidget(but, row, column, colspan, rowspan, None)
return but
def addButton(self, title, func, row=None, column=0, colspan=0, rowspan=0):
''' adds a button with the title as its text '''
but = self._buildButton(title, func, self.getContainer())
self._positionWidget(but, row, column, colspan, rowspan, None)
return but
def addImageButton(self, title, func, imgFile, row=None, column=0, colspan=0, rowspan=0, align=None):
''' adds a button, displaying the specified image file '''
but = self._buildButton(title, func, self.getContainer())
self._positionWidget(but, row, column, colspan, rowspan, None)
self.setButtonImage(title, imgFile, align)
return but
def addIconButton(self, title, func, iconName, row=None, column=0, colspan=0, rowspan=0, align=None):
''' adds a button displaying the specified icon '''
icon = os.path.join(self.icon_path, iconName.lower()+".png")
with PauseLogger():
return self.addImageButton(title, func, icon, row, column, colspan, rowspan, align)
def setButton(self, name, text):
but = self.widgetManager.get(self.Widgets.Button, name)
try: # try to bind a function
command = self.MAKE_FUNC(text, name)
but.config(command=command)
except: # otherwise change the text
but.config(text=text)
def getButton(self, name):
but = self.widgetManager.get(self.Widgets.Button, name)
return but.cget("text")
def setButtonImage(self, name, imgFile, align=None):
but = self.widgetManager.get(self.Widgets.Button, name)
image = self._getImage(imgFile)
# works on Mac & Windows :)
        if align is None:
but.config(image=image, compound=TOP, text="")
if not self.ttk:
but.config(justify=LEFT)
else:
but.config(image=image, compound=align)
# but.config(image=image, compound=None, text="") # works on Windows, not Mac
but.image = image
    # adds a set of buttons, in the row, spanning specified columns
# pass in a list of names & a list of functions (or a single function to
# use for all)
def buttons(self, names, funcs, **kwargs):
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
self._addButtons(names, funcs, **kwargs)
if not isinstance(names[0], list):
names = [names]
for row in names:
for title in row:
self._configWidget(title, self.Widgets.Button, **kwargs)
def _addButtons(self, names, funcs, row=None, column=0, colspan=0, rowspan=0, **kwargs):
self.addButtons(names, funcs, row, column, colspan, rowspan)
def addButtons(self, names, funcs, row=None, column=0, colspan=0, rowspan=0):
''' adds a 1D/2D list of buttons '''
if not isinstance(names, list):
raise Exception(
"Invalid button: " +
names +
". It must be a list of buttons.")
singleFunc = self._checkFunc(names, funcs)
frame = self._makeWidgetBox()(self.getContainer())
if not self.ttk:
frame.config(background=self._getContainerBg())
# make them into a 2D array, if not already
if not isinstance(names[0], list):
names = [names]
# won't be used if single func
if funcs is not None:
funcs = [funcs]
for bRow in range(len(names)):
for i in range(len(names[bRow])):
t = names[bRow][i]
if funcs is None:
tempFunc = None
elif singleFunc is None:
tempFunc = funcs[bRow][i]
else:
tempFunc = singleFunc
but = self._buildButton(t, tempFunc, frame)
but.grid(row=bRow, column=i)
Grid.columnconfigure(frame, i, weight=1)
Grid.rowconfigure(frame, bRow, weight=1)
frame.theWidgets.append(but)
self._positionWidget(frame, row, column, colspan, rowspan)
self.widgetManager.log(self.Widgets.FrameBox, frame)
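    # Button sketch (illustrative only; assumes app = gui()). Handlers receive
    # the button's title, so one function can serve many buttons:
    #   def press(btn): print(btn)
    #   app.addButton("OK", press)
    #   app.addButtons([["Yes", "No"], ["Maybe"]], press)   # 2D grid, one handler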
#####################################
# FUNCTIONS for links
#####################################
def link(self, title, value=None, *args, **kwargs):
""" simpleGUI - adds, sets & gets links all in one go """
widgKind = self.Widgets.Link
try: self.widgetManager.verify(widgKind, title)
except: # widget exists
if value is not None: self.setLink(title, value)
link = self.getLink(title)
else: # new widget
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
link = self._linkMaker(title, value, *args, **kwargs)
if len(kwargs) > 0:
self._configWidget(title, widgKind, **kwargs)
return link
def _linkMaker(self, title, value, row=None, column=0, colspan=0, rowspan=0, *args, **kwargs):
if not callable(value) and not hasattr(value, '__call__'):
return self.addWebLink(title, value, row, column, colspan, rowspan)
else:
return self.addLink(title, value, row, column, colspan, rowspan)
def _buildLink(self, title):
self._importWebBrowser()
if not webbrowser:
self.error("Unable to load webbrowser - can't create links")
link = self._makeLink()(self.getContainer(), useTtk=self.ttkFlag)
link.config(text=title, font=self._linkFont)
if not self.ttk:
link.config(background=self._getContainerBg())
self.widgetManager.add(self.Widgets.Link, title, link)
return link
# launches a browser to the specified page
def addWebLink(self, title, page, row=None, column=0, colspan=0, rowspan=0):
''' adds a hyperlink to the specified web page '''
link = self._buildLink(title)
link.registerWebpage(page)
self._positionWidget(link, row, column, colspan, rowspan)
return link
# executes the specified function
def addLink(self, title, func, row=None, column=0, colspan=0, rowspan=0):
''' adds a hyperlink to the specified function '''
link = self._buildLink(title)
if func is not None:
myF = self.MAKE_FUNC(func, title)
link.registerCallback(myF)
self._positionWidget(link, row, column, colspan, rowspan)
return link
def getLink(self, title):
link = self.widgetManager.get(self.Widgets.Link, title)
return link.cget("text")
def setLink(self, title, func):
link = self.widgetManager.get(self.Widgets.Link, title)
if not callable(func) and not hasattr(func, '__call__'):
link.registerWebpage(func)
else:
myF = self.MAKE_FUNC(func, title)
link.registerCallback(myF)
#####################################
# FUNCTIONS for grips
#####################################
def grip(self, *args, **kwargs):
""" simpleGUI - adds grip """
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
return self.addGrip(*args, **kwargs)
# adds a simple grip, used to drag the window around
def addGrip(self, row=None, column=0, colspan=0, rowspan=0):
''' adds a grip, for dragging the GUI around '''
grip = self._makeGrip()(self.getContainer())
self._positionWidget(grip, row, column, colspan, rowspan)
self._addTooltip(grip, "Drag here to move", True)
return grip
#####################################
# FUNCTIONS for dnd
#####################################
def addTrashBin(self, title, row=None, column=0, colspan=0, rowspan=0):
''' NOT IN USE - adds a trashbin, for discarding dragged items '''
trash = TrashBin(self.getContainer())
self._positionWidget(trash, row, column, colspan, rowspan)
return trash
#####################################
# FUNCTIONS for turtle
#####################################
def addTurtle(self, title, row=None, column=0, colspan=0, rowspan=0):
''' adds a turtle widget at the specified position '''
self._loadTurtle()
if turtle is False:
raise Exception("Unable to load turtle")
self.widgetManager.verify(self.Widgets.Turtle, title)
canvas = Canvas(self.getContainer())
canvas.screen = turtle.TurtleScreen(canvas)
self._positionWidget(canvas, row, column, colspan, rowspan)
self.widgetManager.add(self.Widgets.Turtle, title, canvas)
canvas.turtle = turtle.RawTurtle(canvas.screen)
return canvas.turtle
def getTurtleScreen(self, title):
return self.widgetManager.get(self.Widgets.Turtle, title).screen
def getTurtle(self, title):
return self.widgetManager.get(self.Widgets.Turtle, title).turtle
#####################################
# FUNCTIONS for canvas
#####################################
def addCanvas(self, title, row=None, column=0, colspan=0, rowspan=0):
''' adds a canvas at the specified position '''
self.widgetManager.verify(self.Widgets.Canvas, title)
canvas = Canvas(self.getContainer())
canvas.config(bd=0, highlightthickness=0)
canvas.imageStore = []
self._positionWidget(canvas, row, column, colspan, rowspan, "news")
self.widgetManager.add(self.Widgets.Canvas, title, canvas)
return canvas
def getCanvas(self, title):
return self.widgetManager.get(self.Widgets.Canvas, title)
def clearCanvas(self, title):
self.widgetManager.get(self.Widgets.Canvas, title).delete("all")
# function to configure a canvas map
def setCanvasMap(self, name, func, coords):
self._setWidgetMap(name, self.Widgets.Canvas, func, coords)
def addCanvasCircle(self, title, x, y, diameter, **kwargs):
''' adds a circle to the specified canvas '''
return self.addCanvasOval(title, x, y, diameter, diameter, **kwargs)
def addCanvasOval(self, title, x, y, xDiam, yDiam, **kwargs):
''' adds a oval to the specified canvas '''
return self.widgetManager.get(self.Widgets.Canvas, title).create_oval(x, y, x+xDiam, y+yDiam, **kwargs)
def addCanvasLine(self, title, x, y, x2, y2, **kwargs):
''' adds a line to the specified canvas '''
return self.widgetManager.get(self.Widgets.Canvas, title).create_line(x, y, x2, y2, **kwargs)
def addCanvasRectangle(self, title, x, y, w, h, **kwargs):
''' adds a rectangle to the specified canvas '''
return self.widgetManager.get(self.Widgets.Canvas, title).create_rectangle(x, y, x+w, y+h, **kwargs)
def addCanvasText(self, title, x, y, text=None, **kwargs):
''' adds text to the specified canvas '''
return self.widgetManager.get(self.Widgets.Canvas, title).create_text(x, y, text=text, **kwargs)
    def addCanvasImage(self, title, x, y, image=None, **kwargs):
        ''' adds an image to the specified canvas '''
        canv = self.widgetManager.get(self.Widgets.Canvas, title)
        if isinstance(image, STRING):
            image = self._getImage(image)
        canv.imageStore.append(image)
        return canv.create_image(x, y, image=image, **kwargs)
def setCanvasEvent(self, title, item, event, function, add=None):
canvas = self.widgetManager.get(self.Widgets.Canvas, title)
canvas.tag_bind(item, event, function, add)
    def _canvasMaker(self, title, row=None, column=0, colspan=0, rowspan=0, **kwargs):
        return self.addCanvas(title, row, column, colspan, rowspan)
def canvas(self, title, *args, **kwargs):
""" simpleGUI - adds, sets & gets canases all in one go """
widgKind = self.Widgets.Canvas
submit = kwargs.pop("submit", None)
_map = kwargs.pop("map", None)
try: self.widgetManager.verify(widgKind, title)
except: # widget exists
# NB. no SETTER
canvas = self.getCanvas(title)
else: # new widget
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
canvas = self._canvasMaker(title, *args, **kwargs)
        if submit is not None and _map is not None:
            self.setCanvasMap(title, submit, _map)
        elif submit is None and _map is not None:
            gui.warn("Must specify a submit function when setting a canvas map: %s", title)
        if len(kwargs) > 0:
            self._configWidget(title, widgKind, **kwargs)
return canvas
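    # Canvas sketch (illustrative only; assumes app = gui()). Extra kwargs are
    # passed straight to the underlying tk create_* calls:
    #   app.addCanvas("c")
    #   app.addCanvasCircle("c", 10, 10, 50, fill="red")
    #   app.addCanvasRectangle("c", 70, 10, 40, 20, outline="blue")
    #   app.addCanvasText("c", 100, 60, "hello")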
#####################################
# FUNCTIONS for Microbits
#####################################
def microbit(self, title, *args, **kwargs):
'''simpleGUI - adds, sets & gets microbits all in one go'''
widgKind = self.Widgets.MicroBit
image = kwargs.pop("image", None)
brightness = kwargs.pop("brightness", None)
x = kwargs.pop("x", None)
y = kwargs.pop("y", None)
clear = kwargs.pop("clear", False)
try: self.widgetManager.verify(widgKind, title)
except: # widget exists
mb = self.getMicroBit(title)
else: # new widget
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
mb = self.addMicroBit(title, *args, **kwargs)
if image is not None: self.setMicroBitImage(title, image)
if brightness is not None: self.setMicroBitPixel(title, x, y, brightness)
if clear: self.clearMicroBit(title)
if len(kwargs) > 0:
self._configWidget(title, widgKind, **kwargs)
return mb
def addMicroBit(self, title, row=None, column=0, colspan=0, rowspan=0):
''' adds a simple microbit widget
used with permission from Ben Goodwin '''
self.widgetManager.verify(self.Widgets.MicroBit, title)
mb = MicroBitSimulator(self.getContainer())
self._positionWidget(mb, row, column, colspan, rowspan)
self.widgetManager.add(self.Widgets.MicroBit, title, mb)
return mb
def setMicroBitImage(self, title, image):
self.widgetManager.get(self.Widgets.MicroBit, title).show(image)
def setMicroBitPixel(self, title, x, y, brightness):
self.widgetManager.get(self.Widgets.MicroBit, title).set_pixel(x, y, brightness)
def clearMicroBit(self, title):
self.widgetManager.get(self.Widgets.MicroBit, title).clear()
#####################################
# DatePicker Widget - using Form Container
#####################################
def date(self, title, value=None, *args, **kwargs):
""" simpleGUI - shortner for datePicker() """
return self.datePicker(title, value, *args, **kwargs)
def datePicker(self, title, value=None, *args, **kwargs):
""" simpleGUI - adds, sets & gets datePickers all in one go """
widgKind = self.Widgets.DatePicker
change = kwargs.pop("change", None)
toValue = kwargs.pop("toValue", None)
try: self.widgetManager.verify(widgKind, title)
except: # widget exists
dp = self.getDatePicker(title)
else: # new widget
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
dp = self.addDatePicker(title, *args, **kwargs)
if value is not None:
if toValue is None: self.setDatePicker(title, value)
else: self.setDatePickerRange(title, startYear=value, endYear=toValue)
if change is not None: self.setDatePickerChangeFunction(title, change)
if len(kwargs) > 0:
self._configWidget(title, widgKind, **kwargs)
return dp
def addDatePicker(self, name, row=None, column=0, colspan=0, rowspan=0):
''' adds a date picker at the specified position '''
self.widgetManager.verify(self.Widgets.DatePicker, name)
# initial DatePicker has these dates
days = range(1, 32)
self.MONTH_NAMES = calendar.month_name[1:]
years = range(1970, 2021)
# create a frame, and add the widgets
frame = self.startFrame(name, row, column, colspan, rowspan)
self.setExpand("none")
self.addLabel(name + "_DP_DayLabel", "Day:", 0, 0)
self.setLabelAlign(name + "_DP_DayLabel", "w")
self.addOptionBox(name + "_DP_DayOptionBox", days, 0, 1)
self.addLabel(name + "_DP_MonthLabel", "Month:", 1, 0)
self.setLabelAlign(name + "_DP_MonthLabel", "w")
self.addOptionBox(name + "_DP_MonthOptionBox", self.MONTH_NAMES, 1, 1)
self.addLabel(name + "_DP_YearLabel", "Year:", 2, 0)
self.setLabelAlign(name + "_DP_YearLabel", "w")
self.addOptionBox(name + "_DP_YearOptionBox", years, 2, 1)
self.setOptionBoxChangeFunction(
name + "_DP_MonthOptionBox",
self._updateDatePickerDays)
self.setOptionBoxChangeFunction(
name + "_DP_YearOptionBox",
self._updateDatePickerDays)
self.stopFrame()
frame.isContainer = False
self.widgetManager.add(self.Widgets.DatePicker, name, frame)
def setDatePickerFg(self, name, fg):
self.widgetManager.get(self.Widgets.DatePicker, name)
self.setLabelFg(name + "_DP_DayLabel", fg)
self.setLabelFg(name + "_DP_MonthLabel", fg)
self.setLabelFg(name + "_DP_YearLabel", fg)
def setDatePickerChangeFunction(self, title, function):
self.widgetManager.get(self.Widgets.DatePicker, title)
cmd = self.MAKE_FUNC(function, title)
self.setOptionBoxChangeFunction(title + "_DP_DayOptionBox", cmd)
self.widgetManager.get(self.Widgets.OptionBox, title + "_DP_DayOptionBox").function = cmd
# function to update DatePicker dropDowns
def _updateDatePickerDays(self, title):
if title.find("_DP_MonthOptionBox") > -1:
title = title.split("_DP_MonthOptionBox")[0]
elif title.find("_DP_YearOptionBox") > -1:
title = title.split("_DP_YearOptionBox")[0]
else:
self.warn("Can't update days in DatePicker:%s", title)
return
day = self.getOptionBox(title + "_DP_DayOptionBox")
month = self.MONTH_NAMES.index(self.getOptionBox(title + "_DP_MonthOptionBox")) + 1
year = int(self.getOptionBox(title + "_DP_YearOptionBox"))
days = range(1, calendar.monthrange(year, month)[1] + 1)
self.changeOptionBox(title + "_DP_DayOptionBox", days)
# keep previous day if possible
with PauseLogger():
self.setOptionBox(title + "_DP_DayOptionBox", day, callFunction=False)
box = self.widgetManager.get(self.Widgets.OptionBox, title + "_DP_DayOptionBox")
if hasattr(box, 'function'):
box.function()
# set a date for the named DatePicker
def setDatePickerRange(self, title, startYear, endYear=None):
self.widgetManager.get(self.Widgets.DatePicker, title)
if endYear is None:
endYear = datetime.date.today().year
years = range(startYear, endYear + 1)
self.changeOptionBox(title + "_DP_YearOptionBox", years)
def setDatePicker(self, title, date="today"):
self.widgetManager.get(self.Widgets.DatePicker, title)
if date == "today":
date = datetime.date.today()
self.setOptionBox(title + "_DP_YearOptionBox", str(date.year))
self.setOptionBox(title + "_DP_MonthOptionBox", date.month - 1)
self.setOptionBox(title + "_DP_DayOptionBox", date.day - 1)
def clearDatePicker(self, title, callFunction=True):
self.widgetManager.get(self.Widgets.DatePicker, title)
self.setOptionBox(title + "_DP_YearOptionBox", 0, callFunction)
self.setOptionBox(title + "_DP_MonthOptionBox", 0, callFunction)
self.setOptionBox(title + "_DP_DayOptionBox", 0, callFunction)
def clearAllDatePickers(self, callFunction=False):
for k in self.widgetManager.group(self.Widgets.DatePicker):
self.clearDatePicker(k, callFunction)
def getDatePicker(self, title):
self.widgetManager.get(self.Widgets.DatePicker, title)
day = int(self.getOptionBox(title + "_DP_DayOptionBox"))
month = self.MONTH_NAMES.index(
self.getOptionBox(
title + "_DP_MonthOptionBox")) + 1
year = int(self.getOptionBox(title + "_DP_YearOptionBox"))
date = datetime.date(year, month, day)
return date
def getAllDatePickers(self):
dps = {}
for k in self.widgetManager.group(self.Widgets.DatePicker):
dps[k] = self.getDatePicker(k)
return dps
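    # Date-picker sketch (illustrative only; assumes app = gui()):
    #   app.addDatePicker("dob")
    #   app.setDatePickerRange("dob", 1950, 2000)   # limit the year drop-down
    #   app.setDatePicker("dob")                    # defaults to today
    #   app.getDatePicker("dob")                    # -> datetime.date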
#####################################
    # FUNCTIONS for ACCESSIBILITY
#####################################
def _makeAccess(self):
if not self.accessMade:
def _close(): self.hideSubWindow("access_access_subwindow")
def _changeFg(): self.label("access_fg_colBox", bg=self.colourBox(self.getLabelBg("access_fg_colBox")))
def _changeBg(): self.label("access_bg_colBox", bg=self.colourBox(self.getLabelBg("access_bg_colBox")))
def _settings():
font = {"underline":self.check("access_underline_check"), "overstrike":self.check("access_overstrike_check")}
font["weight"] = "bold" if self.check("access_bold_check") is True else "normal"
font["slant"] = "roman" if self.radio("access_italic_radio") == "Normal" else "italic"
if len(self.listbox("access_family_listbox")) > 0: font["family"] = self.listbox("access_family_listbox")[0]
if self.option("access_size_option") is not None: font["size"] = self.option("access_size_option")
if self.check('access_label_check'): self.labelFont = font
if self.check('access_input_check'): self.inputFont = font
if self.check('access_button_check'): self.buttonFont = font
self.bg = self.getLabelBg("access_bg_colBox")
self.fg = self.getLabelBg("access_fg_colBox")
self.accessOrigFont = self.accessOrigBg = self.accessOrigFg = None
with self.subWindow("access_access_subwindow", sticky = "news", title="Accessibility", resizable=False) as sw:
if not self.ttk:
sw.config(padx=5, pady=1)
with self.labelFrame("access_font_labelframe", sticky="news", name="Font") as lf:
if not self.ttk:
lf.config(padx=5, pady=5, font=self._accessFont)
with self.frame("access_ticks_frame", colspan=2):
self.check("access_label_check", True, label="Labels", pos=(0,0), font=self._accessFont, tip="Set label fonts")
self.check("access_input_check", label="Inputs", pos=(0,1), font=self._accessFont, tip="Set input fonts")
self.check("access_button_check", label="Buttons", pos=(0,2), font=self._accessFont, tip="Set button fonts")
self.listbox("access_family_listbox", self.fonts, rows=6, tip="Choose a font", colspan=2, font=self._accessFont)
self.option("access_size_option", [7, 8, 9, 10, 12, 13, 14, 16, 18, 20, 22, 25, 29, 34, 40], label="Size:", tip="Choose a font size", font=self._accessFont)
self.check("access_bold_check", name="Bold", pos=('p',1), tip="Check this to make all font bold", font=self._accessFont)
self.radio("access_italic_radio", "Normal", tip="No italics", font=self._accessFont)
self.radio("access_italic_radio", "Italic", pos=('p',1), tip="Set font italic", font=self._accessFont)
self.check("access_underline_check", name="Underline", tip="Underline all text", font=self._accessFont)
self.check("access_overstrike_check", name="Overstrike", pos=('p',1), tip="Strike out all text", font=self._accessFont)
with self.labelFrame("access_colour_labelframe", sticky="news", name="Colours") as lf:
lf.config(padx=5, pady=5, font=self._accessFont)
self.label("access_fg_text", "Foreground:", sticky="ew", anchor="w", font=self._accessFont)
self.label("access_fg_colBox", "", pos=('p',1), sticky="ew", submit=_changeFg, relief="ridge", tip="Click here to set the foreground colour", font=self._accessFont, width=14)
self.label("access_bg_text", "Background:", sticky="ew", anchor="w", font=self._accessFont)
self.label("access_bg_colBox", "", pos=('p',1), sticky="ew", submit=_changeBg, relief="ridge", tip="Click here to set the background colour", font=self._accessFont, width=14)
self.sticky="se"
with self.frame("access_button_box"):
self.button("access_apply_button", _settings, name="Apply", pos=(0,0), font=self._accessFont)
self.button("access_reset_button", self._resetAccess, name="Reset", pos=(0,1), font=self._accessFont)
self.button("access_close_button", _close, name="Close", pos=(0,2), font=self._accessFont)
self.accessMade = True
def _resetAccess(self):
if self.accessMade:
self.check("access_label_check", True)
self.check("access_input_check", False)
self.check("access_button_check", False)
self.listbox("access_family_listbox", self.accessOrigFont["family"])
self.option("access_size_option", str(self.accessOrigFont["size"]))
if self.accessOrigFont["weight"] == "normal": self.check("access_bold_check", False)
else: self.check("access_bold_check", True)
if self.accessOrigFont["slant"] == "roman": self.radio("access_italic_radio", "Normal")
else: self.radio("access_italic_radio", "Italic")
self.check("access_overstrike_check", self.accessOrigFont["overstrike"])
self.check("access_underline_check", self.accessOrigFont["underline"])
self.label("access_fg_colBox", bg=self.accessOrigFg)
self.label("access_bg_colBox", bg=self.accessOrigBg)
else:
gui.warn("Accessibility not set up yet.")
def showAccess(self, location=None):
self._makeAccess()
# update current settings
self.accessOrigFont = self.font
self.accessOrigBg = self.bg
self.accessOrigFg = self.fg
self._resetAccess()
self.showSubWindow("access_access_subwindow")
#####################################
# FUNCTIONS for labels
#####################################
def _parsePos(self, pos, kwargs):
# alternative for specifying position
        if not isinstance(pos, (list, tuple)): pos = (pos,)
if len(pos) > 0: kwargs["row"] = pos[0]
if len(pos) > 1: kwargs["column"] = pos[1]
if len(pos) > 2: kwargs["colspan"] = pos[2]
if len(pos) > 3: kwargs["rowspan"] = pos[3]
# allow an alternative kwarg
if "col" in kwargs: kwargs["column"]=kwargs.pop("col")
# let user specify sticky/stretch/expand
sticky = kwargs.pop("sticky", None)
if sticky is not None: self.setSticky(sticky)
stretch = kwargs.pop("stretch", None)
if stretch is not None: self.setStretch(stretch)
expand = kwargs.pop("expand", None)
if expand is not None: self.setExpand(expand)
return kwargs
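# A minimal sketch of the pos shorthand handled by _parsePos() (all values
# hypothetical) - a tuple maps onto row/column/colspan/rowspan in order:
#   app.label("l1", "hi", pos=(2, 1))        # row=2, column=1
#   app.label("l2", "hi", pos=(2, 1, 3))     # ...plus colspan=3
#   app.label("l3", "hi", pos=(2, 1, 3, 2))  # ...plus rowspan=2
#   app.label("l4", "hi", pos=2)             # a bare value is treated as the row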
def label(self, title, value=None, *args, **kwargs):
""" simpleGUI - adds, sets & gets labels all in one go """
widgKind = self.Widgets.Label
kind = kwargs.pop("kind", "standard").lower().strip()
try: self.widgetManager.verify(widgKind, title)
except: # widget exists
if value is not None: self.setLabel(title, value)
label = self.getLabel(title)
else: # new widget
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
if kind == "flash": label = self._labelMaker(title, value, kind, *args, **kwargs)
elif kind == "selectable": label = self._labelMaker(title, value, kind, *args, **kwargs)
else: label = self._labelMaker(title, value, "label", *args, **kwargs)
if len(kwargs) > 0:
self._configWidget(title, widgKind, **kwargs)
return label
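# Usage sketch for the all-in-one label() above: the first call creates the
# widget, later calls with a value set it, and a call without one reads it
# (the widget name "status" is hypothetical):
#   app.label("status", "Ready")       # create
#   app.label("status", "Working")     # update
#   current = app.label("status")      # read -> "Working"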
def _labelMaker(self, title, text=None, kind="label", row=None, column=0, colspan=0, rowspan=0, **kwargs):
""" Internal wrapper, to hide kwargs from original add functions """
if kind == "flash": return self.addFlashLabel(title, text, row, column, colspan, rowspan)
elif kind == "selectable": return self.addSelectableLabel(title, text, row, column, colspan, rowspan)
elif kind == "label": return self.addLabel(title, text, row, column, colspan, rowspan)
def _flash(self):
if not self.alive: return
if self.doFlash:
for lab in self.widgetManager.group(self.Widgets.FlashLabel):
bg = lab.cget("background")
fg = lab.cget("foreground")
lab.config(background=fg, foreground=bg)
self.flashId = self.topLevel.after(250, self._flash)
def addFlashLabel(self, title, text=None, row=None, column=0, colspan=0, rowspan=0):
''' adds a label with flashing text '''
lab = self.addLabel(title, text, row, column, colspan, rowspan)
self.widgetManager.log(self.Widgets.FlashLabel, lab)
self.doFlash = True
return lab
def addSelectableLabel(self, title, text=None, row=None, column=0, colspan=0, rowspan=0):
''' adds a label with selectable text '''
return self.addLabel(title, text, row, column, colspan, rowspan, selectable=True)
def addLabel(self, title, text=None, row=None, column=0, colspan=0, rowspan=0, selectable=False):
"""Add a label to the GUI.
:param title: a unique identifier for the Label
:param text: optional text for the Label
:param row/column/colspan/rowspan: the row/column to position the label in & how many rows/columns to stretch across
:raises ItemLookupError: raised if the title is not unique
"""
self.widgetManager.verify(self.Widgets.Label, title)
if text is None:
gui.trace("Not specifying text for labels (%s) now uses the title for the text. If you want an empty label, pass an empty string ''", title)
text = title
if not selectable:
if not self.ttkFlag:
lab = Label(self.getContainer(), text=text)
lab.config(justify=LEFT, font=self._getContainerProperty('labelFont'), background=self._getContainerBg())
lab.origBg = self._getContainerBg()
else:
lab = ttk.Label(self.getContainer(), text=text)
else:
lab = SelectableLabel(self.getContainer(), text=text)
lab.config(justify=CENTER, font=self._getContainerProperty('labelFont'), background=self._getContainerBg())
lab.origBg = self._getContainerBg()
lab.inContainer = False
lab.DEFAULT_TEXT = text
self.widgetManager.add(self.Widgets.Label, title, lab)
self._positionWidget(lab, row, column, colspan, rowspan)
return lab
def addEmptyLabel(self, title, row=None, column=0, colspan=0, rowspan=0):
''' adds an empty label '''
return self.addLabel(title=title, text='', row=row, column=column, colspan=colspan, rowspan=rowspan)
def addLabels(self, names, row=None, colspan=0, rowspan=0):
''' adds a set of labels, in the row, spanning the specified columns '''
frame = self._makeWidgetBox()(self.getContainer())
if not self.ttkFlag:
frame.config(background=self._getContainerBg())
for i in range(len(names)):
self.widgetManager.verify(self.Widgets.Label, names[i])
if not self.ttkFlag:
lab = Label(frame, text=names[i])
lab.config(font=self._getContainerProperty('labelFont'), justify=LEFT, background=self._getContainerBg())
else:
lab = ttk.Label(frame, text=names[i])
lab.DEFAULT_TEXT = names[i]
lab.inContainer = False
self.widgetManager.add(self.Widgets.Label, names[i], lab)
lab.grid(row=0, column=i)
Grid.columnconfigure(frame, i, weight=1)
Grid.rowconfigure(frame, 0, weight=1)
frame.theWidgets.append(lab)
self._positionWidget(frame, row, 0, colspan, rowspan)
self.widgetManager.log(self.Widgets.FrameBox, frame)
def setLabel(self, name, text):
lab = self.widgetManager.get(self.Widgets.Label, name)
lab.config(text=text)
def getLabel(self, name):
lab = self.widgetManager.get(self.Widgets.Label, name)
return lab.cget("text")
def clearLabel(self, name):
self.setLabel(name, "")
#####################################
# FUNCTIONS to add Text Area
#####################################
def text(self, title, value=None, *args, **kwargs):
""" simpleGUI - shortner for textArea() """
return self.textArea(title, value, *args, **kwargs)
def textArea(self, title, value=None, *args, **kwargs):
""" adds, sets & gets textAreas all in one go """
widgKind = self.Widgets.TextArea
scroll = kwargs.pop("scroll", False)
end = kwargs.pop("end", True)
callFunction = kwargs.pop("callFunction", True)
try: self.widgetManager.verify(self.Widgets.TextArea, title)
except: # widget exists
text = self.getTextArea(title)
else: # new widget
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
if scroll: text = self._textMaker(title, "scroll", *args, **kwargs)
else: text = self._textMaker(title, "text", *args, **kwargs)
callFunction = False
if value is not None: self.setTextArea(title, value, end=end, callFunction=callFunction)
if len(kwargs) > 0:
self._configWidget(title, widgKind, **kwargs)
return text
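# Usage sketch for textArea()/text() above (names hypothetical): scroll=True
# routes through addScrolledTextArea(), and end=False prepends instead of
# appending on later calls:
#   app.textArea("notes", "first line\n", scroll=True)
#   app.textArea("notes", "zeroth line\n", end=False)
#   contents = app.textArea("notes")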
def _textMaker(self, title, kind="text", row=None, column=0, colspan=0, rowspan=0, *args, **kwargs):
if kind == "scroll": return self.addScrolledTextArea(title, row, column, colspan, rowspan)
elif kind == "text": return self.addTextArea(title, row, column, colspan, rowspan)
def _buildTextArea(self, title, frame, scrollable=False):
""" Internal wrapper, used for building TextAreas.
:param title: the key used to reference this TextArea
:param frame: this should be a container, used as the parent for the TextArea
:param scrollable: whether to build a scrollable TextArea
:returns: the created TextArea
:raises ItemLookupError: if the title is already in use
"""
self.widgetManager.verify(self.Widgets.TextArea, title)
if scrollable:
text = AjScrolledText(frame)
else:
text = AjText(frame)
text.config(width=20, height=10, undo=True, wrap=WORD)
if not self.ttkFlag:
if self.platform in [self.MAC, self.LINUX]:
text.config(highlightbackground=self._getContainerBg())
text.bind("<Tab>", self._focusNextWindow)
text.bind("<Shift-Tab>", self._focusLastWindow)
# add a right click menu
text.var = None
self._addRightClickMenu(text)
self.widgetManager.add(self.Widgets.TextArea, title, text)
self.logTextArea(title)
return text
def addTextArea(self, title, row=None, column=0, colspan=0, rowspan=0, text=None):
""" Adds a TextArea with the specified title
Simply calls internal _buildTextArea function before positioning the widget
:param title: the key used to reference this TextArea
:returns: the created TextArea
:raises ItemLookupError: if the title is already in use
"""
txt = self._buildTextArea(title, self.getContainer())
self._positionWidget(txt, row, column, colspan, rowspan, N+E+S+W)
if text is not None: self.setTextArea(title, text, callFunction=False)
return txt
def addScrolledTextArea(self, title, row=None, column=0, colspan=0, rowspan=0, text=None):
""" Adds a Scrollable TextArea with the specified title
Simply calls internal _buildTextArea function, specifying a scrollable TextArea, before positioning the widget
:param title: the key used to reference this TextArea
:returns: the created TextArea
:raises ItemLookupError: if the title is already in use
"""
txt = self._buildTextArea(title, self.getContainer(), True)
self._positionWidget(txt, row, column, colspan, rowspan, N+E+S+W)
if text is not None: self.setTextArea(title, text, callFunction=False)
return txt
def getTextArea(self, title):
""" Gets the text in the specified TextArea
:param title: the TextArea to check
:returns: the text in the specified TextArea
:raises ItemLookupError: if the title can't be found
"""
return self.widgetManager.get(self.Widgets.TextArea, title).getText()
def getAllTextAreas(self):
""" Convenience function to get the text for all TextAreas in the GUI.
:returns: a dictionary containing the result of calling getTextArea for every TextArea in the GUI
"""
areas = {}
for k in self.widgetManager.group(self.Widgets.TextArea):
areas[k] = self.getTextArea(k)
return areas
def textAreaCreateTag(self, title, name, **kwargs):
""" creates a new tag on the specified text area """
ta = self.widgetManager.get(self.Widgets.TextArea, title)
ta.tag_config(name, **kwargs)
def textAreaChangeTag(self, title, name, **kwargs):
""" changes a tag on the specified text area """
ta = self.widgetManager.get(self.Widgets.TextArea, title)
ta.tag_config(name, **kwargs)
def textAreaDeleteTag(self, title, *tags):
""" deletes the specified tag """
ta = self.widgetManager.get(self.Widgets.TextArea, title)
ta.tag_delete(*tags)
def textAreaTagPattern(self, title, tag, pattern, regexp=False):
""" applies the tag to the specified text """
ta = self.widgetManager.get(self.Widgets.TextArea, title)
ta.highlightPattern(pattern, tag, regexp=regexp)
def textAreaTagRange(self, title, tag, start, end=END):
""" applies the tag to the specified range """
ta = self.widgetManager.get(self.Widgets.TextArea, title)
ta.tag_add(tag, start, end)
def textAreaTagSelected(self, title, tag):
if self.widgetManager.get(self.Widgets.TextArea, title).tag_ranges(SEL):
self.textAreaTagRange(title, tag, SEL_FIRST, SEL_LAST)
self.widgetManager.get(self.Widgets.TextArea, title).focus_set()
def textAreaUntagRange(self, title, tag, start, end=END):
"""removes the tag from the specified range """
ta = self.widgetManager.get(self.Widgets.TextArea, title)
ta.tag_remove(tag, start, end)
def textAreaToggleFontRange(self, title, tag, start, end=END):
""" will toggle the tag at the specified range """
ta = self.widgetManager.get(self.Widgets.TextArea, title)
tag = ta.verifyFontTag(tag)
if tag in ta.tag_names(start):
ta.tag_remove("AJ_"+tag, start, end)
else:
self.textAreaApplyFontRange(title, tag, start, end)
def textAreaToggleFontSelected(self, title, tag):
if self.widgetManager.get(self.Widgets.TextArea, title).tag_ranges(SEL):
self.textAreaToggleFontRange(title, tag, SEL_FIRST, SEL_LAST)
self.widgetManager.get(self.Widgets.TextArea, title).focus_set()
def textAreaApplyFontSelected(self, title, tag):
if self.widgetManager.get(self.Widgets.TextArea, title).tag_ranges(SEL):
self.textAreaApplyFontRange(title, tag, SEL_FIRST, SEL_LAST)
self.widgetManager.get(self.Widgets.TextArea, title).focus_set()
def textAreaApplyFontRange(self, title, tag, start, end=END):
"""removes the tag from the specified range """
ta = self.widgetManager.get(self.Widgets.TextArea, title)
tag = ta.verifyFontTag(tag)
if tag != "UNDERLINE":
ta.tag_remove("AJ_BOLD", start, end)
ta.tag_remove("AJ_ITALIC", start, end)
ta.tag_remove("AJ_BOLD_ITALIC", start, end)
ta.tag_add("AJ_" + tag, start, end)
def textAreaUntagSelected(self, title, tag):
if self.widgetManager.get(self.Widgets.TextArea, title).tag_ranges(SEL):
self.textAreaUntagRange(title, tag, SEL_FIRST, SEL_LAST)
self.widgetManager.get(self.Widgets.TextArea, title).focus_set()
def textAreaToggleTagRange(self, title, tag, start, end=END):
""" will toggle the tag at the specified range """
ta = self.widgetManager.get(self.Widgets.TextArea, title)
if tag in ta.tag_names(start): self.textAreaUntagRange(title, tag, start, end)
else: self.textAreaTagRange(title, tag, start, end)
def textAreaToggleTagSelected(self, title, tag):
if self.widgetManager.get(self.Widgets.TextArea, title).tag_ranges(SEL):
self.textAreaToggleTagRange(title, tag, SEL_FIRST, SEL_LAST)
self.widgetManager.get(self.Widgets.TextArea, title).focus_set()
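# Tag workflow sketch for the helpers above (tag name & options hypothetical):
#   app.textAreaCreateTag("notes", "warn", foreground="red")
#   app.textAreaTagPattern("notes", "warn", "TODO")      # tag every match
#   app.textAreaTagRange("notes", "warn", "1.0", "2.0")  # or an explicit range
#   app.textAreaToggleTagSelected("notes", "warn")       # or the selection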
def searchTextArea(self, title, pattern, start=None, stop=None, nocase=True, backwards=False):
""" will find and highlight the specified text, returning the position """
ta = self.widgetManager.get(self.Widgets.TextArea, title)
if start is None: start = ta.index(INSERT)
pos = ta.search(pattern, start, stopindex=stop, nocase=nocase, backwards=backwards)
ta.focus_set()
if pos == "":
return None
else:
end = str(pos) + " + " + str(len(pattern)) + " c"
ta.see(pos)
ta.tag_add(SEL, pos, end)
ta.mark_set("insert", pos)
return pos
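# searchTextArea() sketch - the match index (e.g. "3.7") is returned, or None,
# and the match is left selected, so stepping past it finds the next one:
#   pos = app.searchTextArea("notes", "appJar")
#   while pos is not None:
#       pos = app.searchTextArea("notes", "appJar", start=pos + "+1c")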
def getTextAreaTag(self, title, tag):
""" returns all details about the specified tag """
ta = self.widgetManager.get(self.Widgets.TextArea, title)
return ta.tag_config(tag)
def getTextAreaTags(self, title):
""" returns a list of all tags in the text area """
ta = self.widgetManager.get(self.Widgets.TextArea, title)
return ta.tag_names()
def setTextAreaFont(self, title, **kwargs):
""" changes the font of a text area """
self.widgetManager.get(self.Widgets.TextArea, title).setFont(**kwargs)
def setTextArea(self, title, text, end=True, callFunction=True):
""" Add the supplied text to the specified TextArea
:param title: the TextArea to change
:param text: the text to add to the TextArea
:param end: where to insert the text, by default it is added to the end. Set end to False to add to the beginning.
:param callFunction: whether to generate an event to notify that the widget has changed
:returns: None
:raises ItemLookupError: if the title can't be found
"""
ta = self.widgetManager.get(self.Widgets.TextArea, title)
ta.pauseCallFunction(callFunction)
# in case it's disabled
_state = ta.cget('state')
ta.config(state='normal')
if end:
ta.insert(END, text)
else:
ta.insert('1.0', text)
ta.config(state=_state)
ta.resumeCallFunction()
def clearTextArea(self, title, callFunction=True):
""" Removes all text from the specified TextArea
:param title: the TextArea to change
:param callFunction: whether to generate an event to notify that the widget has changed
:returns: None
:raises ItemLookupError: if the title can't be found
"""
ta = self.widgetManager.get(self.Widgets.TextArea, title)
ta.pauseCallFunction(callFunction)
# in case it's disabled
_state = ta.cget('state')
ta.config(state='normal')
ta.delete('1.0', END)
ta.config(state=_state)
ta.resumeCallFunction()
def clearAllTextAreas(self, callFunction=False):
""" Convenience function to clear all TextAreas in the GUI
Will simply call clearTextArea on each TextArea
:param callFunction: whether to generate an event to notify that the widget has changed
:returns: None
"""
for ta in self.widgetManager.group(self.Widgets.TextArea):
self.clearTextArea(ta, callFunction=callFunction)
def highlightTextArea(self, title, start, end=END):
""" selects text in the specified range """
ta = self.widgetManager.get(self.Widgets.TextArea, title)
ta.tag_add(SEL, start, end)
def logTextArea(self, title):
""" Creates an md5 hash - can be used later to check if the TextArea has changed
The hash is stored in the widget
:param title: the TextArea to hash
:returns: None
:raises ItemLookupError: if the title can't be found
"""
self._loadHashlib()
if hashlib is False:
self.warn("Unable to log TextArea, hashlib library not available")
else:
text = self.widgetManager.get(self.Widgets.TextArea, title)
text.__hash = text.getTextAreaHash()
def textAreaChanged(self, title):
""" Creates a temporary md5 hash - and compares it with a previously generated & stored hash
The previous hash has to be generated manually, by calling logTextArea
:param title: the TextArea to hash
:returns: bool - True if the TextArea has changed or False if it hasn't
:raises ItemLookupError: if the title can't be found
"""
self._loadHashlib()
if hashlib is False:
self.warn("Unable to log TextArea, hashlib library not available")
else:
text = self.widgetManager.get(self.Widgets.TextArea, title)
return text.__hash != text.getTextAreaHash()
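# Change-tracking sketch using the two hash helpers above (saveContents() is
# hypothetical):
#   app.logTextArea("notes")             # snapshot the current contents
#   ...                                  # user edits the widget
#   if app.textAreaChanged("notes"):     # compare against the snapshot
#       saveContents(app.textArea("notes"))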
#####################################
# FUNCTIONS to add Tree Widgets
#####################################
def tree(self, title, value=None, *args, **kwargs):
""" simpleGUI - adds, sets & gets trees all in one go """
widgKind = self.Widgets.Tree
click = kwargs.pop("click", None)
dblClick = kwargs.pop("dbl", None)
edit = kwargs.pop("edit", None)
editable = kwargs.pop("editable", None)
try: self.widgetManager.verify(widgKind, title)
except: # widget exists
tree = self.getTree(title)
else: # new widget
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
tree = self.addTree(title, value, *args, **kwargs)
if len(kwargs) > 0:
self._configWidget(title, widgKind, **kwargs)
if click is not None: self.setTreeClickFunction(title, click)
if edit is not None: self.setTreeEditFunction(title, edit)
if dblClick is not None: self.setTreeDoubleClickFunction(title, dblClick)
if editable is not None: self.setTreeEditable(title, editable)
return tree
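# Usage sketch for tree() above - the widget is built from an XML string, and
# the click/dbl/edit kwargs wire up the optional callbacks (onTreeClick() and
# the XML below are hypothetical):
#   xml = "<people><person><name>ann</name></person></people>"
#   app.tree("people", xml, click=onTreeClick, editable=True)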
def addTree(self, title, data, row=None, column=0, colspan=0, rowspan=0):
''' adds a navigable tree, displaying the specified xml '''
self.widgetManager.verify(self.Widgets.Tree, title)
self._importAjtree()
if parseString is False:
self.warn("Unable to parse xml files. .addTree() not available")
return
xmlDoc = parseString(data)
frame = ScrollPane(
self.getContainer(),
relief=RAISED,
borderwidth=2,
bg="#FFFFFF",
highlightthickness=0,
takefocus=1)
self._positionWidget(frame, row, column, colspan, rowspan, "NSEW")
item = self._makeAjTreeData()(xmlDoc.documentElement)
node = self._makeAjTreeNode()(frame.getPane(), None, item)
self.widgetManager.add(self.Widgets.Tree, title, node)
# update() & expand() called in go() function
return node
def setTreeEditable(self, title, value=True):
tree = self.widgetManager.get(self.Widgets.Tree, title)
tree.item.setCanEdit(value)
def setTreeBg(self, title, colour):
tree = self.widgetManager.get(self.Widgets.Tree, title)
tree.setBgColour(colour)
def setTreeFg(self, title, colour):
tree = self.widgetManager.get(self.Widgets.Tree, title)
tree.setFgColour(colour)
def setTreeHighlightBg(self, title, colour):
tree = self.widgetManager.get(self.Widgets.Tree, title)
tree.setBgHColour(colour)
def setTreeHighlightFg(self, title, colour):
tree = self.widgetManager.get(self.Widgets.Tree, title)
tree.setFgHColour(colour)
def setTreeColours(self, title, fg, bg, fgH, bgH):
tree = self.widgetManager.get(self.Widgets.Tree, title)
tree.setAllColours(bg, fg, bgH, fgH)
def setTreeDoubleClickFunction(self, title, func):
if func is not None:
tree = self.widgetManager.get(self.Widgets.Tree, title)
tree.item.registerDblClick(title, func)
def setTreeClickFunction(self, title, func):
if func is not None:
tree = self.widgetManager.get(self.Widgets.Tree, title)
tree.item.registerClick(title, func)
def setTreeEditFunction(self, title, func):
if func is not None:
tree = self.widgetManager.get(self.Widgets.Tree, title)
command = self.MAKE_FUNC(func, title)
tree.registerEditEvent(command)
# get whole tree as XML
def getTreeXML(self, title):
tree = self.widgetManager.get(self.Widgets.Tree, title)
return tree.item.node.toxml()
# get selected node as a string
def getTreeSelected(self, title):
tree = self.widgetManager.get(self.Widgets.Tree, title)
return tree.getSelectedText()
# get selected node (and children) as XML
def getTreeSelectedXML(self, title):
tree = self.widgetManager.get(self.Widgets.Tree, title)
item = tree.getSelected()
if item is not None:
return item.node.toxml()
else:
return None
def generateTree(self, title):
""" displays data inside tree """
tree = self.widgetManager.get(self.Widgets.Tree, title)
tree.update()
tree.expand()
#####################################
# FUNCTIONS to add Message Box
#####################################
def message(self, title, value=None, *args, **kwargs):
""" simpleGUI - adds, sets & gets messages all in one go """
widgKind = self.Widgets.Message
try: self.widgetManager.verify(self.Widgets.Message, title)
except: # widget exists
if value is not None: self.setMessage(title, value)
msg = self.getMessage(title)
else: # new widget
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
msg = self._messageMaker(title, value, *args, **kwargs)
if len(kwargs) > 0:
self._configWidget(title, widgKind, **kwargs)
return msg
def _messageMaker(self, title, text, row=None, column=0, colspan=0, rowspan=0, *args, **kwargs):
return self.addMessage(title, text, row, column, colspan, rowspan)
def addMessage(self, title, text=None, row=None, column=0, colspan=0, rowspan=0):
''' adds a message box, to display text across multiple lines '''
self.widgetManager.verify(self.Widgets.Message, title)
if text is None:
text = title
gui.trace("Not specifying text for messages (%s) now uses the title for the text. If you want an empty message, pass an empty string ''", title)
mess = Message(self.getContainer())
mess.config(text=text)
mess.config(font=self._getContainerProperty('labelFont'))
mess.config(justify=LEFT, background=self._getContainerBg())
mess.DEFAULT_TEXT = text
if self.platform in [self.MAC, self.LINUX]:
mess.config(highlightbackground=self._getContainerBg())
self.widgetManager.add(self.Widgets.Message, title, mess)
self._positionWidget(mess, row, column, colspan, rowspan)
# mess.bind("<Configure>", lambda e: mess.config(width=e.width-10))
return mess
def addEmptyMessage(self, title, row=None, column=0, colspan=0, rowspan=0):
''' adds an empty message box '''
return self.addMessage(title, "", row, column, colspan, rowspan)
def setMessage(self, title, text):
mess = self.widgetManager.get(self.Widgets.Message, title)
mess.config(text=text)
def setMessageAspect(self, title, aspect):
""" set a new aspect ratio for the text in this widget """
mess = self.widgetManager.get(self.Widgets.Message, title)
mess.config(aspect=aspect)
def clearMessage(self, title):
self.setMessage(title, "")
def getMessage(self, title):
mess = self.widgetManager.get(self.Widgets.Message, title)
return mess.cget("text")
#####################################
# FUNCTIONS for entry boxes
#####################################
def entry(self, title, value=None, *args, **kwargs):
""" simpleGUI - adds, sets & gets entries all in one go """
widgKind = self.Widgets.Entry
default = kwargs.pop("default", None)
limit = kwargs.pop("limit", None)
case = kwargs.pop("case", None)
rows = kwargs.pop("rows", None)
secret = kwargs.pop("secret", False)
kind = kwargs.pop("kind", "standard").lower().strip()
labBg = kwargs.pop("labBg", None)
try: self.widgetManager.verify(self.Widgets.Entry, title)
except: # widget exists
if value is not None: self.setEntry(title, value, *args, **kwargs)
ent = self.getEntry(title)
else: # new widget
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
# create the entry widget
if kind == "auto":
ent = self._entryMaker(title, *args, secret=secret, kind=kind, words=value, **kwargs)
else:
ent = self._entryMaker(title, *args, secret=secret, kind=kind, **kwargs)
if not ent: return
# apply any setter values
if limit is not None: self.setEntryMaxLength(title, limit)
if case == "upper": self.setEntryUpperCase(title)
elif case == "lower": self.setEntryLowerCase(title)
if default is not None: self.setEntryDefault(title, default)
if kind != "auto":
if value is not None: self.setEntry(title, value)
else:
if rows is not None: self.setAutoEntryNumRows(title, rows)
if labBg is not None and self.widgetManager.get(self.Widgets.Entry, title).isValidation:
self.setValidationEntryLabelBg(title, labBg)
if len(kwargs) > 0:
self._configWidget(title, widgKind, **kwargs)
return ent
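# Usage sketch for entry() above (all names hypothetical) - kind selects the
# builder, and the popped kwargs map onto the setters called afterwards:
#   app.entry("user", label="Username:", default="-- username --", limit=20)
#   app.entry("pass", secret=True, label="Password:")
#   app.entry("age", kind="numeric")
#   app.entry("lang", kind="auto", value=["python", "ruby", "go"], rows=4)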
def setValidationEntryLabelBg(self, title, bg):
ent = self.widgetManager.get(self.Widgets.Entry, title)
if not ent.isValidation:
raise Exception("You can only set label BGs on validation entries")
ent.lab.config(bg=bg)
def _entryMaker(self, title, row=None, column=0, colspan=0, rowspan=0, secret=False, label=False, kind="standard", words=None, **kwargs):
if not label:
frame = self.getContainer()
else:
frame = self._getLabelBox(title, label=label, **kwargs)
if kind == "standard":
ent = self._buildEntry(title, frame, secret)
elif kind == "numeric":
ent = self._buildEntry(title, frame, secret)
if self.validateNumeric is None:
self.validateNumeric = (self.containerStack[0]['container'].register(
self._validateNumericEntry), '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
ent.isNumeric = True
ent.config(validate='key', validatecommand=self.validateNumeric)
self.setEntryTooltip(title, "Numeric data only.")
elif kind == "auto":
ent = self._buildEntry(title, frame, secret=False, words=words)
elif kind == "file":
ent = self._buildFileEntry(title, frame)
elif kind == "directory":
ent = self._buildFileEntry(title, frame, selectFile=False)
elif kind == "validation":
ent = self._buildValidationEntry(title, frame, secret)
else:
raise Exception("Invalid entry kind: %s", kind)
if not label:
self._positionWidget(ent, row, column, colspan, rowspan)
else:
self._packLabelBox(frame, ent)
self._positionWidget(frame, row, column, colspan, rowspan)
return ent
def addEntry(self, title, row=None, column=0, colspan=0, rowspan=0, secret=False):
''' adds an entry box for capturing text '''
return self._entryMaker(title, row, column, colspan, rowspan, secret=secret, label=False, kind="standard")
def addLabelEntry(self, title, row=None, column=0, colspan=0, rowspan=0, secret=False, label=True):
''' adds an entry box for capturing text, with the title as a label '''
return self._entryMaker(title, row, column, colspan, rowspan, secret, label=label)
def addSecretEntry(self, title, row=None, column=0, colspan=0, rowspan=0):
''' adds an entry box for capturing text, where the text is displayed as stars '''
return self._entryMaker(title, row, column, colspan, rowspan, True)
def addLabelSecretEntry(self, title, row=None, column=0, colspan=0, rowspan=0, label=True):
''' adds an entry box for capturing text, where the text is displayed as stars, with the title as a label '''
return self._entryMaker(title, row, column, colspan, rowspan, secret=True, label=label)
def addSecretLabelEntry(self, title, row=None, column=0, colspan=0, rowspan=0, label=True):
''' adds an entry box for capturing text, where the text is displayed as stars, with the title as a label '''
return self._entryMaker(title, row, column, colspan, rowspan, secret=True, label=label)
def addFileEntry(self, title, row=None, column=0, colspan=0, rowspan=0):
''' adds an entry box with a button that pops up a file dialog '''
return self._entryMaker(title, row, column, colspan, rowspan, secret=False, label=False, kind="file")
def addLabelFileEntry(self, title, row=None, column=0, colspan=0, rowspan=0, label=True):
''' adds an entry box with a button that pops up a file dialog, with a label that displays the title '''
return self._entryMaker(title, row, column, colspan, rowspan, secret=False, label=label, kind="file")
def addDirectoryEntry(self, title, row=None, column=0, colspan=0, rowspan=0):
return self._entryMaker(title, row, column, colspan, rowspan, secret=False, label=False, kind="directory")
def addLabelDirectoryEntry(self, title, row=None, column=0, colspan=0, rowspan=0, label=True):
return self._entryMaker(title, row, column, colspan, rowspan, secret=False, label=label, kind="directory")
def addValidationEntry(self, title, row=None, column=0, colspan=0, rowspan=0, secret=False):
return self._entryMaker(title, row, column, colspan, rowspan, secret=False, label=False, kind="validation")
def addLabelValidationEntry(self, title, row=None, column=0, colspan=0, rowspan=0, secret=False, label=True):
return self._entryMaker(title, row, column, colspan, rowspan, secret=False, label=label, kind="validation")
def addAutoEntry(self, title, words, row=None, column=0, colspan=0, rowspan=0):
return self._entryMaker(title, row, column, colspan, rowspan, secret=False, label=False, kind="auto", words=words)
def addLabelAutoEntry(self, title, words, row=None, column=0, colspan=0, rowspan=0, secret=False, label=True):
return self._entryMaker(title, row, column, colspan, rowspan, secret=False, label=label, kind="auto", words=words)
def addNumericEntry(self, title, row=None, column=0, colspan=0, rowspan=0, secret=False):
return self._entryMaker(title, row, column, colspan, rowspan, secret=secret, label=False, kind="numeric")
def addLabelNumericEntry(self, title, row=None, column=0, colspan=0, rowspan=0, secret=False, label=True):
return self._entryMaker(title, row, column, colspan, rowspan, secret=secret, label=label, kind="numeric")
def addNumericLabelEntry(self, title, row=None, column=0, colspan=0, rowspan=0, secret=False, label=True):
return self._entryMaker(title, row, column, colspan, rowspan, secret=secret, label=label, kind="numeric")
def _getDirName(self, title):
self._getFileName(title, selectFile=False)
def _getFileName(self, title, selectFile=True):
if selectFile:
fileName = self.openBox()
else:
fileName = self.directoryBox()
if fileName is not None and fileName != "":
self.setEntry(title, fileName)
self.topLevel.after(250, self.setEntryFocus, title)
def _checkDirName(self, title):
if len(self.getEntry(title)) == 0:
self._getFileName(title, selectFile=False)
def _checkFileName(self, title):
if len(self.getEntry(title)) == 0:
self._getFileName(title)
def _buildEntry(self, title, frame, secret=False, words=[]):
self.widgetManager.verify(self.Widgets.Entry, title)
# if we are an autocompleter
if len(words) > 0:
ent = self._makeAutoCompleteEntry()(words, self._getTopLevel(), frame)
else:
var = StringVar(self.topLevel)
ent = entryBase(frame, textvariable=var)
ent.var = var
ent.var.auto_id = None
# for now - suppress UP/DOWN arrows
if self.platform in [self.MAC]:
def suppress(event):
if event.keysym == "Up":
# move home
event.widget.icursor(0)
event.widget.xview(0)
return "break"
elif event.keysym == "Down":
# move end
event.widget.icursor(END)
event.widget.xview(END)
return "break"
ent.bind("<Key>", suppress)
if not self.ttkFlag:
ent.config(font=self._getContainerProperty('inputFont'))
if self.platform in [self.MAC, self.LINUX]:
ent.config(highlightbackground=self._getContainerBg())
# vars to store any limit traces
ent.var.uc_id = None
ent.var.lc_id = None
ent.var.ml_id = None
ent.inContainer = False
ent.showingDefault = False # current status of entry
ent.default = "" # the default value to show (if set)
ent.DEFAULT_TEXT = "" # the default value for language support
ent.myTitle = title # the title of the entry
ent.isNumeric = False # if the entry is numeric
ent.isValidation = False # if the entry is validation
ent.isSecret = False # if the entry is secret
# configure it to be secret
if secret:
ent.config(show="*")
ent.isSecret = True
ent.bind("<Tab>", self._focusNextWindow)
ent.bind("<Shift-Tab>", self._focusLastWindow)
# add a right click menu
self._addRightClickMenu(ent)
self.widgetManager.add(self.Widgets.Entry, title, ent)
self.widgetManager.add(self.Widgets.Entry, title, ent.var, group=WidgetManager.VARS)
return ent
def _buildFileEntry(self, title, frame, selectFile=True):
vFrame = self._makeButtonBox()(frame)
self.widgetManager.log(self.Widgets.FrameBox, vFrame)
if not self.ttkFlag:
vFrame.config(background=self._getContainerBg())
vFrame.theWidget = self._buildEntry(title, vFrame)
vFrame.theWidget.inContainer = True
vFrame.theWidget.pack(expand=True, fill=X, side=LEFT)
if selectFile:
command = self.MAKE_FUNC(self._getFileName, title)
vFrame.theWidget.click_command = self.MAKE_FUNC(self._checkFileName, title)
text = "File"
default = "-- enter a filename --"
else:
command = self.MAKE_FUNC(self._getDirName, title)
vFrame.theWidget.click_command = self.MAKE_FUNC(self._checkDirName, title)
text = "Directory"
default = "-- enter a directory --"
self.setEntryDefault(title, default)
vFrame.theWidget.bind("<Button-1>", vFrame.theWidget.click_command, "+")
if not self.ttkFlag:
vFrame.theButton = Button(vFrame, font=self._getContainerProperty('buttonFont'))
else:
vFrame.theButton = ttk.Button(vFrame)
vFrame.theButton.config(text=text)
vFrame.theButton.config(command=command)
vFrame.theButton.pack(side=RIGHT, fill=X)
vFrame.theButton.inContainer = True
vFrame.theWidget.but = vFrame.theButton
if not self.ttkFlag and self.platform in [self.MAC, self.LINUX]:
vFrame.theButton.config(highlightbackground=self._getContainerBg())
return vFrame
def _buildValidationEntry(self, title, frame, secret):
vFrame = self._makeLabelBox()(frame)
self.widgetManager.log(self.Widgets.FrameBox, vFrame)
vFrame.isValidation = True
ent = self._buildEntry(title, vFrame, secret)
if not self.ttkFlag:
vFrame.config(background=self._getContainerBg())
ent.config(highlightthickness=2)
ent.pack(expand=True, fill=X, side=LEFT)
ent.isValidation = True
ent.inContainer = True
class ValidationLabel(labelBase, object):
def __init__(self, parent, *args, **options):
super(ValidationLabel, self).__init__(parent, *args, **options)
lab = ValidationLabel(vFrame)
lab.pack(side=RIGHT, fill=Y)
lab.config(font=self._getContainerProperty('labelFont'))
if not self.ttkFlag:
lab.config(background=self._getContainerBg())
lab.inContainer = True
lab.isValidation = True
ent.lab = lab
vFrame.theWidget = ent
vFrame.theLabel = lab
self.setEntryWaitingValidation(title)
return vFrame
def setEntryValid(self, title):
self.setValidationEntry(title, "valid")
def setEntryInvalid(self, title):
self.setValidationEntry(title, "invalid")
def setEntryWaitingValidation(self, title):
self.setValidationEntry(title, "wait")
def setValidationEntry(self, title, state="valid"):
entry = self.widgetManager.get(self.Widgets.Entry, title)
if not entry.isValidation:
self.warn("Entry %s is not a validation entry. Unable to set WAITING VALID.", title)
return
if state == "wait":
col = "#000000"
text = '\u2731'
eStyle="ValidationEntryWaiting.TEntry"
lStyle="ValidationEntryWaiting.TLabel"
elif state == "invalid":
col = "#FF0000"
text = '\u2716'
eStyle="ValidationEntryInvalid.TEntry"
lStyle="ValidationEntryInvalid.TLabel"
elif state == "valid":
col = "#4CC417"
text = '\u2714'
eStyle="ValidationEntryValid.TEntry"
lStyle="ValidationEntryValid.TLabel"
else:
self.warn("Invalid validation state: %s", state)
return
if not self.ttkFlag:
if not entry.showingDefault:
entry.config(fg=col)
entry.config(highlightbackground=col, highlightcolor=col)
entry.config(highlightthickness=1)
entry.lab.config(text=text, fg=col)
entry.oldFg = col
else:
if not entry.showingDefault:
entry.configure(style=eStyle)
entry.lab.config(text=text, style=lStyle)
entry.oldFg = eStyle
entry.lab.DEFAULT_TEXT = entry.lab.cget("text")
def appendAutoEntry(self, title, value):
entry = self.widgetManager.get(self.Widgets.Entry, title)
try:
entry.addWords(value)
except AttributeError:
gui.error("You can only append items to an AutoEntry, %s is not an AutoEntry.", title)
def removeAutoEntry(self, title, value):
entry = self.widgetManager.get(self.Widgets.Entry, title)
try:
entry.removeWord(value)
except AttributeError:
gui.error("You can only remove items from an AutoEntry, %s is not an AutoEntry.", title)
def changeAutoEntry(self, title, value):
entry = self.widgetManager.get(self.Widgets.Entry, title)
try:
entry.changeWords(value)
except AttributeError:
gui.error("You can only change items in an AutoEntry, %s is not an AutoEntry.", title)
def setAutoEntryNumRows(self, title, rows):
entry = self.widgetManager.get(self.Widgets.Entry, title)
try:
entry.setNumRows(rows)
except AttributeError:
gui.error("You can only change the number of rows in an AutoEntry, %s is not an AutoEntry.", title)
def _validateNumericEntry(self, action, index, value_if_allowed, prior_value, text, validation_type, trigger_type, widget_name):
if action == "1":
if str(text) in '0123456789.-+':
try:
if len(str(value_if_allowed)) == 1 and str(value_if_allowed) in '.-':
return True
elif len(str(value_if_allowed)) == 2 and str(value_if_allowed) == '-.':
return True
else:
float(value_if_allowed)
return True
except ValueError:
self.containerStack[0]['container'].bell()
return False
else:
self.containerStack[0]['container'].bell()
return False
else:
return True
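# Behaviour sketch for the numeric validator above: on insert (action "1") the
# candidate text must still parse as a float, with "-", "." and "-." allowed
# as intermediate states; anything else rings the bell and is rejected:
#   "12" + "3" -> "123" accepted        "12" + "x" -> rejected (bell)
#   ""   + "-" -> "-"   accepted        "-"  + "." -> "-."     accepted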
def getEntry(self, name):
entry = self.widgetManager.get(self.Widgets.Entry, name)
if entry.showingDefault:
if entry.isNumeric:
return None
else:
return ""
else:
val = self.widgetManager.get(self.Widgets.Entry, name, group=WidgetManager.VARS).get()
if entry.isNumeric:
if len(val) == 0 or (len(val) == 1 and val in '.-') or (len(val) == 2 and val == "-."):
return None
else:
return float(val)
else:
return val
def getAllEntries(self):
entries = {}
for k in self.widgetManager.group(self.Widgets.Entry):
entries[k] = self.getEntry(k)
return entries
def setEntry(self, name, text, callFunction=True):
ent = self.widgetManager.get(self.Widgets.Entry, name)
var = self.widgetManager.get(self.Widgets.Entry, name, group=WidgetManager.VARS)
self._updateEntryDefault(name, mode="set")
# now call function
with PauseCallFunction(callFunction, var, False):
if not ent.isNumeric or self._validateNumericEntry("1", None, text, None, "1", None, None, None):
var.set(text)
def setEntryMaxLength(self, name, length):
var = self.widgetManager.get(self.Widgets.Entry, name, group=WidgetManager.VARS)
var.maxLength = length
if var.ml_id is not None:
var.trace_vdelete('w', var.ml_id)
var.ml_id = var.trace('w', self.MAKE_FUNC(self._limitEntry, name))
def setEntryUpperCase(self, name):
var = self.widgetManager.get(self.Widgets.Entry, name, group=WidgetManager.VARS)
if var.uc_id is not None:
var.trace_vdelete('w', var.uc_id)
var.uc_id = var.trace('w', self.MAKE_FUNC(self._upperEntry, name))
def setEntryLowerCase(self, name):
var = self.widgetManager.get(self.Widgets.Entry, name, group=WidgetManager.VARS)
if var.lc_id is not None:
var.trace_vdelete('w', var.lc_id)
var.lc_id = var.trace('w', self.MAKE_FUNC(self._lowerEntry, name))
def _limitEntry(self, name):
var = self.widgetManager.get(self.Widgets.Entry, name, group=WidgetManager.VARS)
if len(var.get()) > var.maxLength:
self.containerStack[0]['container'].bell()
var.set(var.get()[0:var.maxLength])
def _upperEntry(self, name):
var = self.widgetManager.get(self.Widgets.Entry, name, group=WidgetManager.VARS)
chars = var.get().upper()
var.set(chars)
def _lowerEntry(self, name):
var = self.widgetManager.get(self.Widgets.Entry, name, group=WidgetManager.VARS)
chars = var.get().lower()
var.set(chars)
def _entryIn(self, name):
self._updateEntryDefault(name, "in")
def _entryOut(self, name):
self._updateEntryDefault(name, "out")
def _updateEntryDefault(self, name, mode=None):
var = self.widgetManager.get(self.Widgets.Entry, name, group=WidgetManager.VARS)
entry = self.widgetManager.get(self.Widgets.Entry, name)
# ignore this if no default to apply
if entry.default == "":
return
# disable any limits
if var.lc_id is not None:
var.trace_vdelete('w', var.lc_id)
if var.uc_id is not None:
var.trace_vdelete('w', var.uc_id)
if var.ml_id is not None:
var.trace_vdelete('w', var.ml_id)
# disable any auto completion
if var.auto_id is not None:
var.trace_vdelete('w', var.auto_id)
current = self.widgetManager.get(self.Widgets.Entry, name, group=WidgetManager.VARS).get()
# disable any change function
with PauseCallFunction(False, var, False):
# clear & remove default
if mode == "set" or (mode in [ "in", "clear"] and entry.showingDefault):
var.set("")
entry.showingDefault = False
entry.config(justify=entry.oldJustify)
if not self.ttkFlag:
entry.config(foreground=entry.oldFg)
else:
entry.configure(style=entry.oldFg)
if entry.isSecret:
entry.config(show="*")
elif mode == "out" and (current == "" or entry.showingDefault):
if entry.isSecret:
entry.config(show="")
var.set(entry.default)
entry.config(justify='center')
if not self.ttkFlag:
entry.config(foreground='grey')
else:
entry.configure(style="DefaultText.TEntry")
entry.showingDefault = True
elif mode == "update" and entry.showingDefault:
if entry.isSecret:
entry.config(show="")
var.set(entry.default)
# re-enable any limits
if var.lc_id is not None:
var.lc_id = var.trace('w', self.MAKE_FUNC(self._lowerEntry, name))
if var.uc_id is not None:
var.uc_id = var.trace('w', self.MAKE_FUNC(self._upperEntry, name))
if var.ml_id is not None:
var.ml_id = var.trace('w', self.MAKE_FUNC(self._limitEntry, name))
# re-enable auto completion
if var.auto_id is not None:
var.auto_id = var.trace('w', entry.textChanged)
def setEntryDefault(self, name, text="default"):
entry = self.widgetManager.get(self.Widgets.Entry, name)
self.widgetManager.get(self.Widgets.Entry, name, group=WidgetManager.VARS)
# remember current settings - to return to
if not hasattr(entry, "oldJustify"):
entry.oldJustify = entry.cget('justify')
if not hasattr(entry, "oldFg"):
if not self.ttkFlag:
entry.oldFg = entry.cget('foreground')
else:
entry.oldFg = entry.cget("style")
# configure default stuff
entry.default = text
entry.DEFAULT_TEXT = text
# only show new text if empty
self._updateEntryDefault(name, "out")
# bind commands to show/remove the default
if hasattr(entry, "defaultInEvent"):
entry.unbind(entry.defaultInEvent)
entry.unbind(entry.defaultOutEvent)
in_command = self.MAKE_FUNC(self._entryIn, name)
out_command = self.MAKE_FUNC(self._entryOut, name)
entry.defaultInEvent = entry.bind("<FocusIn>", in_command, add="+")
entry.defaultOutEvent = entry.bind("<FocusOut>", out_command, add="+")
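# Default-text lifecycle sketch for setEntryDefault() above: the grey, centred
# prompt shows while the entry is empty & unfocused, clears on focus-in, and
# returns on focus-out if nothing was typed (names hypothetical):
#   app.entry("email")
#   app.setEntryDefault("email", "-- enter an email --")
#   app.getEntry("email")    # -> "" while the default is showing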
def clearEntry(self, name, callFunction=True, setFocus=True):
var = self.widgetManager.get(self.Widgets.Entry, name, group=WidgetManager.VARS)
# now call function
with PauseCallFunction(callFunction, var, False):
var.set("")
self._updateEntryDefault(name, mode="clear")
if setFocus: self.setFocus(name)
def clearAllEntries(self, callFunction=False):
for entry in self.widgetManager.group(self.Widgets.Entry, group=WidgetManager.VARS):
self.clearEntry(entry, callFunction=callFunction, setFocus=False)
def setFocus(self, name):
entry = self.widgetManager.get(self.Widgets.Entry, name)
entry.focus_set()
def getFocus(self):
widg = self.topLevel.focus_get()
return self._getWidgetName(widg)
####################################
## Functions to get widget details
####################################
def _lookupValue(self, myDict, val):
for name in myDict:
if isinstance(myDict[name], type([])): # array of cbs
for rb in myDict[name]:
if rb == val:
return name
else:
if myDict[name] == val:
return name
return None
def _getWidgetName(self, widg):
name = gui.GET_WIDGET_TYPE(widg)
if name.lower() == "tk":
return self._getTopLevel().title()
elif name == "Listbox":
return self._lookupValue(self.widgetManager.group(self.Widgets.ListBox), widg)
elif name == "Button":
# merge together Buttons & Toolbar Buttons
z = self.widgetManager.group(self.Widgets.Button).copy()
z.update(self.widgetManager.group(self.Widgets.Toolbar))
return self._lookupValue(z, widg)
elif name == "Entry":
return self._lookupValue(self.widgetManager.group(self.Widgets.Entry), widg)
elif name == "Scale":
return self._lookupValue(self.widgetManager.group(self.Widgets.Scale), widg)
elif name == "Checkbutton":
return self._lookupValue(self.widgetManager.group(self.Widgets.CheckBox), widg)
elif name == "Radiobutton":
return self._lookupValue(self.widgetManager.group(self.Widgets.RadioButton), widg)
elif name == "Spinbox":
return self._lookupValue(self.widgetManager.group(self.Widgets.SpinBox), widg)
elif name == "OptionMenu":
return self._lookupValue(self.widgetManager.group(self.Widgets.OptionBox), widg)
elif name == "Text":
return self._lookupValue(self.widgetManager.group(self.Widgets.TextArea), widg)
elif name == "Link":
return self._lookupValue(self.widgetManager.group(self.Widgets.Link), widg)
else:
raise Exception("Unknown widget type: " + name)
#####################################
# FUNCTIONS for progress bars (meters)
#####################################
def meter(self, title, value=None, *args, **kwargs):
""" simpleGUI - adds, sets & gets meters all in one go """
widgKind = self.Widgets.Meter
kind = kwargs.pop("kind","'meter")
fill = kwargs.pop("fill", None)
text = kwargs.pop("text", None)
try: self.widgetManager.verify(self.Widgets.Meter, title)
except: # widget exists
meter = self.getMeter(title)
else: # new widget
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
if kind == "split": meter = self._addMeter(title, "SPLIT", **kwargs)
elif kind == "dual": meter = self._addMeter(title, "DUAL", **kwargs)
else: meter = self._addMeter(title, "METER", **kwargs)
if value is not None: self.setMeter(title, value, text=text)
if fill is not None: self.setMeterFill(title, fill)
if len(kwargs) > 0:
self._configWidget(title, widgKind, **kwargs)
return meter
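# Usage sketch for meter() above (values 0-100; fill colours hypothetical):
#   app.meter("progress", 25, fill="blue")
#   app.meter("balance", 50, kind="dual", fill=["red", "green"])
#   app.meter("progress", 50, text="halfway")    # later calls update it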
def _addMeter(self, name, kind="METER", row=None, column=0, colspan=0, rowspan=0, **kwargs):
self.widgetManager.verify(self.Widgets.Meter, name)
if kind == "SPLIT":
meter = SplitMeter(self.getContainer(), font=self._getContainerProperty('labelFont'))
elif kind == "DUAL":
meter = DualMeter(self.getContainer(), font=self._getContainerProperty('labelFont'))
else:
meter = Meter(self.getContainer(), font=self._getContainerProperty('labelFont'))
self.widgetManager.add(self.Widgets.Meter, name, meter)
self._positionWidget(meter, row, column, colspan, rowspan)
return meter
def addMeter(self, name, row=None, column=0, colspan=0, rowspan=0):
return self._addMeter(name, "METER", row, column, colspan, rowspan)
def addSplitMeter(self, name, row=None, column=0, colspan=0, rowspan=0):
return self._addMeter(name, "SPLIT", row, column, colspan, rowspan)
def addDualMeter(self, name, row=None, column=0, colspan=0, rowspan=0):
return self._addMeter(name, "DUAL", row, column, colspan, rowspan)
# update the value of the specified meter
# note: expects a value between 0 (-100 for split/dual) & 100
def setMeter(self, name, value=0.0, text=None):
item = self.widgetManager.get(self.Widgets.Meter, name)
item.set(value, text)
def getMeter(self, name):
item = self.widgetManager.get(self.Widgets.Meter, name)
return item.get()
def getAllMeters(self):
meters = {}
for k in self.widgetManager.group(self.Widgets.Meter):
meters[k] = self.getMeter(k)
return meters
# a single colour for meters, a list of 2 colours for splits & duals
def setMeterFill(self, name, colour):
item = self.widgetManager.get(self.Widgets.Meter, name)
item.configure(fill=colour)
#####################################
# FUNCTIONS for separators
#####################################
def separator(self, *args, **kwargs):
""" simpleGUI - adds horizontal/vertical separators """
direction = kwargs.pop("direction", "horizontal").lower()
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
if direction == "vertical":
return self.addVerticalSeparator(*args, **kwargs)
else:
return self.addHorizontalSeparator(*args, **kwargs)
def addHorizontalSeparator(self, row=None, column=0, colspan=0, rowspan=0, colour=None):
return self._addSeparator("horizontal", row, column, colspan, rowspan, colour)
def addVerticalSeparator(self, row=None, column=0, colspan=0, rowspan=0, colour=None):
return self._addSeparator("vertical", row, column, colspan, rowspan, colour)
def _addSeparator(self, orient, row=None, column=0, colspan=0, rowspan=0, colour=None):
sep = self._makeSeparator()(self.getContainer(), orient)
if colour is not None:
sep.configure(fg=colour)
self.widgetManager.log(self.Widgets.Separator, sep)
self._positionWidget(sep, row, column, colspan, rowspan)
return sep
#####################################
# FUNCTIONS for pie charts
#####################################
def pie(self, title, value=None, *args, **kwargs):
""" simpleGUI - adds, sets & gets pies all in one go """
widgKind = self.Widgets.PieChart
name = kwargs.pop("name", None)
try: self.widgetManager.verify(widgKind, title)
except: # widget exists
if name is not None: self.setPieChart(title, name, value)
pie = self.getPieChart(title)
else: # new widget
kwargs = self._parsePos(kwargs.pop("pos", []), kwargs)
pie = self.addPieChart(title, value, *args, **kwargs)
if len(kwargs) > 0:
self._configWidget(title, widgKind, **kwargs)
return pie
def addPieChart(self, name, fracs, row=None, column=0, colspan=0, rowspan=0):
self.widgetManager.verify(self.Widgets.PieChart, name)
self._loadTooltip()
pie = PieChart(self.getContainer(), fracs, self._getContainerBg())
self.widgetManager.add(self.Widgets.PieChart, name, pie)
self._positionWidget(pie, row, column, colspan, rowspan, sticky=None)
return pie
def setPieChart(self, title, name, value):
pie = self.widgetManager.get(self.Widgets.PieChart, title)
pie.setValue(name, value)
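# Usage sketch for the pie-chart helpers above (fractions hypothetical):
#   app.pie("sales", {"apples": 50, "pears": 30, "plums": 20})
#   app.setPieChart("sales", "pears", 35)     # resize one slice
#   app.pie("sales", 40, name="apples")       # same, via the shorthand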
#####################################
# FUNCTIONS for toolbar
#####################################
# adds a list of buttons along the top - like a tool bar...
def addToolbarButton(self, name, func, findIcon=False):
self.addToolbar([name], func, findIcon)
def toolbar(self, names, funcs, **kwargs):
""" simpleGUI - shortener for toolbar """
icons = kwargs.pop('icons', kwargs.pop('findIcon', False))
pinned = kwargs.pop('pinned', None)
disabled = kwargs.pop('disabled', None)
hidden = kwargs.pop('hidden', None)
self.addToolbar(names, funcs, findIcon=icons)
if pinned is not None: self.setToolbarPinned(pinned=pinned)
if disabled is not None: self.setToolbarDisabled(disabled=disabled)
if hidden is True: self.hideToolbar()
def addToolbar(self, names, funcs, findIcon=False, **kwargs):
# hide the tbm bar
if self.tbMinMade:
self.tbm.pack_forget()
# make sure the toolbar is showing
try:
self.tb.pack_info()
except:
self.tb.pack(before=self.containerStack[0]['container'], side=TOP, fill=X)
if not self.hasTb:
self.hasTb = True
image = None
singleFunc = self._checkFunc(names, funcs)
if not isinstance(names, list):
names = [names]
for i in range(len(names)):
t = names[i]
if (t in self.widgetManager.group(self.Widgets.Toolbar)):
raise Exception(
"Invalid toolbar button name: " +
t +
" already exists")
if findIcon:
# turn off warnings about PNGs
with PauseLogger():
imgFile = os.path.join(self.icon_path, t.lower() + ".png")
try:
image = self._getImage(imgFile)
except Exception as e:
image = None
if not self.ttkFlag:
but = Button(self.tb)
but.config(relief=FLAT, font=self._buttonFont)
else:
but = ttk.Button(self.tb)
self.widgetManager.add(self.Widgets.Toolbar, t, but)
if singleFunc is not None:
u = self.MAKE_FUNC(singleFunc, t)
else:
u = self.MAKE_FUNC(funcs[i], t)
but.config(command=u)
if image is not None:
# works on Mac & Windows :)
but.config(image=image)
but.image = image
if not self.ttkFlag:
but.config(justify=LEFT, compound=TOP)
else:
but.config(style="Toolbar.TButton")
else:
but.config(text=t)
but.pack(side=LEFT, padx=2, pady=2)
but.tt_var = self._addTooltip(but, t.title(), True)
but.DEFAULT_TEXT=t
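# Usage sketch for the toolbar above (toolbarPress() is hypothetical; with
# icons=True each button name is looked up as <icon_path>/<name>.png):
#   app.toolbar(["new", "open", "save"], toolbarPress, icons=True, pinned=True)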
def _setPinBut(self):
# only call this once
if self.pinBut is not None:
return
# try to get the icon, if none - then set but to None, and ignore from now on
imgFile = os.path.join(self.icon_path, "pin.gif")
try:
imgObj = self._getImage(imgFile)
self.pinBut = Label(self.tb)
except:
return
# if image found, then set up the label
if self.pinBut is not None:
self.pinBut.config(image=imgObj)#, compound=TOP, text="", justify=LEFT)
self.pinBut.image = imgObj # keep a reference!
self.pinBut.pack(side=RIGHT, anchor=NE, padx=0, pady=0)
if gui.GET_PLATFORM() == gui.MAC:
self.pinBut.config(cursor="pointinghand")
elif gui.GET_PLATFORM() in [gui.WINDOWS, gui.LINUX]:
self.pinBut.config(cursor="hand2")
self.pinBut.eventId = self.pinBut.bind("<Button-1>", self._toggletb)
self._addTooltip(self.pinBut, "Click here to pin/unpin the toolbar.", True)
# called by pinBut, to toggle the pin status of the toolbar
def _toggletb(self, event=None):
self.setToolbarPinned(not self.tbPinned)
def setToolbarPinned(self, pinned=True):
self.tbPinned = pinned
self._setPinBut()
if not self.tbPinned:
if self.pinBut is not None:
try:
self.pinBut.image = self._getImage(os.path.join(self.icon_path, "unpin.gif"))
except:
pass
if not self.tbMinMade:
self.tbMinMade = True
self.tbm = Frame(self.appWindow, bd=1, relief=RAISED)
self.tbm.config(bg="gray", height=3)
self.tb.bind("<Leave>", self._minToolbar)
self.tbm.bind("<Enter>", self._maxToolbar)
self._minToolbar()
else:
if self.pinBut is not None:
try:
self.pinBut.image = self._getImage(os.path.join(self.icon_path, "pin.gif"))
except:
pass
self._maxToolbar()
if self.pinBut is not None:
self.pinBut.config(image=self.pinBut.image)
def setToolbarIcon(self, name, icon):
if (name not in self.widgetManager.group(self.Widgets.Toolbar)):
raise Exception("Unknown toolbar name: " + name)
imgFile = os.path.join(self.icon_path, icon.lower() + ".png")
with PauseLogger():
self.setToolbarImage(name, imgFile)
self.widgetManager.get(self.Widgets.Toolbar, name).tt_var.set(icon)
def setToolbarImage(self, name, imgFile):
if (name not in self.widgetManager.group(self.Widgets.Toolbar)):
raise Exception("Unknown toolbar name: " + name)
image = self._getImage(imgFile)
self.widgetManager.get(self.Widgets.Toolbar, name).config(image=image)
self.widgetManager.get(self.Widgets.Toolbar, name).image = image
def removeToolbarButton(self, name, hide=True):
if (name not in self.widgetManager.group(self.Widgets.Toolbar)):
raise Exception("Unknown toolbar name: " + name)
self.widgetManager.get(self.Widgets.Toolbar, name).destroy()
self.widgetManager.remove(self.Widgets.Toolbar, name)
if hide:
if len(self.widgetManager.group(self.Widgets.Toolbar)) == 0:
self.tb.pack_forget()
self.hasTb = False
if self.tbMinMade:
self.tbm.pack_forget()
def removeToolbar(self, hide=True):
while len(self.widgetManager.group(self.Widgets.Toolbar)) > 0:
self.removeToolbarButton(list(self.widgetManager.group(self.Widgets.Toolbar))[0], hide)
def setToolbarButtonEnabled(self, name):
self.setToolbarButtonDisabled(name, False)
def setToolbarButtonDisabled(self, name, disabled=True):
if (name not in self.widgetManager.group(self.Widgets.Toolbar)):
raise Exception("Unknown toolbar name: " + name)
if disabled:
self.widgetManager.get(self.Widgets.Toolbar, name).config(state=DISABLED)
else:
self.widgetManager.get(self.Widgets.Toolbar, name).config(state=NORMAL)
def setToolbarEnabled(self):
self.setToolbarDisabled(False)
def setToolbarDisabled(self, disabled=True):
for but in self.widgetManager.group(self.Widgets.Toolbar).keys():
if disabled:
self.widgetManager.get(self.Widgets.Toolbar, but).config(state=DISABLED)
else:
self.widgetManager.get(self.Widgets.Toolbar, but).config(state=NORMAL)
if self.pinBut is not None:
if disabled:
# this fails if not bound
if self.pinBut.eventId:
self.pinBut.unbind("<Button-1>", self.pinBut.eventId)
self.pinBut.eventId = None
self._disableTooltip(self.pinBut)
self.pinBut.config(cursor="")
else:
if gui.GET_PLATFORM() == gui.MAC:
self.pinBut.config(cursor="pointinghand")
elif gui.GET_PLATFORM() in [gui.WINDOWS, gui.LINUX]:
self.pinBut.config(cursor="hand2")
self.pinBut.eventId = self.pinBut.bind("<Button-1>", self._toggletb)
self._enableTooltip(self.pinBut)
def _minToolbar(self, e=None):
if not self.tbPinned:
if self.tbMinMade:
self.tbm.config(width=self.tb.winfo_reqwidth())
self.tbm.pack(before=self.containerStack[0]['container'], side=TOP, fill=X)
self.tb.pack_forget()
def _maxToolbar(self, e=None):
self.tb.pack(before=self.containerStack[0]['container'], side=TOP, fill=X)
if self.tbMinMade:
self.tbm.pack_forget()
# functions to hide & show the toolbar
def hideToolbar(self):
if self.hasTb:
self.tb.pack_forget()
if self.tbMinMade:
self.tbm.pack_forget()
def showToolbar(self):
if self.hasTb:
self.tb.pack(before=self.containerStack[0]['container'], side=TOP, fill=X)
if self.tbMinMade:
self.tbm.pack_forget()
# Method to get all inputs.
def getAllInputs(self, **kwargs):
"""Get all values, merge & return as a single dictionary.
:param kwargs: will be _appended_ to the input list.
Note: empty values from each input are stripped, and existing keys
will not be overridden!
"""
# All available inputs.
inputs = filter(None, [
self.getAllEntries(),
self.getAllOptionBoxes(),
self.getAllSpinBoxes(),
self.getAllListBoxes(),
self.getAllProperties(),
self.getAllCheckBoxes(),
self.getAllRadioButtons(),
self.getAllScales(),
self.getAllMeters(),
self.getAllDatePickers(),
kwargs,
])
result = dict()
for pairs in inputs:
for key, val in pairs.items():
# Try and strip values.
try:
val = val.strip()
except AttributeError:
pass
try:
# Skip if value is empty or if key already exists.
if not val or result[key]:
continue
except KeyError:
pass
result[key] = val
return result
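# Usage sketch for getAllInputs() above - one dict across every input widget,
# with empty values dropped and earlier widgets winning on duplicate keys:
#   data = app.getAllInputs(source="settings-form")   # kwargs are appended too
#   print(data.get("user"))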
#####################################
# FUNCTIONS for menu bar
#####################################
def _initMenu(self):
# create a menu bar - only shows if populated
if not self.hasMenu:
# self.topLevel.option_add('*tearOff', FALSE)
self.hasMenu = True
self.menuBar = Menu(self.topLevel)
if self.platform == self.MAC:
appmenu = Menu(self.menuBar, name='apple')
self.menuBar.add_cascade(menu=appmenu)
self.widgetManager.add(self.Widgets.Menu, "MAC_APP", appmenu)
elif self.platform == self.WINDOWS:
# sysMenu must be added last, otherwise other menus vanish
sysMenu = Menu(self.menuBar, name='system', tearoff=False)
self.widgetManager.add(self.Widgets.Menu, "WIN_SYS", sysMenu)
# add a parent menu, for menu items
def createMenu(self, title, tearable=False, showInBar=True):
self.widgetManager.verify(self.Widgets.Menu, title)
self._initMenu()
if title == "WIN_SYS" and self.platform != self.WINDOWS:
self.warn("The WIN_SYS menu is specific to Windows")
return None
if self.platform == self.MAC and tearable:
self.warn("Tearable menus (%s) not supported on MAC", title)
tearable = False
theMenu = Menu(self.menuBar, tearoff=tearable)
if showInBar:
self.menuBar.add_cascade(label=title, menu=theMenu)
self.widgetManager.add(self.Widgets.Menu, title, theMenu)
return theMenu
def createRightClickMenu(self, title, showInBar=False):
men = self.createMenu(title, False, showInBar)
men.bind("<FocusOut>", lambda e: men.unpost())
return men
def _bindRightClick(self, item, value):
if self.platform in [self.WINDOWS, self.LINUX]:
item.bind('<Button-3>', lambda e, menu=value: self._rightClick(e, menu))
else:
item.bind('<Button-2>', lambda e, menu=value: self._rightClick(e, menu))
# add items to the named menu
def addMenuItem(self, title, item, func=None, kind=None, shortcut=None, underline=-1, rb_id=None, createBinding=True):
# set the initial menubar
self._initMenu()
# get or create an initial menu
if title is not None:
try:
theMenu = self.widgetManager.get(self.Widgets.Menu, title)
except:
theMenu = self.createMenu(title)
if theMenu is None:
return
if underline > -1 and self.platform == self.MAC:
self.warn("Underlining menu items not available on MAC")
if func is not None:
u = self.MAKE_FUNC(func, item)
else:
u = None
a = b = None
if shortcut is not None:
# MODIFIERS=["Control", "Ctrl", "Option", "Opt", "Alt", "Shift", "Command", "Cmd", "Meta"]
# UGLY formatting of accelerator & shortcut
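# 'a' becomes the accelerator text shown beside the menu item (title-cased below),
# 'b' becomes the Tk event sequence used for the key binding (e.g. <Control-s>)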
a = b = shortcut.lower().replace("+", "-")
a = a.replace("control", "ctrl")
a = a.replace("command", "cmd")
a = a.replace("option", "opt")
a = a.replace("key-", "")
b = b.replace("ctrl", "Control")
b = b.replace("control", "Control")
b = b.replace("cmd", "Command")
b = b.replace("command", "Command")
b = b.replace("option", "Option")
b = b.replace("opt", "Option")
b = b.replace("alt", "Alt")
b = b.replace("shift", "Shift")
b = b.replace("meta", "Meta")
b = b.replace("key", "Key")
if gui.GET_PLATFORM() != gui.MAC:
a = a.replace("cmd", "ctrl")
b = b.replace("Command", "Control")
# try to fix numerics
if b[-1] in "0123456789" and "Key" not in b:
b = b[:-1] + "Key-" + b[-1]
b = "<" + b + ">"
a = a.title()
gui.trace("Adding accelerator: %s", a)
self.widgetManager.verify(self.Widgets.Accelerators, a, array=True)
self.widgetManager.log(self.Widgets.Accelerators, a)
if u is not None and createBinding:
gui.trace("Binding: %s to %s", b, u)
self.topLevel.bind_all(b, u)
if item == "-" or kind == "separator":
theMenu.add_separator()
elif kind == "topLevel" or title is None:
if self.platform == self.MAC:
self.warn("Unable to make topLevel menus (%s) on Mac", item)
else:
self.menuBar.add_command(
label=item, command=u, accelerator=a, underline=underline)
elif kind == "rb":
varName = title + "rb" + item
newRb = False
if (varName in self.widgetManager.group(self.Widgets.Menu, group=WidgetManager.VARS)):
var = self.widgetManager.get(self.Widgets.Menu, varName, group=WidgetManager.VARS)
else:
newRb = True
var = StringVar(self.topLevel)
self.widgetManager.add(self.Widgets.Menu, varName, var, group=WidgetManager.VARS)
theMenu.add_radiobutton(label=rb_id, command=u, variable=var, value=rb_id, accelerator=a, underline=underline)
if newRb:
self.setMenuRadioButton(title, item, rb_id)
elif kind == "cb":
varName = title + "cb" + item
self.widgetManager.verify(self.Widgets.Menu, varName, group=WidgetManager.VARS)
var = StringVar(self.topLevel)
self.widgetManager.add(self.Widgets.Menu, varName, var, group=WidgetManager.VARS)
theMenu.add_checkbutton(label=item, command=u, variable=var, onvalue=1, offvalue=0, accelerator=a, underline=underline)
elif kind == "sub":
self.widgetManager.verify(self.Widgets.Menu, item)
subMenu = Menu(theMenu, tearoff=False)
self.widgetManager.add(self.Widgets.Menu, item, subMenu)
theMenu.add_cascade(menu=subMenu, label=item)
else:
theMenu.add_command(label=item, command=u, accelerator=a, underline=underline)
#################
# wrappers for other menu types
def addMenuList(self, menuName, names, funcs):
# deal with a dict_keys object - messy!!!!
if not isinstance(names, list):
names = list(names)
# append some Nones, if it's a list and contains separators
if funcs is not None:
if not callable(funcs):
seps = names.count("-")
for i in range(seps):
funcs.append(None)
singleFunc = self._checkFunc(names, funcs)
# add menu items
for t in names:
if funcs is None:
u = None
elif singleFunc is not None:
u = singleFunc
else:
u = funcs.pop(0)
self.addMenuItem(menuName, t, u)
def _prepareCopyAndPasteMenu(self, event, widget=None):
if self.copyAndPaste.inUse:
if event is not None:
widget = event.widget
self.disableMenu("EDIT", 10)
self.copyAndPaste.setUp(widget)
if self.copyAndPaste.canCopy:
self.enableMenuItem("EDIT", "Copy")
if self.copyAndPaste.canCut:
self.enableMenuItem("EDIT", "Cut")
if self.copyAndPaste.canPaste:
self.enableMenuItem("EDIT", "Paste")
self.enableMenuItem("EDIT", "Clear Clipboard")
if self.copyAndPaste.canSelect:
self.enableMenuItem("EDIT", "Select All")
self.enableMenuItem("EDIT", "Clear All")
if self.copyAndPaste.canUndo:
self.enableMenuItem("EDIT", "Undo")
if self.copyAndPaste.canRedo:
self.enableMenuItem("EDIT", "Redo")
if self.copyAndPaste.canFont:
self.enableMenuItem("EDIT", "Bold")
self.enableMenuItem("EDIT", "Italic")
self.enableMenuItem("EDIT", "Bold & Italic")
self.enableMenuItem("EDIT", "Underline")
return True
else:
return False
# called when copy/paste menu items are clicked
def _copyAndPasteHelper(self, menu):
if menu == "Cut":
self.copyAndPaste.cut()
elif menu == "Copy":
self.copyAndPaste.copy()
elif menu == "Paste":
self.copyAndPaste.paste()
elif menu == "Select All":
self.copyAndPaste.selectAll()
elif menu == "Clear Clipboard":
self.copyAndPaste.clearClipboard()
elif menu == "Clear All":
self.copyAndPaste.clearText()
elif menu == "Undo":
self.copyAndPaste.undo()
elif menu == "Redo":
self.copyAndPaste.redo()
elif menu in ["BOLD", "ITALIC", "UNDERLINE", "BOLD_ITALIC"]:
self.copyAndPaste.font("AJ_"+menu)
# add a single entry for a menu
def addSubMenu(self, menu, subMenu):
self.addMenuItem(menu, subMenu, None, "sub")
def addMenu(self, name, func, shortcut=None, underline=-1):
self.addMenuItem(None, name, func, "topLevel", shortcut, underline)
def addMenuSeparator(self, menu):
self.addMenuItem(menu, "-")
def addMenuCheckBox(self, menu, name, func=None, shortcut=None, underline=-1):
self.addMenuItem(menu, name, func, "cb", shortcut, underline)
def addMenuRadioButton(self, menu, name, value, func=None, shortcut=None, underline=-1):
self.addMenuItem(menu, name, func, "rb", shortcut, underline, value)
#################
# wrappers for setters
def _setMenu(self, menu, title, value, kind):
title = menu + kind + title
var = self.widgetManager.get(self.Widgets.Menu, title, group=WidgetManager.VARS)
if kind == "rb":
var.set(value)
elif kind == "cb":
if value is True:
var.set("1")
elif value is False:
var.set("0")
else:
if var.get() == "1":
var.set("0")
else:
var.set("1")
def setMenuCheckBox(self, menu, name, value=None):
self._setMenu(menu, name, value, "cb")
def setMenuRadioButton(self, menu, name, value):
self._setMenu(menu, name, value, "rb")
# set align = "none" to remove text
def setMenuImage(self, menu, title, image, align="left"):
theMenu = self.widgetManager.get(self.Widgets.Menu, menu)
imageObj = self._getImage(image)
if 16 != imageObj.width() or imageObj.width() != imageObj.height():
self.warn("Invalid image resolution for menu item %s (%s) - should be 16x16", title, image)
#imageObj = imageObj.subsample(2,2)
theMenu.entryconfigure(title, image=imageObj, compound=align)
def setMenuIcon(self, menu, title, icon, align="left"):
image = os.path.join(self.icon_path, icon.lower() + ".png")
with PauseLogger():
self.setMenuImage(menu, title, image, align)
def disableMenubar(self):
for theMenu in self.widgetManager.group(self.Widgets.Menu):
self.disableMenu(theMenu)
# loop through top level menus
# and disable any that got missed
numMenus = self.menuBar.index("end")
if numMenus is not None:
for item in range(numMenus+1):
self.menuBar.entryconfig(item, state=DISABLED)
def enableMenubar(self):
for theMenu in self.widgetManager.group(self.Widgets.Menu):
self.enableMenu(theMenu)
# loop through toplevel menus
# and enable any that got missed
numMenus = self.menuBar.index("end")
if numMenus is not None:
for item in range(numMenus+1):
self.menuBar.entryconfig(item, state=NORMAL)
def disableMenu(self, title, limit=None):
self._changeMenuState(title, DISABLED, limit)
def enableMenu(self, title, limit=None):
self._changeMenuState(title, NORMAL, limit)
def _changeMenuState(self, title, state, limit=None):
theMenu = self.widgetManager.get(self.Widgets.Menu, title)
numMenus = theMenu.index("end")
if numMenus is not None: # MAC_APP (and others?) returns None
for item in range(numMenus + 1):
if limit is not None and limit == item:
break
try:
theMenu.entryconfigure(item, state=state)
except:
pass # separator
# also disable the toplevel menu that matches this one
try:
self.menuBar.entryconfig(self.menuBar.index(title), state=state)
except TclError:
# ignore if we fail...
pass
def disableMenuItem(self, title, item):
theMenu = self.widgetManager.get(self.Widgets.Menu, title)
theMenu.entryconfigure(item, state=DISABLED)
def enableMenuItem(self, title, item):
theMenu = self.widgetManager.get(self.Widgets.Menu, title)
theMenu.entryconfigure(item, state=NORMAL)
def renameMenu(self, title, newName):
theMenu = self.widgetManager.get(self.Widgets.Menu, title)
self.menuBar.entryconfigure(title, label=newName)
def renameMenuItem(self, title, item, newName):
theMenu = self.widgetManager.get(self.Widgets.Menu, title)
theMenu.entryconfigure(item, label=newName)
#################
# wrappers for getters
def _getMenu(self, menu, title, kind):
title = menu + kind + title
var = self.widgetManager.get(self.Widgets.Menu, title, group=WidgetManager.VARS)
if kind == "rb":
return var.get()
elif kind == "cb":
if var.get() == "1":
return True
else:
return False
def getMenuCheckBox(self, menu, title):
return self._getMenu(menu, title, "cb")
def getMenuRadioButton(self, menu, title):
return self._getMenu(menu, title, "rb")
#################
# wrappers for platform specific menus
# enables the preferences item in the app menu
def addMenuPreferences(self, func):
if self.platform == self.MAC:
self._initMenu()
u = self.MAKE_FUNC(func, "preferences")
self.topLevel.createcommand('tk::mac::ShowPreferences', u)
else:
self.warn("The Preferences Menu is specific to Mac OSX")
# MAC help menu
def addMenuHelp(self, func):
if self.platform == self.MAC:
self._initMenu()
helpMenu = Menu(self.menuBar, name='help')
self.menuBar.add_cascade(menu=helpMenu, label='Help')
u = self.MAKE_FUNC(func, "help")
self.topLevel.createcommand('tk::mac::ShowHelp', u)
self.widgetManager.add(self.Widgets.Menu, "MAC_HELP", helpMenu)
else:
self.warn("The Help Menu is specific to Mac OSX")
# Shows a Window menu
def addMenuWindow(self):
if self.platform == self.MAC:
self._initMenu()
windowMenu = Menu(self.menuBar, name='window')
self.menuBar.add_cascade(menu=windowMenu, label='Window')
self.widgetManager.add(self.Widgets.Menu, "MAC_WIN", windowMenu)
else:
self.warn("The Window Menu is specific to Mac OSX")
def disableMenuEdit(self):
self.copyAndPaste.inUse = False
# adds an edit menu - by default only as a pop-up
# if inMenuBar is True - then show in menu too
def addMenuEdit(self, inMenuBar=False):
self._initMenu()
self.copyAndPaste.inUse = True
# in case we already made the menu - just return
try: self.widgetManager.verify(self.Widgets.Menu, "EDIT")
except: return
editMenu = Menu(self.menuBar, tearoff=False)
editMenu.bind("<FocusOut>", lambda e: editMenu.unpost())
if inMenuBar:
self.menuBar.add_cascade(menu=editMenu, label='Edit ')
self.widgetManager.add(self.Widgets.Menu, "EDIT", editMenu)
if gui.GET_PLATFORM() == gui.MAC:
shortcut = "Cmd+"
else:
shortcut = "Control-"
eList = [
('Cut', lambda e: self._copyAndPasteHelper("Cut"), "X", False),
('Copy', lambda e: self._copyAndPasteHelper("Copy"), "C", False),
('Paste', lambda e: self._copyAndPasteHelper("Paste"), "V", False),
('Select All', lambda e: self._copyAndPasteHelper("Select All"), "A", gui.GET_PLATFORM() == gui.MAC),
('Clear Clipboard', lambda e: self._copyAndPasteHelper("Clear Clipboard"), "", False)
]
for (txt, cmd, sc, bind) in eList:
acc = shortcut + sc
self.addMenuItem("EDIT", txt, cmd, shortcut=acc, createBinding=bind)
# add a clear option
self.addMenuSeparator("EDIT")
self.addMenuItem("EDIT", "Clear All", lambda e: self._copyAndPasteHelper("Clear All"))
self.addMenuSeparator("EDIT")
self.addMenuItem("EDIT", 'Undo', lambda e: self._copyAndPasteHelper("Undo"), shortcut=shortcut + "Z", createBinding=False)
self.addMenuItem("EDIT", 'Redo', lambda e: self._copyAndPasteHelper( "Redo"), shortcut="Shift-" + shortcut + "Z", createBinding=True)
self.addMenuSeparator("EDIT")
self.addMenuItem("EDIT", "Bold", lambda e: self._copyAndPasteHelper("BOLD"), shortcut=shortcut+"B")
self.addMenuItem("EDIT", "Italic", lambda e: self._copyAndPasteHelper("ITALIC"), shortcut=shortcut+"I")
self.addMenuItem("EDIT", "Underline", lambda e: self._copyAndPasteHelper("UNDERLINE"), shortcut=shortcut+"U")
self.addMenuItem("EDIT", "Bold & Italic", lambda e: self._copyAndPasteHelper("BOLD_ITALIC"), shortcut="Shift-" + shortcut + "B")
self.disableMenu("EDIT")
def _editMenuSetter(self, enabled=True):
if enabled:
self.addMenuEdit()
else:
self.disableMenuEdit()
def _editMenuGetter(self):
return self.copyAndPaste.inUse
editMenu = property(_editMenuGetter, _editMenuSetter)
def appJarAbout(self, menu=None):
self.infoBox("About appJar",
"---\n" +
__copyright__ + "\n" +
"---\n\t" +
gui.SHOW_VERSION().replace("\n", "\n\t") + "\n" +
"---\n" +
gui.SHOW_PATHS() + "\n" +
"---")
def appJarHelp(self, menu=None):
self.infoBox("appJar Help", "For help, visit " + __url__)
def addAppJarMenu(self):
if self.platform == self.MAC:
self.addMenuItem("MAC_APP", "About appJar", self.appJarAbout)
self.addMenuWindow()
self.addMenuHelp(self.appJarHelp)
elif self.platform == self.WINDOWS:
self.addMenuSeparator('WIN_SYS')
self.addMenuItem("WIN_SYS", "About appJar", self.appJarAbout)
self.addMenuItem("WIN_SYS", "appJar Help", self.appJarHelp)
#####################################
# FUNCTIONS for status bar
#####################################
def removeStatusbarField(self, field):
if self.hasStatus and field < len(self._statusFields):
self._statusFields[field].pack_forget()
self._statusFields[field].destroy()
del self._statusFields[field]
else:
raise ItemLookupError("Invalid field number for statusbar: " + str(field))
def removeStatusbar(self):
if self.hasStatus:
while len(self._statusFields) > 0:
self.removeStatusbarField(0)
self.statusFrame.pack_forget()
self.statusFrame.destroy()
self.hasStatus = False
self.header = ""
def status(self, *args, **kwargs):
self.statusbar(*args, **kwargs)
def statusbar(self, *args, **kwargs):
""" simpleGUI - shortener for statusbar """
header = kwargs.pop('header', None)
bg = kwargs.pop('bg', None)
fg = kwargs.pop('fg', None)
width = kwargs.pop('width', None)
text = kwargs.pop('text', "")
if not self.hasStatus:
self.addStatusbar(header="" if header is None else header, fields=kwargs.pop('fields', 1), side=kwargs.pop('side', None))
self.setStatusbar(text=text)
else:
if len(args) > 0: text = args[0]
field = 0 if len(args) < 2 else args[1]
self.setStatusbar(text=kwargs.pop('text', text), field=kwargs.pop('field', field))
if header is not None: self.setStatusbarHeader(header)
if bg is not None: self.setStatusbarBg(bg)
if fg is not None: self.setStatusbarFg(fg)
if width is not None: self.setStatusbarWidth(width)
def addStatusbar(self, header="", fields=1, side=None):
if not self.hasStatus:
class Statusbar(Frame, object):
def __init__(self, master, **kwargs):
super(Statusbar, self).__init__(master, **kwargs)
self.hasStatus = True
self.header = header
self.statusFrame = Statusbar(self.appWindow)
self.statusFrame.config(bd=1, relief=SUNKEN)
self.statusFrame.pack(side=BOTTOM, fill=X, anchor=S)
self._statusFields = []
for i in range(fields):
self._statusFields.append(Label(self.statusFrame))
self._statusFields[i].config(
bd=1,
relief=SUNKEN,
anchor=W,
font=self._statusFont,
width=10)
self._addTooltip(self._statusFields[i], "Status bar", True)
if side == "LEFT":
self._statusFields[i].pack(side=LEFT)
elif side == "RIGHT":
self._statusFields[i].pack(side=RIGHT)
else:
self._statusFields[i].pack(side=LEFT, expand=1, fill=BOTH)
else:
self.error("Statusbar already exists - ignoring")
def setStatusbarHeader(self, header):
if self.hasStatus:
self.header = header
def setStatusbar(self, text, field=0):
if self.hasStatus:
if field is None:
for status in self._statusFields:
status.config(text=self._getFormatStatus(text))
elif field >= 0 and field < len(self._statusFields):
self._statusFields[field].config(text=self._getFormatStatus(text))
else:
raise Exception("Invalid status field: " + str(field) +
". Must be between 0 and " + str(len(self._statusFields) - 1))
def setStatusbarBg(self, colour, field=None):
if self.hasStatus:
if field is None:
for status in self._statusFields:
status.config(background=colour)
elif field >= 0 and field < len(self._statusFields):
self._statusFields[field].config(background=colour)
else:
raise Exception("Invalid status field: " + str(field) +
". Must be between 0 and " + str(len(self._statusFields) - 1))
def setStatusbarFg(self, colour, field=None):
if self.hasStatus:
if field is None:
for status in self._statusFields:
status.config(foreground=colour)
elif field >= 0 and field < len(self._statusFields):
self._statusFields[field].config(foreground=colour)
else:
raise Exception("Invalid status field: " + str(field) +
". Must be between 0 and " + str(len(self._statusFields) - 1))
def setStatusbarWidth(self, width, field=None):
if self.hasStatus:
if field is None:
for status in self._statusFields:
status.config(width=width)
elif field >= 0 and field < len(self._statusFields):
self._statusFields[field].config(width=width)
else:
raise Exception("Invalid status field: " + str(field) +
". Must be between 0 and " + str(len(self._statusFields) - 1))
def clearStatusbar(self, field=None):
if self.hasStatus:
if field is None:
for status in self._statusFields:
status.config(text=self._getFormatStatus(""))
elif field >= 0 and field < len(self._statusFields):
self._statusFields[field].config(text=self._getFormatStatus(""))
else:
raise Exception("Invalid status field: " + str(field) +
". Must be between 0 and " + str(len(self._statusFields) - 1))
# formats the string shown in the status bar
def _getFormatStatus(self, text):
text = str(text)
if len(text) == 0:
return ""
elif len(self.header) == 0:
return text
else:
return self.header + ": " + text
#####################################
# TOOLTIPS
#####################################
def _addTooltip(self, item, text, hideWarn=False):
self._loadTooltip()
if not ToolTip:
if not hideWarn:
self.warn("ToolTips unavailable - check tooltip.py is in the lib folder")
elif text == "":
self._disableTooltip(item)
else:
# turn off warnings about tooltips
with PauseLogger():
# if there's already tt, just change it
if hasattr(item, "tt_var"):
item.tt_var.set(text)
# otherwise create one
else:
var = StringVar(self.topLevel)
var.set(text)
tip = ToolTip(item, delay=500, follow_mouse=1, textvariable=var)
item.tooltip = tip
item.tt_var = var
return item.tt_var
def _enableTooltip(self, item):
if hasattr(item, "tooltip"):
item.tooltip.configure(state="normal")
else:
self.warn("Unable to enable tooltip - none present.")
def _disableTooltip(self, item):
if hasattr(item, "tooltip"):
item.tooltip.configure(state="disabled")
else:
self.warn("Unable to disable tooltip - none present.")
#####################################
# FUNCTIONS to show pop-up dialogs
#####################################
def popUp(self, title, message, kind="info", parent=None):
""" simpleGUI - shortener for the various popUps """
if kind == "info": return self.infoBox(title, message, parent)
elif kind == "error": return self.errorBox(title, message, parent)
elif kind == "warning": return self.warningBox(title, message, parent)
elif kind == "yesno": return self.yesNoBox(title, message, parent)
elif kind == "question": return self.questionBox(title, message, parent)
elif kind == "ok": return self.okBox(title, message, parent)
elif kind == "retry": return self.retryBox(title, message, parent)
elif kind == "string": return self.stringBox(title, message, parent)
elif kind == "integer": return self.integerBox(title, message, parent)
elif kind == "float": return self.floatBox(title, message, parent)
elif kind == "text": return self.textBox(title, message, parent)
elif kind == "number": return self.numberBox(title, message, parent)
else: gui.error("Invalid popUp kind: %s, with title: %s", kind, title)
def prompt(self, title, message, kind="string", parent=None):
return self.popUp(title, message, kind, parent)
# function to access the last made pop_up
def getPopUp(self):
return self.topLevel.POP_UP
def infoBox(self, title, message, parent=None):
self.topLevel.update_idletasks()
if parent is None:
MessageBox.showinfo(title, message)
if self.topLevel.displayed:
self._bringToFront()
else:
opts = {"parent": self.widgetManager.get(self.Widgets.SubWindow, parent)}
MessageBox.showinfo(title, message, **opts)
self._bringToFront(parent)
def errorBox(self, title, message, parent=None):
self.topLevel.update_idletasks()
if parent is None:
MessageBox.showerror(title, message)
if self.topLevel.displayed:
self._bringToFront()
else:
opts = {"parent": self.widgetManager.get(self.Widgets.SubWindow, parent)}
MessageBox.showerror(title, message, **opts)
self._bringToFront(parent)
def warningBox(self, title, message, parent=None):
self.topLevel.update_idletasks()
if parent is None:
MessageBox.showwarning(title, message)
if self.topLevel.displayed:
self._bringToFront()
else:
opts = {"parent": self.widgetManager.get(self.Widgets.SubWindow, parent)}
MessageBox.showwarning(title, message, **opts)
self._bringToFront(parent)
def yesNoBox(self, title, message, parent=None):
self.topLevel.update_idletasks()
if parent is None:
return MessageBox.askyesno(title, message)
else:
opts = {"parent": self.widgetManager.get(self.Widgets.SubWindow, parent)}
return MessageBox.askyesno(title=title, message=message, **opts)
def stringBox(self, title, message, parent=None):
self.topLevel.update_idletasks()
if parent is None:
return SimpleDialog.askstring(title, message)
else:
opts = {"parent": self.widgetManager.get(self.Widgets.SubWindow, parent)}
return SimpleDialog.askstring(title=title, message=message, **opts)
def integerBox(self, title, message, parent=None):
self.topLevel.update_idletasks()
if parent is None:
return SimpleDialog.askinteger(title, message)
else:
opts = {"parent": self.widgetManager.get(self.Widgets.SubWindow, parent)}
return SimpleDialog.askinteger(title=title, message=message, **opts)
def floatBox(self, title, message, parent=None):
self.topLevel.update_idletasks()
if parent is None:
return SimpleDialog.askfloat(title, message)
else:
opts = {"parent": self.widgetManager.get(self.Widgets.SubWindow, parent)}
return SimpleDialog.askfloat(title=title, message=message, **opts)
def questionBox(self, title, message, parent=None):
self.topLevel.update_idletasks()
if parent is None:
return MessageBox.askquestion(title, message).lower() == "yes"
else:
opts = {"parent": self.widgetManager.get(self.Widgets.SubWindow, parent)}
return MessageBox.askquestion(title, message, **opts).lower() == "yes"
def okBox(self, title, message, parent=None):
self.topLevel.update_idletasks()
title, message = self._translatePopup(title, message)
if parent is None:
return MessageBox.askokcancel(title, message)
else:
opts = {"parent": self.widgetManager.get(self.Widgets.SubWindow, parent)}
return MessageBox.askokcancel(title, message, **opts)
def retryBox(self, title, message, parent=None):
self.topLevel.update_idletasks()
if parent is None:
return MessageBox.askretrycancel(title, message)
else:
opts = {"parent": self.widgetManager.get(self.Widgets.SubWindow, parent)}
return MessageBox.askretrycancel(title, message, **opts)
def openBox(self, title=None, dirName=None, fileTypes=None, asFile=False, parent=None):
self.topLevel.update_idletasks()
# define options for opening
options = {}
if title is not None:
options['title'] = title
if dirName is not None:
options['initialdir'] = dirName
if fileTypes is not None:
options['filetypes'] = fileTypes
if parent is not None:
options["parent"] = self.widgetManager.get(self.Widgets.SubWindow, parent)
if asFile:
return filedialog.askopenfile(mode="r", **options)
# will return "" if cancelled
else:
return filedialog.askopenfilename(**options)
def saveBox( self, title=None, fileName=None, dirName=None, fileExt=".txt",
fileTypes=None, asFile=False, parent=None):
self.topLevel.update_idletasks()
if fileTypes is None:
fileTypes = [('all files', '.*'), ('text files', '.txt')]
# define options for opening
options = {}
options['defaultextension'] = fileExt
options['filetypes'] = fileTypes
options['initialdir'] = dirName
options['initialfile'] = fileName
options['title'] = title
if parent is not None:
options["parent"] = self.widgetManager.get(self.Widgets.SubWindow, parent)
if asFile:
return filedialog.asksaveasfile(mode='w', **options)
# will return "" if cancelled
else:
return filedialog.asksaveasfilename(**options)
def directoryBox(self, title=None, dirName=None, parent=None):
self.topLevel.update_idletasks()
options = {}
options['initialdir'] = dirName
options['title'] = title
options['mustexist'] = False
if parent is not None:
options["parent"] = self.widgetManager.get(self.Widgets.SubWindow, parent)
fileName = filedialog.askdirectory(**options)
if fileName == "":
return None
else:
return fileName
def colourBox(self, colour='#ff0000', parent=None):
self.topLevel.update_idletasks()
if parent is None:
col = askcolor(colour)
else:
opts = {"parent": self.widgetManager.get(self.Widgets.SubWindow, parent)}
col = askcolor(colour, **opts)
if col[1] is None:
return None
else:
return col[1]
def textBox(self, title="Text Box", question="Enter text", defaultValue=None, parent=None):
self.topLevel.update_idletasks()
if defaultValue is not None:
defaultVar = StringVar(self.topLevel)
defaultVar.set(defaultValue)
else:
defaultVar = None
if parent is None:
parent = self.topLevel
else:
parent = self.widgetManager.get(self.Widgets.SubWindow, parent)
return TextDialog(parent, title, question, defaultVar=defaultVar).result
def numberBox(self, title="Number Box", question="Enter a number", parent=None):
return self.numBox(title, question, parent)
def numBox(self, title="Number Box", question="Enter a number", parent=None):
self.topLevel.update_idletasks()
if parent is None:
parent = self.topLevel
else:
parent = self.widgetManager.get(self.Widgets.SubWindow, parent)
return NumDialog(parent, title, question).result
############################################################################
#### ******* ------ CLASS MAKERS FROM HERE ------ *********** #########
############################################################################
#####################################
# Named classes for containing groups
#####################################
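# NB: these widget classes are built inside factory methods, presumably because the
# base classes (frameBase, labelBase, etc.) are only resolved at runtime (plain Tk vs ttk)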
def _makeParentBox(self):
class ParentBox(frameBase, object):
def __init__(self, parent, **opts):
super(ParentBox, self).__init__(parent, **opts)
self.setup()
def setup(self):
pass
# customised config setters
def config(self, cnf=None, **kw):
self.configure(cnf, **kw)
def configure(self, cnf=None, **kw):
# properties to propagate to child widgets
kw = gui.CLEAN_CONFIG_DICTIONARY(**kw)
if "bg" in kw:
for child in self.winfo_children():
gui.SET_WIDGET_BG(child, kw["bg"])
kw = self.processConfig(kw)
# propagate anything left
super(ParentBox, self).config(cnf, **kw)
def processConfig(self, kw):
return kw
return ParentBox
def _makeLabelBox(self):
ParentBox = self._makeParentBox()
class LabelBox(ParentBox):
def setup(self):
self.theLabel = None
self.theWidget = None
return LabelBox
def _makeButtonBox(self):
ParentBox = self._makeParentBox()
class ButtonBox(ParentBox):
def setup(self):
self.theWidget = None
self.theButton = None
return ButtonBox
def _makeWidgetBox(self):
ParentBox = self._makeParentBox()
class WidgetBox(ParentBox):
def setup(self):
self.theWidgets = []
return WidgetBox
def makeListBoxContainer(self):
ParentBox = self._makeParentBox()
class ListBoxContainer(Frame, object):
def __init__(self, parent, **opts):
super(ListBoxContainer, self).__init__(parent)
# customised config setters
def config(self, cnf=None, **kw):
self.configure(cnf, **kw)
def configure(self, cnf=None, **kw):
# clean the config dictionary before propagating
kw = gui.CLEAN_CONFIG_DICTIONARY(**kw)
# propagate anything left
super(ListBoxContainer, self).config(cnf, **kw)
return ListBoxContainer
#####################################
# Simple Separator
#####################################
def _makeSeparator(self):
class Separator(frameBase, object):
def __init__(self, parent, orient="horizontal", *args, **options):
super(Separator, self).__init__(parent, *args, **options)
self.line = frameBase(self)
if orient == "horizontal":
self.line.config(relief="ridge", height=2, width=100, borderwidth=1)
self.line.pack(padx=5, pady=5, fill="x", expand=1)
else:
self.line.config(relief="ridge", height=100, width=2, borderwidth=1)
self.line.pack(padx=5, pady=5, fill="y", expand=1)
def config(self, cnf=None, **kw):
self.configure(cnf, **kw)
def configure(self, cnf=None, **kw):
if "fg" in kw:
self.line.config(bg=kw.pop("fg"))
super(Separator, self).config(cnf, **kw)
return Separator
#####################################
# Drag Grip Label Class
#####################################
def _makeGrip(self):
class Grip(labelBase, object):
gray25 = BitmapImage(data="""
#define im_width 16
#define im_height 16
static char im_bits[] = {
0x88, 0x88, 0x22, 0x22, 0x88, 0x88, 0x22, 0x22,
0x88, 0x88, 0x22, 0x22, 0x88, 0x88, 0x22, 0x22,
0x88, 0x88, 0x22, 0x22, 0x88, 0x88, 0x22, 0x22,
0x88, 0x88, 0x22, 0x22, 0x88, 0x88, 0x22, 0x22,
};
""")
def __init__(self, *args, **kwargs):
super(Grip, self).__init__(image=self.gray25, *args, **kwargs)
self.config(cursor="fleur", anchor=CENTER)
self.bind("<ButtonPress-1>", self.StartMove)
self.bind("<ButtonRelease-1>", self.StopMove)
self.bind("<B1-Motion>", self.OnMotion)
def StartMove(self, event):
self.x = event.x
self.y = event.y
def StopMove(self, event):
self.x = None
self.y = None
def OnMotion(self, event):
parent = self.winfo_toplevel()
deltax = event.x - self.x
deltay = event.y - self.y
x = parent.winfo_x() + deltax
y = parent.winfo_y() + deltay
parent.geometry("+%s+%s" % (x, y))
return Grip
#####################################
# Hyperlink Class
#####################################
@staticmethod
def _makeLink():
class Link(labelBase, object):
def __init__(self, *args, **kwargs):
self.useTtk = kwargs.pop('useTtk',False)
super(Link, self).__init__(*args, **kwargs)
self.fg = "#0000ff"
self.overFg="#3366ff"
if not self.useTtk:
self.config(fg=self.fg, takefocus=1)#, highlightthickness=0)
else:
self.config(style="Link.TLabel")
self.DEFAULT_TEXT = ""
if gui.GET_PLATFORM() == gui.MAC:
self.config(cursor="pointinghand")
elif gui.GET_PLATFORM() in [gui.WINDOWS, gui.LINUX]:
self.config(cursor="hand2")
self.bind("<Enter>", self.enter)
self.bind("<Leave>", self.leave)
def enter(self, e):
if self.useTtk:
self.config(style="LinkOver.TLabel")
else:
self.config(fg=self.overFg)
def leave(self, e):
if self.useTtk:
self.config(style="Over.TLabel")
else:
self.config(fg=self.fg)
def registerCallback(self, callback):
self.bind("<Button-1>", callback)
self.bind("<Return>", callback)
self.bind("<space>", callback)
def launchBrowser(self, event):
webbrowser.open_new(r"" + self.page)
# webbrowser.open_new_tab(self.page)
def registerWebpage(self, page):
if not page.startswith("http"):
raise InvalidURLError("Invalid URL: " + page + " (it should begin as http://)")
self.page = page
self.bind("<Button-1>", self.launchBrowser)
self.bind("<Return>", self.launchBrowser)
self.bind("<space>", self.launchBrowser)
def config(self, **kw):
self.configure(**kw)
def configure(self, **kw):
kw = gui.CLEAN_CONFIG_DICTIONARY(**kw)
if "text" in kw:
self.DEFAULT_TEXT = kw["text"]
super(Link, self).config(**kw)
def cget(self, option):
if option == "text" and hasattr(self, 'page'):
return self.page
return super(Link, self).cget(option)
return Link
#######################
# Upgraded scale - http://stackoverflow.com/questions/42843425/change-trough-increment-in-python-tkinter-scale-without-affecting-slider/
#######################
def _makeAjScale(self):
class AjScale(scaleBase, object):
'''a scale where a trough click jumps by a specified increment instead of the resolution'''
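# usage sketch (increment is this class's extra kwarg; the rest is standard Scale):
#   scale = AjScale(parent, increment=5, from_=0, to=100)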
def __init__(self, master=None, **kwargs):
self.increment = kwargs.pop('increment',1)
super(AjScale, self).__init__(master, **kwargs)
self.bind('<Button-1>', self.jump)
def jump(self, event):
clicked = self.identify(event.x, event.y)
return self._jump(clicked)
def _jump(self, clicked):
if clicked == 'trough1':
self.set(self.get() - self.increment)
elif clicked == 'trough2':
self.set(self.get() + self.increment)
else:
return None
return 'break'
return AjScale
#####################################
# appJar Frame
#####################################
def _makeAjFrame(self):
class ajFrame(frameBase, object):
def __init__(self, parent, *args, **options):
super(ajFrame, self).__init__(parent, *args, **options)
return ajFrame
#########################
# Class to provide auto-completion on Entry boxes
# inspired by: https://gist.github.com/uroshekic/11078820
#########################
def _makeAutoCompleteEntry(self):
### Create the dynamic class
class AutoCompleteEntry(entryBase, object):
def __init__(self, words, tl, *args, **kwargs):
super(AutoCompleteEntry, self).__init__(*args, **kwargs)
self.allWords = words
self.allWords.sort()
self.topLevel = tl
# store variable - so we can see when it changes
self.var = self["textvariable"] = StringVar()
self.var.auto_id = self.var.trace('w', self.textChanged)
# register events
self.bind("<Right>", self.selectWord)
self.bind("<Return>", self.selectWord)
self.bind("<Up>", self.moveUp)
self.bind("<Down>", self.moveDown)
self.bind("<FocusOut>", self.closeList, add="+")
self.bind("<Escape>", self.closeList, add="+")
# no list box - yet
self.listBoxShowing = False
self.rows = 10
# customised config setters
def config(self, cnf=None, **kw):
self.configure(cnf, **kw)
def configure(self, cnf=None, **kw):
kw = gui.CLEAN_CONFIG_DICTIONARY(**kw)
if "font" in kw:
self.listFont = kw["font"]
# propagate anything left
super(AutoCompleteEntry, self).config(cnf, **kw)
def removeWord(self, word):
if word in self.allWords:
self.allWords.remove(word)
def addWords(self, words):
# strings are iterable in Python 3, so check explicitly for a single word
if isinstance(words, str) or not hasattr(words, "__iter__"):
words = [words]
for word in words:
if word not in self.allWords:
self.allWords.append(word)
self.allWords.sort()
def changeWords(self, words):
self.allWords = words
self.allWords.sort()
def setNumRows(self, rows):
self.rows = rows
# function to see if words match
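# (a case-insensitive prefix match, with the typed value regex-escaped)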
def checkMatch(self, fieldValue, acListEntry):
pattern = re.compile(re.escape(fieldValue) + '.*', re.IGNORECASE)
return re.match(pattern, acListEntry)
# function to get all matches as a list
def getMatches(self):
return [w for w in self.allWords if self.checkMatch(self.var.get(), w)]
# called when typed in entry
def textChanged(self, name, index, mode):
# if no text - close list
if self.var.get() == '':
self.closeList()
else:
if not self.listBoxShowing:
self.makeListBox()
self.popListBox()
# add words to the list
def popListBox(self):
if self.listBoxShowing:
self.listbox.delete(0, END)
shownWords = self.getMatches()
if shownWords:
for w in shownWords:
self.listbox.insert(END, w)
self.selectItem(0)
# function to create & show an empty list box
def makeListBox(self):
self.listbox = Listbox(self.topLevel, width=self["width"]-8, height=8)
self.listbox.config(height=self.rows)
# self.listbox.config(bg=self.cget("bg"), selectbackground=self.cget("selectbackground"))
# self.listbox.config(fg=self.cget("fg"))
if hasattr(self, "listFont"):
self.listbox.config(font=self.listFont)
self.listbox.bind("<Button-1>", self.mouseClickBox)
self.listbox.bind("<Right>", self.selectWord)
self.listbox.bind("<Return>", self.selectWord)
x = self.winfo_rootx() - self.topLevel.winfo_rootx()
y = self.winfo_rooty() - self.topLevel.winfo_rooty() + self.winfo_height()
self.listbox.place(x=x, y=y)
self.listBoxShowing = True
# function to handle a mouse click in the list box
def mouseClickBox(self, e=None):
self.selectItem(self.listbox.nearest(e.y))
self.selectWord(e)
# function to close/delete list box
def closeList(self, event=None):
if self.listBoxShowing:
self.listbox.destroy()
self.listBoxShowing = False
# copy word from list to entry, close list
def selectWord(self, event):
if self.listBoxShowing:
self.var.set(self.listbox.get(ACTIVE))
self.icursor(END)
self.closeList()
return "break"
# wrappers for up/down arrows
def moveUp(self, event):
return self.arrow("UP")
def moveDown(self, event):
return self.arrow("DOWN")
# function for handling up/down keys
def arrow(self, direction):
if not self.listBoxShowing:
self.makeListBox()
self.popListBox()
curItem = 0
numItems = self.listbox.size()
else:
numItems = self.listbox.size()
curItem = self.listbox.curselection()
if curItem == ():
curItem = -1
else:
curItem = int(curItem[0])
if direction == "UP" and curItem > 0:
curItem -= 1
elif direction == "UP" and curItem <= 0:
curItem = numItems - 1
elif direction == "DOWN" and curItem < numItems - 1:
curItem += 1
elif direction == "DOWN" and curItem == numItems - 1:
curItem = 0
self.selectItem(curItem)
# stop the event propagating
return "break"
# function to select the specified item
def selectItem(self, position):
numItems = self.listbox.size()
self.listbox.selection_clear(0, numItems - 1)
self.listbox.see(position) # Scroll!
self.listbox.selection_set(first=position)
self.listbox.activate(position)
# return the dynamic class
return AutoCompleteEntry
#####################################
# Tree Widget Class
# https://www.safaribooksonline.com/library/view/python-cookbook-2nd/0596007973/ch11s11.html
# idlelib -> TreeWidget.py
# modify minidom - https://wiki.python.org/moin/MiniDom
#####################################
def _makeAjTreeNode(self):
class AjTreeNode(TreeNode, object):
def __init__(self, canvas, parent, item):
super(AjTreeNode, self).__init__(canvas, parent, item)
self.bgColour = None
self.fgColour = None
self.bgHColour = None
self.fgHColour = None
# called (if set) when a leaf is edited
self.editEvent = None
if self.parent:
self.bgColour = self.parent.bgColour
self.fgColour = self.parent.fgColour
self.bgHColour = self.parent.bgHColour
self.fgHColour = self.parent.fgHColour
self.editEvent = self.parent.editEvent
# customised config setters
def config(self, cnf=None, **kw):
self.configure(cnf, **kw)
def configure(self, cnf=None, **kw):
# extract the colour properties & apply them to the tree
kw = gui.CLEAN_CONFIG_DICTIONARY(**kw)
if "bg" in kw:
self.setBgColour(kw.pop("bg"))
if "fg" in kw:
self.setFgColour(kw.pop("fg"))
# # propagate anything left
# super(AjTreeNode, self).config(cnf, **kw)
def registerEditEvent(self, func):
self.editEvent = func
for c in self.children:
c.registerEditEvent(func)
def setBgColour(self, colour):
self.canvas.config(background=colour)
self.bgColour = colour
self._doUpdateColour()
def setFgColour(self, colour):
self.fgColour = colour
self._doUpdateColour()
def setBgHColour(self, colour):
self.bgHColour = colour
self._doUpdateColour()
def setFgHColour(self, colour):
self.fgHColour = colour
self._doUpdateColour()
def setAllColours(self, bg, fg, bgH, fgH):
self.canvas.config(background=bg)
self.bgColour = bg
self.fgColour = fg
self.bgHColour = bgH
self.fgHColour = fgH
self._doUpdateColour()
def _doUpdateColour(self):
self._updateColours(self.bgColour, self.bgHColour, self.fgColour, self.fgHColour)
self.update()
def _updateColours(self, bgCol, bgHCol, fgCol, fgHCol):
self.bgColour = bgCol
self.fgColour = fgCol
self.bgHColour = bgHCol
self.fgHColour = fgHCol
for c in self.children:
c._updateColours(bgCol, bgHCol, fgCol, fgHCol)
# override parent function, so that we can change the label's background colour
def drawtext(self):
super(AjTreeNode, self).drawtext()
self.colourLabels()
# override parent function, so that we can generate an event on finish editing
def edit_finish(self, event=None):
super(AjTreeNode, self).edit_finish(event)
if self.editEvent is not None:
self.editEvent()
def colourLabels(self):
try:
if not self.selected:
self.label.config(background=self.bgColour, fg=self.fgColour)
else:
self.label.config(background=self.bgHColour, fg=self.fgHColour)
except:
pass
def getSelectedText(self):
item = self.getSelected()
if item is not None:
return item.GetText(), item.getAttribute()
else:
return None
def getSelected(self):
if self.selected:
return self.item
else:
for c in self.children:
val = c.getSelected()
if val is not None:
return val
return None
return AjTreeNode
def _makeAjTreeData(self):
# implementation of container for XML data
# functions implemented as specified in skeleton
class AjTreeData(TreeItem, object):
def __init__(self, node):
self.node = node
self.dblClickFunc = None
self.clickFunc = None
self.treeTitle = None
self.canEdit = True
# REQUIRED FUNCTIONS
# called whenever the tree expands
def GetText(self):
node = self.node
if node.nodeType == node.ELEMENT_NODE:
return node.nodeName
elif node.nodeType == node.TEXT_NODE:
return node.nodeValue
def getAttribute(self, att='id'):
try: return self.node.attributes[att].value
except: return None
def IsEditable(self):
return self.canEdit and not self.node.hasChildNodes()
def SetText(self, text):
self.node.replaceWholeText(text)
def IsExpandable(self):
return self.node.hasChildNodes()
def GetIconName(self):
if self.clickFunc is not None:
self.clickFunc(self.treeTitle, self.getAttribute())
if not self.IsExpandable():
return "python" # change to file icon
def GetSubList(self):
children = self.node.childNodes
prelist = [AjTreeData(node) for node in children]
itemList = [item for item in prelist if item.GetText().strip()]
for item in itemList:
item.registerDblClick(self.treeTitle, self.dblClickFunc)
item.registerClick(self.treeTitle, self.clickFunc)
item.canEdit = self.canEdit
return itemList
def OnDoubleClick(self):
if self.IsEditable():
# TO DO: start editing this node...
pass
if self.dblClickFunc is not None:
self.dblClickFunc(self.treeTitle, self.getAttribute())
# EXTRA FUNCTIONS
# TODO: can only set before calling go()
def setCanEdit(self, value=True):
self.canEdit = value
# TODO: can only set before calling go()
def registerDblClick(self, title, func):
self.treeTitle = title
self.dblClickFunc = func
# TODO: can only set before calling go()
def registerClick(self, title, func):
self.treeTitle = title
self.clickFunc = func
# not used - for DEBUG
def getSelected(self, spaces=1):
if spaces == 1:
gui.trace("%s", self.node.tagName)
for c in self.node.childNodes:
if gui.GET_WIDGET_TYPE(c) == "Element":
gui.trace("%s >> %s", " "*spaces, c.tagName)
node = AjTreeData(c)
node.getSelected(spaces + 2)
elif gui.GET_WIDGET_TYPE(c) == "Text":
val = c.data.strip()
if len(val) > 0:
gui.trace("%s >>>> %s", " "*spaces, val)
return AjTreeData
############################################################################
#### ******* ------ CLASS DEFINITIONS FROM HERE ------ *********** #########
############################################################################
#####################################
# appJar OptionMenu
# allows dropDown to be configure at the same time
#####################################
class ajOption(OptionMenu, object):
def __init__(self, parent, *args, **options):
super(ajOption, self).__init__(parent, *args, **options)
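# resolve the internal dropdown Menu widget, so the font can be applied to it as well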
self.dropDown = self.nametowidget(self.menuname)
self.dropDown.configure(font=options.pop('font', None))
def config(self, **args):
super(ajOption, self).config(**args)
self.dropDown.configure(font=args.pop('font', None))
#####################################
# ProgressBar Class
# from: http://tkinter.unpythonic.net/wiki/ProgressMeter
# A gradient fill will be applied to the Meter
#####################################
class Meter(Frame, object):
def __init__(self, master, width=100, height=20,
bg='#FFFFFF', fillColour='orchid1',
value=0.0, text=None, font=None,
fg='#000000', *args, **kw):
# call the super constructor
super(Meter, self).__init__(master, bg=bg,
width=width, height=height, relief='ridge', bd=3, *args, **kw)
# remember the starting value
self._value = value
self._colour = fillColour
self._midFill = fg
# create the canvas
self._canv = Canvas(self, bg=self['bg'],
width=self['width'], height=self['height'],
highlightthickness=0, relief='flat', bd=0)
self._canv.pack(fill='both', expand=1)
# create the text
width, height = self.getWH(self._canv)
self._text = self._canv.create_text(
width / 2, height / 2, text='', fill=fg)
if font:
self._canv.itemconfigure(self._text, font=font)
self.set(value, text)
self.moveText()
# bind refresh event
self.bind('<Configure>', self._update_coords)
# customised config setters
def config(self, cnf=None, **kw):
self.configure(cnf, **kw)
def configure(self, cnf=None, **kw):
# properties to apply to the canvas & its text item
kw = gui.CLEAN_CONFIG_DICTIONARY(**kw)
if "fill" in kw:
self._colour = kw.pop("fill")
if "fg" in kw:
col = kw.pop("fg")
self._canv.itemconfigure(self._text, fill=col)
self._midFill = col
if "bg" in kw:
self._canv.config(bg=kw.pop("bg"))
if "width" in kw:
self._canv.config(width=kw.pop("width"))
if "height" in kw:
self._canv.config(height=kw.pop("height"))
if "font" in kw:
self._canv.itemconfigure(self._text, font=kw.pop("font"))
super(Meter, self).config(cnf, **kw)
self.makeBar()
# called when resized
def _update_coords(self, event):
'''Updates the position of the text and rectangle inside the canvas
when the size of the widget gets changed.'''
self.makeBar()
self.moveText()
# getter
def get(self):
val = self._value
try:
txt = self._canv.itemcget(self._text, 'text')
except:
txt = None
return val, txt
# update the variables, then call makeBar
def set(self, value=0.0, text=None):
# make the value failsafe:
value = value / 100.0
if value < 0.0:
value = 0.0
elif value > 1.0:
value = 1.0
self._value = value
# if no text is specified use the default percentage string:
if text is None:
text = str(int(round(100 * value))) + ' %'
# set the new text
self._canv.itemconfigure(self._text, text=text)
self.makeBar()
# draw the bar
def makeBar(self):
width, height = self.getWH(self._canv)
start = 0
fin = width * self._value
self.drawLines(width, height, start, fin, self._value, self._colour)
self._canv.update_idletasks()
# move the text
def moveText(self):
width, height = self.getWH(self._canv)
if hasattr(self, "_text"):
self._canv.coords( self._text, width/2, height/2)
# draw gradated lines, in given coordinates
# using the specified colour
def drawLines(self, width, height, start, fin, val, col, tags="gradient"):
'''Draw a gradient'''
# http://stackoverflow.com/questions/26178869/is-it-possible-to-apply-gradient-colours-to-bg-of-tkinter-python-widgets
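# the bar is painted as 1px vertical lines whose colour steps from a darker to a lighter tint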
# remove the lines & midline
self._canv.delete(tags)
self._canv.delete("midline")
# determine start & end colour
(r1, g1, b1) = self.tint(col, -30000)
(r2, g2, b2) = self.tint(col, 30000)
# determine a direction & range
if val < 0:
direction = -1
limit = int(start - fin)
else:
direction = 1
limit = int(fin - start)
# if lines to draw
if limit != 0:
# work out the ratios
r_ratio = float(r2 - r1) / limit
g_ratio = float(g2 - g1) / limit
b_ratio = float(b2 - b1) / limit
# loop through the range of lines, in the right direction
modder = 0
for i in range(int(start), int(fin), direction):
nr = int(r1 + (r_ratio * modder))
ng = int(g1 + (g_ratio * modder))
nb = int(b1 + (b_ratio * modder))
colour = "#%4.4x%4.4x%4.4x" % (nr, ng, nb)
self._canv.create_line(
i, 0, i, height, tags=(tags,), fill=colour)
modder += 1
self._canv.lower(tags)
# draw a midline
self._canv.create_line(start, 0, start, height,
fill=self._midFill, tags=("midline",))
self._canv.update_idletasks()
# function to calculate a tint
def tint(self, col, brightness_offset=1):
''' dim or brighten the specified colour by the specified offset '''
# http://chase-seibert.github.io/blog/2011/07/29/python-calculate-lighterdarker-rgb-colors.html
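# winfo_rgb returns 16-bit channels (0-65535), hence the large offsets & %4.4x formatting above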
rgb_hex = self._canv.winfo_rgb(col)
new_rgb_int = [hex_value + brightness_offset for hex_value in rgb_hex]
# make sure new values are between 0 and 65535
new_rgb_int = [min([65535, max([0, i])]) for i in new_rgb_int]
return new_rgb_int
def getWH(self, widg):
# ISSUES HERE:
# on MAC & LINUX, w_width/w_height always 1
# on WIN, w_height is bigger than r_height - leaving empty space
self._canv.update_idletasks()
r_width = widg.winfo_reqwidth()
r_height = widg.winfo_reqheight()
w_width = widg.winfo_width()
w_height = widg.winfo_height()
max_height = max(r_height, w_height)
max_width = max(r_width, w_width)
return (max_width, max_height)
#####################################
# SplitMeter Class extends the Meter above
# Will fill in the empty space with a second fill colour
# Two colours should be provided - left & right fill
#####################################
class SplitMeter(Meter):
def __init__(self, master, width=100, height=20,
bg='#FFFFFF', leftfillColour='#FF0000', rightfillColour='#0000FF',
value=0.0, text=None, font=None, fg='#000000', *args, **kw):
self._leftFill = leftfillColour
self._rightFill = rightfillColour
super(SplitMeter, self).__init__(master, width=width, height=height,
bg=bg, value=value, text=text, font=font,
fg=fg, *args, **kw)
# override the handling of fill
# list of two colours
def configure(self, cnf=None, **kw):
kw = gui.CLEAN_CONFIG_DICTIONARY(**kw)
if "fill" in kw:
cols = kw.pop("fill")
if not isinstance(cols, list):
raise Exception("SplitMeter requires a list of two colours")
else:
self._leftFill = cols[0]
self._rightFill = cols[1]
# propagate any left over confs
super(SplitMeter, self).configure(cnf, **kw)
def set(self, value=0.0, text=None):
# make the value failsafe:
value = value / 100.0
if value < 0.0:
value = 0.0
elif value > 1.0:
value = 1.0
self._value = value
self.makeBar()
# override the makeBar function
def makeBar(self):
width, height = self.getWH(self._canv)
mid = width * self._value
self.drawLines(width, height, 0, mid, self._value, self._leftFill, tags="left")
self.drawLines(width, height, mid, width, self._value, self._rightFill, tags="right")
#####################################
# DualMeter Class extends the SplitMeter above
# Used to allow bi-directional metering, starting from a mid point
# Two colours should be provided - left & right fill
# A gradient fill will be applied to the Meter
#####################################
class DualMeter(SplitMeter):
def __init__(self, master, width=100, height=20, bg='#FFFFFF',
leftfillColour='#FFC0CB', rightfillColour='#00FF00',
value=None, text=None,
font=None, fg='#000000', *args, **kw):
super(DualMeter, self).__init__(master, width=width, height=height,
bg=bg, leftfillColour=leftfillColour,
rightfillColour=rightfillColour,
value=value, text=text, font=font,
fg=fg, *args, **kw)
def set(self, value=None, text=None):
if value is None:
value=[0,0]
if not hasattr(value, "__iter__"):
raise Exception("DualMeter.set() requires a list of two arguments")
# make copy, and reduce to decimal
vals = [value[0]/100.0, value[1]/100.0]
# normalise
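# (the left value is forced into [-1, 0], the right into [0, 1])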
if vals[0] < -1: vals[0] = -1.0
elif vals[0] > 0: vals[0] = vals[0] * -1
if vals[1] > 1.0: vals[1] = 1.0
elif vals[1] < 0: vals[1] = 0
self._value = vals
# if no text is specified use the default percentage string:
if text is not None:
# set the new text
self._canv.itemconfigure(self._text, text=text)
self.makeBar()
def makeBar(self):
# get range to draw lines
width, height = self.getWH(self._canv)
start = width / 2
l_fin = start + (start * self._value[0])
r_fin = start + (start * self._value[1])
self.drawLines(width, height, start, l_fin, self._value[0], self._leftFill, tags="left")
self.drawLines(width, height, start, r_fin, self._value[1], self._rightFill, tags="right")
#####################################
# Properties Widget
#####################################
class Properties(LabelFrame, object):
def __init__(self, parent, text, props=None, haveLabel=True, *args, **options):
if haveLabel: theText=text
else: theText=""
super(Properties, self).__init__(parent, text=theText, *args, **options)
self.parent = parent
self.config(relief="groove")
self.props = {}
self.cbs = {}
self.title = text
self.cmd = None
self.changingProps = False
self.addProperties(props)
def config(self, cnf=None, **kw):
self.configure(cnf, **kw)
def configure(self, cnf=None, **kw):
# properties to propagate to CheckBoxes
vals = ["bg", "fg", "disabledforeground", "state", "font", "command"]
kw = gui.CLEAN_CONFIG_DICTIONARY(**kw)
# loop through all kw properties received
for k, v in kw.items():
if k in vals:
# and set them on all CheckBoxes if desired
for prop_key in self.cbs:
self.cbs[prop_key][k] = v
if k == "bg":# and gui.GET_PLATFORM() == gui.LINUX:
gui.SET_WIDGET_BG(self.cbs[prop_key], v, True)
# remove any props the LabelFrame can't handle
kw.pop("state", None)
kw.pop("disabledforeground", None)
kw.pop("command", None)
super(Properties, self).config(cnf, **kw)
def addProperties(self, props, callFunction=True):
if props is not None:
for k in sorted(props):
self.addProperty(k, props[k], callFunction=False)
if self.cmd is not None and callFunction:
self.cmd()
def renameProperty(self, prop, newName=None):
if newName is None:
newName = prop
if prop in self.cbs:
self.cbs[prop].config(text=newName)
else:
gui.warn("Unknown property: %s", prop)
def addProperty(self, prop, value=False, callFunction=True):
self.changingProps = True
if prop in self.props:
if value is None:
del self.props[prop]
self.cbs[prop].pack_forget()
del self.cbs[prop]
else:
self.props[prop].set(value)
self.cbs[prop].defaultValue = value
elif prop is not None:
var = BooleanVar()
var.set(value)
var.trace('w', self._propChanged)
cb = Checkbutton(self)
cb.config(
anchor=W,
text=prop,
variable=var,
bg=self.cget("bg"),
font=self.cget("font"),
fg=self.cget("fg"))
cb.defaultValue = value
cb.pack(fill="x", expand=1)
self.props[prop] = var
self.cbs[prop] = cb
else:
self.changingProps = False
raise Exception("Can't add a None property to: ", prop)
# if text is not None: lab.config ( text=text )
if self.cmd is not None and callFunction:
self.cmd()
self.changingProps = False
def _propChanged(self, a,b,c):
if self.cmd is not None and not self.changingProps:
self.cmd()
def getProperties(self):
vals = {}
for k, v in self.props.items():
vals[k] = bool(v.get())
return vals
def clearProperties(self, callFunction=False):
for k, cb in self.cbs.items():
cb.deselect()
if self.cmd is not None and callFunction:
self.cmd()
def resetProperties(self, callFunction=False):
for k, cb in self.cbs.items():
if cb.defaultValue:
cb.select()
else:
cb.deselect()
if self.cmd is not None and callFunction:
self.cmd()
def getProperty(self, prop):
if prop in self.props:
return bool(self.props[prop].get())
else:
raise Exception("Property: " + str(prop) + " not found in Properties: " + self.title)
def setChangeFunction(self, cmd):
self.cmd = cmd
#####################################
# Pie Chart Class
#####################################
class PieChart(Canvas, object):
# constant for available colours
COLOURS = [
"#023fa5", "#7d87b9", "#bec1d4",
"#d6bcc0", "#bb7784", "#8e063b",
"#4a6fe3", "#8595e1", "#b5bbe3",
"#e6afb9", "#e07b91", "#d33f6a",
"#11c638", "#8dd593", "#c6dec7",
"#ead3c6", "#f0b98d", "#ef9708",
"#0fcfc0", "#9cded6", "#d5eae7",
"#f3e1eb", "#f6c4e1", "#f79cd4"]
def __init__(self, container, fracs, bg="#00FF00"):
super(PieChart, self).__init__(container, bd=0, highlightthickness=0, bg=bg)
self.fracs = fracs
self.arcs = []
self._drawPie()
self.bind("<Configure>", self._drawPie)
def _drawPie(self, event=None):
# remove the existing arcs
for arc in self.arcs:
self.delete(arc)
self.arcs = []
# get the width * height
w = self.winfo_width()
h = self.winfo_height()
# scale h&w - so they don't hit the edges
min_w = w * .05
max_w = w * .95
min_h = h * .05
max_h = h * .95
# if we're not in a square
# adjust them to make sure we get a circle
if w > h:
extra = (w * .9 - h * .9) / 2.0
min_w += extra
max_w -= extra
elif h > w:
extra = (h * .9 - w * .9) / 2.0
min_h += extra
max_h -= extra
coord = min_w, min_h, max_w, max_h
pos = col = 0
for key, val in self.fracs.items():
sliceId = "slice" + str(col)
arc = self.create_arc(
coord,
fill=self.COLOURS[col % len(self.COLOURS)],
start=self.frac(pos),
extent=self.frac(val),
activedash=(3, 5),
activeoutline="grey",
activewidth=3,
tag=(sliceId,),
width=1)
self.arcs.append(arc)
# generate a tooltip
if ToolTip is not False:
frac = int(float(val) / sum(self.fracs.values()) * 100)
tip = key + ": " + str(val) + " (" + str(frac) + "%)"
tt = ToolTip(self, tip, delay=500, follow_mouse=1, specId=sliceId)
pos += val
col += 1
def frac(self, curr):
return 360. * curr / sum(self.fracs.values())
def setValue(self, name, value):
if value == 0 and name in self.fracs:
del self.fracs[name]
else:
self.fracs[name] = value
self._drawPie()
#####################################
# errors
#####################################
class ItemLookupError(LookupError):
'''raise this when there's a lookup error for my app'''
pass
class InvalidURLError(ValueError):
    '''raise this when there's an invalid URL for my app'''
pass
#####################################
# ToggleFrame - collapsible frame
# http://stackoverflow.com/questions/13141259/expandable-and-contracting-frame-in-tkinter
#####################################
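# usage (illustrative sketch - child widgets go in getContainer(), not the frame itself):
#   tog = ToggleFrame(root, title="Options")
#   tog.pack(fill="x")
#   Label(tog.getContainer(), text="collapsible content").pack()
#   tog.toggle()    # flips between shown & hidden
#####################################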
class ToggleFrame(Frame, object):
def __init__(self, parent, title="", *args, **options):
super(ToggleFrame, self).__init__(parent, *args, **options)
self.config(relief="raised", borderwidth=2, padx=5, pady=5)
self.showing = True
self.titleFrame = Frame(self)
self.titleFrame.config(bg="DarkGray")
self.titleLabel = Label(self.titleFrame, text=title)
self.DEFAULT_TEXT = title
self.titleLabel.config(font="-weight bold")
self.toggleButton = Button(self.titleFrame, width=2, text='-', command=self.toggle)
self.subFrame = Frame(self, relief="sunken", borderwidth=2)
self.configure(bg="DarkGray")
self.grid_columnconfigure(0, weight=1)
self.titleFrame.grid(row=0, column=0, sticky=EW)
self.titleFrame.grid_columnconfigure(0, weight=1)
self.titleLabel.grid(row=0, column=0)
self.toggleButton.grid(row=0, column=1)
self.subFrame.grid(row=1, column=0, sticky=EW)
self.firstTime = True
def config(self, cnf=None, **kw):
self.configure(cnf, **kw)
def configure(self, cnf=None, **kw):
kw = gui.CLEAN_CONFIG_DICTIONARY(**kw)
if "font" in kw:
self.titleLabel.config(font=kw["font"])
self.toggleButton.config(font=kw["font"])
del(kw["font"])
if "bg" in kw:
self.titleFrame.config(bg=kw["bg"])
self.titleLabel.config(bg=kw["bg"])
self.subFrame.config(bg=kw["bg"])
if gui.GET_PLATFORM() == gui.MAC:
self.toggleButton.config(highlightbackground=kw["bg"])
if "state" in kw:
if kw["state"] == "disabled":
if self.showing:
self.toggle()
self.toggleButton.config(state=kw["state"])
del(kw["state"])
if "text" in kw:
self.titleLabel.config(text=kw.pop("text"))
super(ToggleFrame, self).config(cnf, **kw)
def cget(self, option):
if option == "text":
return self.titleLabel.cget(option)
return super(ToggleFrame, self).cget(option)
def toggle(self):
if not self.showing:
self.subFrame.grid()
self.toggleButton.configure(text='-')
else:
self.subFrame.grid_remove()
self.toggleButton.configure(text='+')
self.showing = not self.showing
def getContainer(self):
return self.subFrame
def stop(self):
self.update_idletasks()
self.titleFrame.config(width=self.winfo_reqwidth())
if self.firstTime:
self.firstTime = False
self.toggle()
def isShowing(self):
return self.showing
#####################################
# Frame Stack
#####################################
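# usage (illustrative sketch - 'change' is an optional callback; returning
# False from it vetoes the frame change):
#   stack = FrameStack(root, start=0)
#   f1 = stack.addFrame()
#   f2 = stack.addFrame()
#   stack.showNextFrame()    # beeps if already on the last frame
#####################################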
class FrameStack(Frame, object):
def __init__(self, parent, beep=True, **opts):
self._change = opts.pop("change", None)
self._start = opts.pop("start", -1)
super(FrameStack, self).__init__(parent, **opts)
# the list of frames
self._frames = []
        self._prevFrame = -1  # match the casing used everywhere else (showFrame, addFrame, etc.)
self._currFrame = -1
self._beep = beep
Grid.rowconfigure(self, 0, weight=1)
Grid.columnconfigure(self, 0, weight=1)
def showFrame(self, num, callFunction=True):
if num < 0 or num >= len(self._frames):
raise IndexError("The selected frame does not exist")
tmp = self._prevFrame
self._prevFrame = self._currFrame
self._currFrame = num
if callFunction and self._change is not None:
if self._change() is False:
self._currFrame = self._prevFrame
self._prevFrame = tmp
return
self._frames[self._currFrame].lift()
def atStart(self):
return self._currFrame == 0
def atEnd(self):
return self._currFrame == len(self._frames)-1
def setStartFrame(self, num):
self._start = num
def setChangeFunction(self, func):
self._change = func
def showNextFrame(self, callFunction=True):
if self._currFrame < len(self._frames) - 1:
self.showFrame(self._currFrame + 1, callFunction)
else:
if self._beep: self.bell()
def showPrevFrame(self, callFunction=True):
if self._currFrame > 0:
self.showFrame(self._currFrame - 1, callFunction)
else:
if self._beep: self.bell()
def showFirstFrame(self, callFunction=True):
if self._currFrame == 0:
if self._beep: self.bell()
else:
self.showFrame(0, callFunction)
def showLastFrame(self, callFunction=True):
if self._currFrame == len(self._frames)-1:
if self._beep: self.bell()
else:
self.showFrame(len(self._frames) - 1, callFunction)
def addFrame(self):
self._frames.append(frameBase(self))
self._prevFrame = self._currFrame
self._currFrame = len(self._frames) - 1
self._frames[self._currFrame].grid(row=0, column=0, sticky=N+S+E+W, padx=0, pady=0)
if self._start > -1 and self._start < len(self._frames):
tmp = self._beep
self._beep = False
self.showFrame(self._start, callFunction=False)
self._beep = tmp
return self._frames[-1]
def getFrame(self, num=None):
if num is None: num = self._currFrame
return self._frames[num]
def getNumFrames(self):
return len(self._frames)
def getCurrentFrame(self):
return self._currFrame
def getPreviousFrame(self):
return self._prevFrame
#####################################
# Paged Window
#####################################
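# usage (illustrative sketch - pages are numbered from 1; 'myCallback' is a placeholder):
#   pager = PagedWindow(root, title="Wizard")
#   page1 = pager.addPage()
#   page2 = pager.addPage()
#   pager.registerPageChangeEvent(myCallback)
#   pager.showPage(1)
#####################################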
class PagedWindow(Frame, object):
def __init__(self, parent, title=None, **opts):
# get the fonts
buttonFont = opts.pop('buttonFont', None)
titleFont = opts.pop('titleFont', None)
# call the super constructor
super(PagedWindow, self).__init__(parent, **opts)
self.config(width=300, height=400)
# globals to hold list of frames(pages) and current page
self.frameStack = FrameStack(self)
self.shouldShowPageNumber = True
self.shouldShowTitle = True
self.title = title
self.navPos = 1
# create the 3 components, including a default container frame
self.titleLabel = Label(self, font=titleFont)
self.prevButton = Button(self, text="PREVIOUS", command=self.showPrev, state="disabled", width=10, font=buttonFont)
self.nextButton = Button(self, text="NEXT", command=self.showNext, state="disabled", width=10, font=buttonFont)
self.prevButton.bind("<Control-Button-1>", self.showFirst)
self.nextButton.bind("<Control-Button-1>", self.showLast)
self.posLabel = Label(self, width=8, font=titleFont)
self.grid_rowconfigure(0, weight=0)
self.grid_rowconfigure(1, weight=1)
self.grid_rowconfigure(2, weight=0)
self.grid_columnconfigure(0, weight=1)
self.grid_columnconfigure(1, weight=1)
self.grid_columnconfigure(2, weight=1)
# grid the navigation components
self.frameStack.grid(row=int(not self.navPos) + 1, column=0, columnspan=3, sticky=N + S + E + W, padx=5, pady=5)
self.prevButton.grid(row=self.navPos + 1, column=0, sticky=N + S + W, padx=5, pady=(0, 5))
self.posLabel.grid(row=self.navPos + 1, column=1, sticky=N + S + E + W, padx=5, pady=(0, 5))
self.nextButton.grid(row=self.navPos + 1, column=2, sticky=N + S + E, padx=5, pady=(0, 5))
# show the title
if self.title is not None and self.shouldShowTitle:
self.titleLabel.config(text=self.title)
self.titleLabel.grid(row=0, column=0, columnspan=3, sticky=N + W + E)
self._updatePageNumber()
def config(self, cnf=None, **kw):
self.configure(cnf, **kw)
def configure(self, cnf=None, **kw):
kw = gui.CLEAN_CONFIG_DICTIONARY(**kw)
if "bg" in kw:
if gui.GET_PLATFORM() == gui.MAC:
self.prevButton.config(highlightbackground=kw["bg"])
self.nextButton.config(highlightbackground=kw["bg"])
self.posLabel.config(bg=kw["bg"])
self.titleLabel.config(bg=kw["bg"])
if "fg" in kw:
            self.posLabel.config(fg=kw["fg"])
self.titleLabel.config(fg=kw["fg"])
kw.pop("fg")
if "prevbutton" in kw:
self.prevButton.config(text=kw.pop("prevbutton"))
if "nextbutton" in kw:
self.nextButton.config(text=kw.pop("nextbutton"))
if "title" in kw:
self.title = kw.pop("title")
self.showTitle()
if "showtitle" in kw:
kw.pop("showtitle")
if "showpagenumber" in kw:
self.shouldShowPageNumber = kw.pop("showpagenumber")
self._updatePageNumber()
if "command" in kw:
self.registerPageChangeEvent(kw.pop("command"))
super(PagedWindow, self).config(cnf, **kw)
# functions to change the labels of the two buttons
def setPrevButton(self, title):
self.prevButton.config(text=title)
def setNextButton(self, title):
self.nextButton.config(text=title)
def setNavPositionTop(self, top=True):
oldNavPos = self.navPos
pady = (0, 5)
if top: self.navPos = 0
else: self.navPos = 1
if oldNavPos != self.navPos:
if self.navPos == 0:
self.grid_rowconfigure(1, weight=0)
self.grid_rowconfigure(2, weight=1)
pady = (5, 0)
else:
self.grid_rowconfigure(1, weight=1)
self.grid_rowconfigure(2, weight=0)
# grid the navigation components
self.frameStack.grid_remove()
self.prevButton.grid_remove()
self.posLabel.grid_remove()
self.nextButton.grid_remove()
self.frameStack.grid(row=int(not self.navPos) + 1, column=0, columnspan=3, sticky=N + S + E + W, padx=5, pady=5)
self.prevButton.grid( row=self.navPos + 1, column=0, sticky=S + W, padx=5, pady=pady)
self.posLabel.grid( row=self.navPos + 1, column=1, sticky=S + E + W, padx=5, pady=pady)
self.nextButton.grid( row=self.navPos + 1, column=2, sticky=S + E, padx=5, pady=pady)
# whether to showPageNumber
def showPageNumber(self, val=True):
self.shouldShowPageNumber = val
self._updatePageNumber()
def setTitle(self, title):
self.title = title
self.showTitle()
def showTitle(self, val=True):
self.shouldShowTitle = val
if self.title is not None and self.shouldShowTitle:
self.titleLabel.config(text=self.title, font="-weight bold")
self.titleLabel.grid(row=0, column=0, columnspan=3, sticky=N + W + E)
else:
self.titleLabel.grid_remove()
# function to update the contents of the label
def _updatePageNumber(self):
if self.shouldShowPageNumber:
self.posLabel.config(
text=str(self.frameStack.getCurrentFrame() + 1) + "/" + str(self.frameStack.getNumFrames()))
else:
self.posLabel.config(text="")
# update the buttons
if self.frameStack.getNumFrames() == 1: # only 1 page - no buttons
self.prevButton.config(state="disabled")
self.nextButton.config(state="disabled")
elif self.frameStack.getCurrentFrame() == 0:
self.prevButton.config(state="disabled")
self.nextButton.config(state="normal")
elif self.frameStack.getCurrentFrame() == self.frameStack.getNumFrames() - 1:
self.prevButton.config(state="normal")
self.nextButton.config(state="disabled")
else:
self.prevButton.config(state="normal")
self.nextButton.config(state="normal")
# get current page number
def getPageNumber(self):
return self.frameStack.getCurrentFrame() + 1
# register a function to call when the page changes
def registerPageChangeEvent(self, event):
self.frameStack.setChangeFunction(event)
# adds a new page, making it visible
def addPage(self):
f = self.frameStack.addFrame()
return f
def stopPagedWindow(self):
self.showPage(1)
# function to display the specified page
def showPage(self, page):
try:
self.frameStack.showFrame(page-1)
self._updatePageNumber()
except:
raise Exception("Invalid page number: " + str(page) + ". Must be between 1 and " + str(self.frameStack.getNumFrames()))
def showFirst(self, event=None):
self.frameStack.showFirstFrame()
self._updatePageNumber()
def showLast(self, event=None):
self.frameStack.showLastFrame()
self._updatePageNumber()
def showPrev(self, event=None):
self.frameStack.showPrevFrame()
self._updatePageNumber()
def showNext(self, event=None):
self.frameStack.showNextFrame()
self._updatePageNumber()
class Page(Frame, object):
def __init__(self, parent, **opts):
super(Page, self).__init__(parent)
self.config(relief=RIDGE, borderwidth=2)
self.container = parent
#########################
# Pane class - used in PanedWindows
#########################
class Pane(Frame, object):
def __init__(self, parent, **opts):
super(Pane, self).__init__(parent)
#####################################
# scrollable frame...
# http://effbot.org/zone/tkinter-autoscrollbar.htm
#####################################
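# usage (illustrative sketch - must be managed with grid; pack/place raise an Exception):
#   sb = AutoScrollbar(frame)
#   sb.grid(row=0, column=1, sticky=N+S)
#   canvas.config(yscrollcommand=sb.set)
#   sb.config(command=canvas.yview)
#####################################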
class AutoScrollbar(Scrollbar, object):
def __init__(self, parent, **opts):
super(AutoScrollbar, self).__init__(parent, **opts)
self.hidden = None
# a scrollbar that hides itself if it's not needed
# only works if you use the grid geometry manager
def set(self, lo, hi):
if float(lo) <= 0.0 and float(hi) >= 1.0:
            # call "grid remove" directly via tk (grid_remove was missing from older Tkinters)
self.tk.call("grid", "remove", self)
self.hidden = True
else:
self.grid()
self.hidden = False
super(AutoScrollbar, self).set(lo, hi)
def pack(self, **kw):
raise Exception("cannot use pack with this widget")
def place(self, **kw):
raise Exception("cannot use place with this widget")
# customised config setters
def config(self, cnf=None, **kw):
self.configure(cnf, **kw)
def configure(self, cnf=None, **kw):
        # clean up the config dictionary
kw = gui.CLEAN_CONFIG_DICTIONARY(**kw)
if "fg" in kw:
kw.pop("fg")
# propagate anything left
super(AutoScrollbar, self).config(cnf, **kw)
#######################
# Widget to give TextArea extra functionality
# http://code.activestate.com/recipes/464635-call-a-callback-when-a-tkintertext-is-modified/
#######################
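# usage (illustrative sketch - TextParent is a mixin, use the AjText classes below;
# 'onChange' is a placeholder callback):
#   txt = AjText(root)
#   txt.pack(fill="both", expand=True)
#   txt.bindChangeEvent(onChange)
#   txt.highlightPattern("TODO", "AJ_BOLD")
#######################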
class TextParent(object):
def _init(self):
self.clearModifiedFlag()
self.bind('<<Modified>>', self._beenModified)
self.__hash = None
self.callFunction = True
self.oldCallFunction = True
self.TAGS = ["UNDERLINE", "BOLD", "ITALIC", "BOLD_ITALIC"]
# create default fonts, and assign to tags
self._normalFont = tkFont.Font(family="Helvetica", size=12, slant="roman", weight="normal")
self._boldFont = tkFont.Font(family="Helvetica", size=12, weight="bold")
self._italicFont = tkFont.Font(family="Helvetica", size=12, slant="italic")
self._boldItalicFont = tkFont.Font(family="Helvetica", size=12, weight="bold", slant="italic")
self.tag_config("AJ_BOLD", font=self._boldFont)
self.tag_config("AJ_ITALIC", font=self._italicFont)
self.tag_config("AJ_BOLD_ITALIC", font=self._boldItalicFont)
self.tag_config("AJ_UNDERLINE", underline=True)
self.configure(font=self._normalFont)
def verifyFontTag(self, tag):
tag = tag.upper().strip()
if tag not in self.TAGS:
raise Exception("Invalid tag: " + tag + ". Must be one of: " + str(self.TAGS))
else:
return tag
def setFont(self, **kwargs):
""" only looking for size & family params """
self._normalFont.config(**kwargs)
self._boldFont.config(**kwargs)
self._italicFont.config(**kwargs)
self._boldItalicFont.config(**kwargs)
def pauseCallFunction(self, callFunction=False):
self.oldCallFunction = self.callFunction
self.callFunction = callFunction
def resumeCallFunction(self):
self.callFunction = self.oldCallFunction
def _beenModified(self, event=None):
# stop recursive calls
if self._resetting_modified_flag: return
self.clearModifiedFlag()
self.beenModified(event)
def bindChangeEvent(self, function):
self.function = function
def beenModified(self, event=None):
# call the user's function
if hasattr(self, 'function') and self.callFunction:
self.function()
def clearModifiedFlag(self):
self._resetting_modified_flag = True
try:
# reset the modified flag (this raises a modified event!)
self.tk.call(self._w, 'edit', 'modified', 0)
finally:
self._resetting_modified_flag = False
def getText(self):
return self.get('1.0', END + '-1c')
def getTextAreaHash(self):
text = self.getText()
m = hashlib.md5()
if PYTHON2:
m.update(text)
else:
m.update(str.encode(text))
md5 = m.digest()
# md5 = hashlib.md5(str.encode(text)).digest()
return md5
def highlightPattern(self, pattern, tag, start="1.0", end="end", regexp=False):
'''Apply the given tag to all text that matches the given pattern
If 'regexp' is set to True, pattern will be treated as a regular
expression according to Tcl's regular expression syntax.
'''
start = self.index(start)
end = self.index(end)
self.mark_set("matchStart", start)
self.mark_set("matchEnd", start)
self.mark_set("searchLimit", end)
count = IntVar()
while True:
index = self.search(pattern, "matchEnd","searchLimit", count=count, regexp=regexp)
if index == "": break
if count.get() == 0: break # degenerate pattern which matches zero-length strings
self.mark_set("matchStart", index)
self.mark_set("matchEnd", "%s+%sc" % (index, count.get()))
self.tag_add(tag, "matchStart", "matchEnd")
# uses multiple inheritance
class AjText(Text, TextParent):
def __init__(self, parent, **opts):
super(AjText, self).__init__(parent, **opts)
self._init() # call TextParent initialiser
class AjScrolledText(scrolledtext.ScrolledText, TextParent):
def __init__(self, parent, **opts):
super(AjScrolledText, self).__init__(parent, **opts)
self._init() # call TextParent initialiser
#######################
# Widget to look like a label, but allow selection...
#######################
class SelectableLabel(Entry, object):
def __init__(self, parent, **opts):
super(SelectableLabel, self).__init__(parent)
self.configure(relief=FLAT, state="readonly", readonlybackground='#FFFFFF', fg='#000000', highlightthickness=0)
self.var = StringVar(parent)
self.configure(textvariable=self.var)
self.configure(**opts)
def cget(self, kw):
if kw == "text":
return self.var.get()
else:
return super(SelectableLabel, self).cget(kw)
def config(self, cnf=None, **kw):
self.configure(cnf, **kw)
def configure(self, cnf=None, **kw):
kw = gui.CLEAN_CONFIG_DICTIONARY(**kw)
if "text" in kw:
self.var.set(kw.pop("text"))
if "bg" in kw:
kw["readonlybackground"] = kw.pop("bg")
# propagate anything left
super(SelectableLabel, self).config(cnf, **kw)
#######################
# Frame with built in scrollbars and canvas for placing stuff on
# http://effbot.org/zone/tkinter-autoscrollbar.htm
# Modified with help from idlelib TreeWidget.py
#######################
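# usage (illustrative sketch - child widgets go in .interior, not the pane itself):
#   sp = ScrollPane(root, resize=True)
#   sp.pack(fill="both", expand=True)
#   for i in range(50):
#       Label(sp.interior, text="row " + str(i)).pack()
#######################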
class ScrollPane(frameBase, object):
def __init__(self, parent, resize=False, disabled=None, **opts):
super(ScrollPane, self).__init__(parent)
# self.config(padx=1, pady=1, bd=0)
self.resize = resize
self.hDisabled = disabled == "horizontal"
self.vDisabled = disabled == "vertical"
# make the ScrollPane expandable
self.grid_rowconfigure(0, weight=1)
self.grid_columnconfigure(0, weight=1)
if not self.vDisabled:
self.vscrollbar = AutoScrollbar(self)
opts['yscrollcommand'] = self.vscrollbar.set
self.vscrollbar.grid(row=0, column=1, sticky=N + S + E)
if not self.hDisabled:
self.hscrollbar = AutoScrollbar(self, orient=HORIZONTAL)
opts['xscrollcommand'] = self.hscrollbar.set
self.hscrollbar.grid(row=1, column=0, sticky=E + W + S)
self.canvas = Canvas(self, **opts)
self.canvas.config(highlightthickness=0, bd=0)
self.canvas.grid(row=0, column=0, sticky=N + S + E + W)
if not self.vDisabled:
self.vscrollbar.config(command=self.canvas.yview)
if not self.hDisabled:
self.hscrollbar.config(command=self.canvas.xview)
self.canvas.bind("<Enter>", self._mouseEnter)
self.canvas.bind("<Leave>", self._mouseLeave)
self.b_ids = []
self.canvas.focus_set()
self.interior = frameBase(self.canvas)
self.interior_id = self.canvas.create_window(0, 0, window=self.interior, anchor=NW)
if self.resize:
self.canvas.bind('<Configure>', self._updateWidth)
else:
self.interior.bind('<Configure>', self._updateWidth)
def _updateWidth(self, event):
if self.resize:
canvas_width = event.width
if canvas_width == 0:
canvas_width = self.canvas.winfo_width()
interior_width = self.interior.winfo_reqwidth()
if canvas_width < interior_width: canvas_width = interior_width
self.canvas.itemconfig(self.interior_id, width=canvas_width)
else:
size = (self.interior.winfo_reqwidth(), self.interior.winfo_reqheight())
self.canvas.config(scrollregion="0 0 %s %s" % size)
def config(self, **kw):
self.configure(**kw)
def configure(self, **kw):
kw = gui.CLEAN_CONFIG_DICTIONARY(**kw)
if "bg" in kw:
self.canvas.config(bg=kw["bg"])
self.interior.config(bg=kw["bg"])
if "width" in kw:
self.canvas.config(width=kw["width"])
if "height" in kw:
self.canvas.config(height=kw["height"])
super(ScrollPane, self).configure(**kw)
# unbind any saved bind ids
def _unbindIds(self):
if len(self.b_ids) == 0:
return
if gui.GET_PLATFORM() == gui.LINUX:
self.canvas.unbind("<4>", self.b_ids[0])
self.canvas.unbind("<5>", self.b_ids[1])
self.canvas.unbind("<Shift-4>", self.b_ids[2])
self.canvas.unbind("<Shift-5>", self.b_ids[3])
else: # Windows and MacOS
self.canvas.unbind("<MouseWheel>", self.b_ids[0])
self.canvas.unbind("<Shift-MouseWheel>", self.b_ids[1])
self.canvas.unbind("<Key-Prior>", self.b_ids[4])
self.canvas.unbind("<Key-Next>", self.b_ids[5])
self.canvas.unbind("<Key-Up>", self.b_ids[6])
self.canvas.unbind("<Key-Down>", self.b_ids[7])
self.canvas.unbind("<Key-Left>", self.b_ids[8])
self.canvas.unbind("<Key-Right>", self.b_ids[9])
self.canvas.unbind("<Home>", self.b_ids[10])
self.canvas.unbind("<End>", self.b_ids[11])
self.b_ids = []
# bind mouse scroll to this widget only when mouse is over
def _mouseEnter(self, event):
self._unbindIds()
if gui.GET_PLATFORM() == gui.LINUX:
self.b_ids.append(self.canvas.bind_all("<4>", self._vertMouseScroll))
self.b_ids.append(self.canvas.bind_all("<5>", self._vertMouseScroll))
self.b_ids.append(self.canvas.bind_all("<Shift-4>", self._horizMouseScroll))
self.b_ids.append(self.canvas.bind_all("<Shift-5>", self._horizMouseScroll))
else: # Windows and MacOS
self.b_ids.append(self.canvas.bind_all("<MouseWheel>", self._vertMouseScroll))
self.b_ids.append(self.canvas.bind_all("<Shift-MouseWheel>", self._horizMouseScroll))
self.b_ids.append(None)
self.b_ids.append(None)
self.b_ids.append(self.canvas.bind_all("<Key-Prior>", self._keyPressed))
self.b_ids.append(self.canvas.bind_all("<Key-Next>", self._keyPressed))
self.b_ids.append(self.canvas.bind_all("<Key-Up>", self._keyPressed))
self.b_ids.append(self.canvas.bind_all("<Key-Down>", self._keyPressed))
self.b_ids.append(self.canvas.bind_all("<Key-Left>", self._keyPressed))
self.b_ids.append(self.canvas.bind_all("<Key-Right>", self._keyPressed))
self.b_ids.append(self.canvas.bind_all("<Home>", self._keyPressed))
self.b_ids.append(self.canvas.bind_all("<End>", self._keyPressed))
# remove mouse scroll binding, when mouse leaves
def _mouseLeave(self, event):
self._unbindIds()
def _horizMouseScroll(self, event):
if not self.hDisabled and not self.hscrollbar.hidden:
self._mouseScroll(True, event)
def _vertMouseScroll(self, event):
if not self.vDisabled and not self.vscrollbar.hidden:
self._mouseScroll(False, event)
def _mouseScroll(self, horiz, event):
direction = 0
# get direction
if event.num == 4:
direction = -1
elif event.num == 5:
direction = 1
elif event.delta > 100:
direction = int(-1 * (event.delta/120))
elif event.delta > 0:
direction = -1 * event.delta
elif event.delta < -100:
direction = int(-1 * (event.delta/120))
elif event.delta < 0:
direction = -1 * event.delta
else:
return # shouldn't happen
if horiz:
self.xscroll(direction, "units")
else:
self.yscroll(direction, "units")
def getPane(self):
return self.canvas
def _keyPressed(self, event):
# work out if alt/ctrl/shift are pressed
# http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/event-handlers.html
state = event.state
ctrl = (state & 0x4) != 0
alt = (state & 0x8) != 0 or (state & 0x80) != 0 # buggy
shift = (state & 0x1) != 0
if event.type == "2":
# up and down arrows
if event.keysym == "Up": # event.keycode == 38
if ctrl:
self.yscroll(-1, "pages")
else:
self.yscroll(-1, "units")
elif event.keysym == "Down": # event.keycode == 40
if ctrl:
self.yscroll(1, "pages")
else:
self.yscroll(1, "units")
# left and right arrows
elif event.keysym == "Left": # event.keycode == 37
if ctrl:
self.xscroll(-1, "pages")
else:
self.xscroll(-1, "units")
elif event.keysym == "Right": # event.keycode == 39
if ctrl:
self.xscroll(1, "pages")
else:
self.xscroll(1, "units")
# page-up & page-down keys
elif event.keysym == "Prior": # event.keycode == 33
if ctrl:
self.xscroll(-1, "pages")
else:
self.yscroll(-1, "pages")
elif event.keysym == "Next": # event.keycode == 34
if ctrl:
self.xscroll(1, "pages")
else:
self.yscroll(1, "pages")
# home & end keys
elif event.keysym == "Home": # event.keycode == 36
if ctrl:
self.scrollLeft()
else:
self.scrollTop()
elif event.keysym == "End": # event.keycode == 35
if ctrl:
self.scrollRight()
else:
self.scrollBottom()
return "break"
else:
pass # shouldn't happen
def xscroll(self, direction, value=None):
if not self.hDisabled and not self.hscrollbar.hidden:
if value is not None: self.canvas.xview_scroll(direction, value)
else: self.canvas.xview_moveto(direction)
def yscroll(self, direction, value=None):
if not self.vDisabled and not self.vscrollbar.hidden:
if value is not None: self.canvas.yview_scroll(direction, value)
else: self.canvas.yview_moveto(direction)
# functions to scroll to the beginning or end
def scrollLeft(self):
self.xscroll(0.0)
def scrollRight(self):
self.xscroll(1.0)
def scrollTop(self):
self.yscroll(0.0)
def scrollBottom(self):
self.yscroll(1.0)
#################################
# Additional Dialog Classes
#################################
# the main dialog class to be extended
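# usage (illustrative sketch - subclass & override body()/validate()/apply();
# the constructor blocks until the dialog is closed):
#   class MyDialog(Dialog):
#       def body(self, master):
#           self.entry = Entry(master)
#           self.entry.pack()
#           return self.entry           # widget to receive focus
#       def apply(self):
#           self.result = self.entry.get()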
class Dialog(Toplevel, object):
def __init__(self, parent, title=None):
super(Dialog, self).__init__(parent)
self.transient(parent)
self.withdraw()
parent.POP_UP = self
if title:
self.title(title)
self.parent = parent
self.result = None
# create a frame to hold the contents
body = Frame(self)
self.initial_focus = self.body(body)
body.pack(padx=5, pady=5)
# create the buttons
self.buttonbox()
gui.SET_LOCATION(x="CENTER", up=150, win=self)
self.grab_set()
if not self.initial_focus:
self.initial_focus = self
self.protocol("WM_DELETE_WINDOW", self.cancel)
self.deiconify()
self.initial_focus.focus_set()
self.wait_window(self)
# override to create the contents of the dialog
# should return the widget to give focus to
def body(self, master):
pass
# add standard buttons
# override if you don't want the standard buttons
def buttonbox(self):
box = Frame(self)
w = Button(box, text="OK", width=10, command=self.ok, default=ACTIVE)
w.pack(side=LEFT, padx=5, pady=5)
w = Button(box, text="Cancel", width=10, command=self.cancel)
w.pack(side=LEFT, padx=5, pady=5)
self.bind("<Return>", self.ok)
self.bind("<Escape>", self.cancel)
box.pack()
# called when ok button pressed
def ok(self, event=None):
# only continue if validate() returns True
if not self.validate():
self.initial_focus.focus_set() # put focus back
return
self.withdraw()
self.update_idletasks()
        # apply the changes, then close the dialog
self.apply()
self.cancel()
# called when cancel button pressed
def cancel(self, event=None):
self.grab_release()
self.parent.focus_set() # give focus back to the parent
self.destroy()
# override this to cancel closing the form
def validate(self):
return True
# override this to do something before closing
def apply(self):
pass
class SimpleEntryDialog(Dialog):
""" a base class for a simple data capture dialog """
def __init__(self, parent, title, question, defaultvar=None):
self.error = False
self.question = question
        self.defaultVar = defaultvar
super(SimpleEntryDialog, self).__init__(parent, title)
def clearError(self, e):
if self.error:
self.error = False
self.l1.config(text="")
def setError(self, message):
self.parent.bell()
self.error = True
self.l1.config(text=message)
# a label for the question, an entry for the answer
# a label for an error message
def body(self, master):
Label(master, text=self.question).grid(row=0)
self.e1 = Entry(master)
if self.defaultVar is not None:
self.e1.var = self.defaultVar
self.e1.config(textvariable=self.e1.var)
self.e1.var.auto_id = None
self.e1.icursor("end")
self.l1 = Label(master, fg="#FF0000")
self.e1.grid(row=1)
self.l1.grid(row=2)
self.e1.bind("<Key>", self.clearError)
return self.e1
class TextDialog(SimpleEntryDialog):
""" captures a string - must not be empty """
def __init__(self, parent, title, question, defaultVar=None):
super(TextDialog, self).__init__(parent, title, question, defaultVar)
def validate(self):
res = self.e1.get()
if len(res.strip()) == 0:
self.setError("Invalid text.")
return False
else:
self.result = res
return True
class NumDialog(SimpleEntryDialog):
""" captures a number - must be a valid float """
def __init__(self, parent, title, question):
super(NumDialog, self).__init__(parent, title, question)
def validate(self):
res = self.e1.get()
try:
self.result = float(res) if '.' in res else int(res)
return True
except ValueError:
self.setError("Invalid number.")
return False
#####################################
# Toplevel Stuff
#####################################
class SubWindow(Toplevel, object):
def __init__(self, win, parent, name, title=None, stopFunc=None, modal=False, blocking=False, transient=False, grouped=True):
super(SubWindow, self).__init__()
if title is None: title = name
self.win = self
self.title(title)
self._parent = parent
self.withdraw()
self.escapeBindId = None # used to exit fullscreen
self.stopFunction = None # used to stop
self.shown = False
self.locationSet = False
self.isFullscreen = False
self.modal = modal
self.protocol("WM_DELETE_WINDOW", gui.MAKE_FUNC(stopFunc, name))
# have this respond to topLevel window style events
if transient: self.transient(self._parent)
# group this with the topLevel window
if grouped: self.group(self._parent)
self.blocking = blocking
if self.blocking: self.killLab = None
self.canvasPane = CanvasDnd(self)
self.canvasPane.pack(fill=BOTH, expand=True)
def setLocation(self, x, y):
x, y = gui.PARSE_TWO_PARAMS(x, y)
self.geometry("+%d+%d" % (x, y))
self.locationSet = True
def hide(self, useStopFunction=False):
if useStopFunction:
if self.stopFunction is not None and not self.stopFunction():
return
self.withdraw()
if self.blocking and self.killLab is not None:
self.killLab.destroy()
self.killLab = None
if self.modal:
self.grab_release()
self._parent.focus_set()
def prepDestroy(self):
if self.stopFunction is None or self.stopFunction():
if self.blocking and self.killLab is not None:
self.killLab.destroy()
self.killLab = None
self.withdraw()
self.grab_release()
self._parent.focus_set()
def show(self):
self.shown = True
if not self.locationSet:
gui.SET_LOCATION('c', win=self)
self.locationSet = True
else:
gui.trace("Using previous position")
self.deiconify()
self.config(takefocus=True)
# stop other windows receiving events
if self.modal:
self.grab_set()
gui.trace("%s set to MODAL", self.title)
self.focus_set()
def block(self):
# block here - wait for the subwindow to close
if self.blocking and self.killLab is None:
gui.trace("%s set to BLOCK", self.title)
self.killLab = Label(self)
            self._parent.wait_window(self.killLab)  # SubWindow stores its parent as _parent
#####################################
# SimpleTable Stuff
#####################################
class GridCell(Label, object):
def __init__(self, parent, fonts, isHeader=False, **opts):
super(GridCell, self).__init__(parent, **opts)
self.selected = False
self.isHeader = isHeader
self.config(borderwidth=1, highlightthickness=0, padx=0, pady=0)
self.updateFonts(fonts)
if not self.isHeader:
self.bind("<Enter>", self.mouseEnter)
self.bind("<Leave>", self.mouseLeave)
self.bind("<Button-1>", self.toggleSelection)
def updateFonts(self, fonts):
self.fonts = fonts
if self.isHeader:
self.config(font=self.fonts["headerFont"], background=self.fonts["headerBg"], fg=self.fonts['headerFg'], relief=self.fonts['border'])
else:
if self.selected:
self.config(font=self.fonts["dataFont"], background=self.fonts["selectedBg"], fg=self.fonts['selectedFg'], relief=self.fonts['border'])
else:
self.config(font=self.fonts["dataFont"], background=self.fonts["inactiveBg"], fg=self.fonts['inactiveFg'], relief=self.fonts['border'])
def setText(self, text):
self.config(text=text)
def clear(self):
self.config(text="")
def mouseEnter(self, event=None):
self.config(background=self.fonts["overBg"], fg=self.fonts["overFg"])
def mouseLeave(self, event=None):
if self.selected:
self.config(background=self.fonts["selectedBg"], fg=self.fonts["selectedFg"])
else:
self.config(background=self.fonts["inactiveBg"], fg=self.fonts["inactiveFg"])
def select(self):
self.config(background=self.fonts["selectedBg"], fg=self.fonts["selectedFg"])
self.selected = True
def deselect(self):
self.config(background=self.fonts["inactiveBg"], fg=self.fonts["inactiveFg"])
self.selected = False
def toggleSelection(self, event=None):
if self.selected:
self.deselect()
else:
self.select()
# first row is used as a header
# SimpleTable is a ScrollPane, where a Frame has been placed on the canvas - called GridContainer
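# usage (illustrative sketch - the first row of 'data' becomes the header;
# 'pressed' & 'app.queueFunction' are placeholders for appJar callbacks):
#   data = [["name", "age"], ["ann", "30"], ["bob", "25"]]
#   table = SimpleTable(root, "people", data, action=pressed, queueFunction=app.queueFunction)
#   table.pack(fill="both", expand=True)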
class SimpleTable(ScrollPane):
def __init__(self, parent, title, data, action=None, addRow=None,
actionHeading="Action", actionButton="Press",
addButton="Add", showMenu=False, queueFunction=None, border='solid', **opts):
self.fonts = {
"dataFont": tkFont.Font(family="Arial", size=11),
"headerFont": tkFont.Font(family="Arial", size=13, weight='bold'),
"buttonFont": tkFont.Font(family="Arial", size=10),
"headerBg": "#6e7274",
"headerFg": "#FFFFFF",
"selectedBg": "#D3D3D3",
"selectedFg": "#000000",
"inactiveBg": "#FFFFFF",
"inactiveFg":"#000000",
"overBg": "#E0E9EE",
"overFg": "#000000",
"border": border.lower()
}
super(SimpleTable, self).__init__(parent, resize=True, **{})
# actions
self.addRowEntries = addRow
self.action = action
self.queueFunction = queueFunction
# lists to store the data in
self.cells = []
self.entries = []
self.entryProps = []
self.rightColumn = []
# database stuff
self.db = None
self.dbTable = None
self.config(**opts)
# menu stuff
self.showMenu = showMenu
self.lastSelected = None
if self.showMenu: self._buildMenu()
# how many rows & columns
self.numColumns = 0
# find out the max number of cells in a row
if sqlite3 is not None and sqlite3 is not False and isinstance(data, sqlite3.Cursor):
self.numColumns = len([description[0] for description in data.description])
else:
self.numColumns = len(max(data, key=len))
# headings
self.actionHeading = actionHeading
if type(actionButton) in (list, tuple):
self.actionButton = actionButton
else:
self.actionButton = [actionButton]
        self.addButton = addButton
# add the grid container to the frame
self.interior.bind("<Configure>", self._refreshGrids)
gui.trace("SimpleTable %s constructed, adding rows", title)
self.addRows(data, scroll=False)
def config(self, cnf=None, **kw):
self.configure(cnf, **kw)
def configure(self, cnf=None, **kw):
kw = gui.CLEAN_CONFIG_DICTIONARY(**kw)
updateCells = False
if "disabledentries" in kw:
entries = kw.pop("disabledentries")
list(map(self.disableEntry, entries))
if "bg" in kw:
bg = kw.pop("bg")
self.canvas.config(bg=bg)
self.interior.config(bg=bg)
if "activebg" in kw:
self.fonts["selectedBg"] = kw.pop("activebg", self.fonts['selectedBg'])
updateCells = True
if "activefg" in kw:
self.fonts["selectedFg"] = kw.pop("activefg", self.fonts['selectedFg'])
updateCells = True
if "inactivebg" in kw:
self.fonts["inactiveBg"] = kw.pop("inactivebg", self.fonts['inactiveBg'])
updateCells = True
if "inactivefg" in kw:
self.fonts["inactiveFg"] = kw.pop("inactivefg", self.fonts['inactiveFg'])
updateCells = True
if "font" in kw:
font = kw.pop("font")
self.fonts["headerFont"].configure(family=font.actual("family"), size=font.actual("size") + 2, weight="bold")
updateCells = True
if "buttonfont" in kw:
buttonFont = kw.pop("buttonfont")
self.fonts["buttonFont"].configure(family=buttonFont.actual("family"), size=buttonFont.actual("size")-2)
updateCells = True
if "border" in kw:
self.fonts["border"]=kw.pop("border").lower().strip()
updateCells = True
if updateCells: self._configCells()
# allow labels to be updated
if "actionheading" in kw:
self.actionHeading = kw.pop("actionheading")
if len(self.rightColumn) > 0:
self.rightColumn[0].config(text=self.actionHeading)
if "actionbutton" in kw:
self.actionButton = kw.pop("actionbutton")
if len(self.rightColumn) > 1:
for pos in range(1, len(self.rightColumn)):
self.rightColumn[pos].config(text=self.actionButton)
if "addbutton" in kw:
self.addButton = kw.pop("addbutton")
self.ent_but.config(text=self.addButton)
super(SimpleTable, self).configure(**kw)
def _configCells(self):
gui.trace("Config all cells")
for row in self.cells:
for cell in row:
gui.trace("Update Fonts: %s, %s", row, cell)
cell.updateFonts(self.fonts)
def addRow(self, rowData, scroll=True):
self.queueFunction(self._hideEntryBoxes)
self.queueFunction(self._addRow, rowData)
self.queueFunction(self._showEntryBoxes)
self.queueFunction(self.canvas.event_generate, "<Configure>")
if scroll:
self.queueFunction(self.scrollBottom)
def addRows(self, data, scroll=True):
self._hideEntryBoxes()
if sqlite3 is not None and sqlite3 is not False and isinstance(data, sqlite3.Cursor):
self._addRow([description[0] for description in data.description])
list(map(self._addRow, data))
gui.trace("Added all rows in addRows()")
self._showEntryBoxes()
self.canvas.event_generate("<Configure>")
if scroll:
self.scrollBottom()
# this will include the header row
def getRowCount(self):
return len(self.cells)-1
def getRow(self, rowNumber):
        if rowNumber < 0 or rowNumber >= len(self.cells):
raise Exception("Invalid row number.")
else:
data = []
for cell in self.cells[rowNumber+1]:
data.append(str(cell.cget('text')))
return data
def setHeaders(self, data):
if sqlite3 is not None and sqlite3 is not False and isinstance(data, sqlite3.Cursor):
data = [description[0] for description in data.description]
cellsLen = len(self.cells[0])
newCols = len(data) - cellsLen
if newCols > 0:
for pos in range(cellsLen, cellsLen + newCols):
self.addColumn(pos, [])
elif newCols < 0:
for pos in range(newCols*-1):
cellsLen = len(self.cells[0])
self.deleteColumn(cellsLen-1)
dataLen = len(data)
cellsLen = len(self.cells[0])
for count in range(cellsLen):
cell = self.cells[0][count]
if count < dataLen:
cell.setText(data[count])
else:
cell.clear()
def replaceRow(self, rowNum, data):
        if rowNum < 0 or rowNum >= len(self.cells):
raise Exception("Invalid row number.")
else:
dataLen = len(data)
for count in range(len(self.cells[rowNum+1])):
cell = self.cells[rowNum+1][count]
if count < dataLen:
cell.setText(data[count])
else:
cell.clear()
self.canvas.event_generate("<Configure>")
def deleteAllRows(self):
list(map(self._quickDeleteRow, range(len(self.cells)-2, -1, -1)))
self.canvas.event_generate("<Configure>")
self._deleteEntryBoxes()
def _quickDeleteRow(self, position):
self.deleteRow(position, True)
def deleteRow(self, position, pauseUpdate=False):
        if position < 0 or position >= len(self.cells):
raise Exception("Invalid row number.")
else:
# forget the specified row & button
for cell in self.cells[position+1]:
cell.grid_forget()
if self.action is not None:
self.rightColumn[position+1].grid_forget()
# loop through all rows after, forget them, move them, grid them
for loop in range(position+1, len(self.cells)-1):
# forget the next row
for cell in self.cells[loop+1]:
cell.grid_forget()
# move data
self.cells[loop] = self.cells[loop+1]
# add its button
if self.action is not None:
self.rightColumn[loop+1].grid_forget()
self.rightColumn[loop] = self.rightColumn[loop+1]
self.rightColumn[loop+1].grid(row=loop, column=self.numColumns, sticky=N+E+S+W)
# update its button
for but in self.rightColumn[loop].but:
                        but.config(command=lambda name=but.cget('text'), row=loop, *args: self.action(name, row))
# re-grid them
for cellNum in range(len(self.cells[loop])):
self.cells[loop][cellNum].grid(row=loop, column=cellNum, sticky=N+E+S+W)
# lose last item from lists
self.cells = self.cells[:-1]
self.rightColumn = self.rightColumn[:-1]
self._updateButtons(position)
if not pauseUpdate: self.canvas.event_generate("<Configure>")
def _addRow(self, rowData):
if self.numColumns == 0:
raise Exception("No columns to add to.")
else:
gui.trace(rowData)
rowNum = len(self.cells)
numCols = len(rowData)
newRow = []
for cellNum in range(self.numColumns):
# get a val ("" if no val)
if cellNum >= numCols:
val = ""
else:
val = rowData[cellNum]
lab = self._createCell(rowNum, cellNum, val)
newRow.append(lab)
self.cells.append(newRow)
# add some buttons for each row
if self.action is not None:
# add the title
if rowNum == 0:
widg = GridCell(self.interior, self.fonts, isHeader=True, text=self.actionHeading)
# add a button
else:
widg = GridCell(self.interior, self.fonts, isHeader=True)
widg.config(borderwidth=0, bg=self.fonts['headerBg'])
widg.but=[]
val = rowNum - 1
butCount = len(self.actionButton)
for row, text in enumerate(self.actionButton):
if butCount == 1:
command=lambda row=val, *args: self.action(row)
else:
command=lambda name=text, row=val, *args: self.action(name, row)
but = Button(widg, font=self.fonts["buttonFont"], text=text,
bd=0, highlightthickness=0, command=command)
if gui.GET_PLATFORM() in [gui.MAC, gui.LINUX]:
but.config(highlightbackground=widg.cget("bg"))
but.grid(row=row, sticky=N+E+S+W, pady=1)
widg.but.append(but)
self.rightColumn.append(widg)
                # the action column sits after the last data column
                widg.grid(row=rowNum, column=self.numColumns, sticky=N+E+S+W)
def _updateButtons(self, position=0):
for pos in range(position+1, len(self.rightColumn)):
for but in self.rightColumn[pos].but:
                but.config(command=lambda name=but.cget('text'), row=pos-1, *args: self.action(name, row))
def _createCell(self, rowNum, cellNum, val):
if rowNum == 0: # adding title row
lab = GridCell(self.interior, self.fonts, isHeader=True, text=val)
lab.gridPos = ''.join(["h-", str(cellNum)])
lab.bind("<Button-1>", self._selectColumn)
else:
lab = GridCell(self.interior, self.fonts, text=val)
lab.gridPos = ''.join([str(rowNum - 1), "-", str(cellNum)])
if self.showMenu:
if gui.GET_PLATFORM() in [gui.WINDOWS, gui.LINUX]:
lab.bind('<Button-3>', self._rightClick)
else:
lab.bind('<Button-2>', self._rightClick)
lab.grid(row=rowNum, column=cellNum, sticky=N+E+S+W)
self.interior.columnconfigure(cellNum, weight=1)
self.interior.rowconfigure(rowNum, weight=1)
return lab
def _selectColumn(self, event=None):
columnNumber = int(event.widget.gridPos.split("-")[1])
self.selectColumn(columnNumber)
def selectColumn(self, columnNumber, highlight=None):
if columnNumber < 0 or columnNumber >= self.numColumns:
raise Exception("Invalid column number.")
else:
selected = self.cells[1][columnNumber].selected
for rowCount in range(1, len(self.cells)):
if highlight is None:
if selected:
self.cells[rowCount][columnNumber].deselect()
else:
self.cells[rowCount][columnNumber].select()
else:
if highlight:
self.cells[rowCount][columnNumber].mouseEnter()
else:
self.cells[rowCount][columnNumber].mouseLeave()
def _selectRow(self, event=None):
rowNumber = event.widget.gridPos.split("-")[0]
self.selectRow(rowNumber)
def selectRow(self, rowNumber, highlight=None):
if rowNumber == "h": rowNumber = 0
else: rowNumber = int(rowNumber) + 1
        if rowNumber < 0 or rowNumber >= len(self.cells):
raise Exception("Invalid row number.")
else:
selected = self.cells[rowNumber][0].selected
for cell in self.cells[rowNumber]:
if highlight is None:
if selected: cell.deselect()
else: cell.select()
else:
if highlight: cell.mouseEnter()
else: cell.mouseLeave()
def _buildMenu(self):
self.menu = Menu(self, tearoff=0)
self.menu.add_command(label="Copy", command=lambda: self._menuHelper("copy"))
self.menu.add_command(label="Paste", command=lambda: self._menuHelper("paste"))
self.menu.add_command(label="Edit", command=lambda: self._menuHelper("edit"))
self.menu.add_command(label="Clear", command=lambda: self._menuHelper("clear"))
self.menu.add_separator()
self.menu.add_command(label="Delete Column", command=lambda: self._menuHelper("dc"))
self.menu.add_command(label="Delete Row", command=lambda: self._menuHelper("dr"))
self.menu.add_separator()
self.menu.add_command(label="Sort Ascending", command=lambda: self._menuHelper("sa"))
self.menu.add_command(label="Sort Descending", command=lambda: self._menuHelper("sd"))
self.menu.add_separator()
self.menu.add_command(label="Insert Before", command=lambda: self._menuHelper("cb"))
self.menu.add_command(label="Insert After", command=lambda: self._menuHelper("ca"))
self.menu.add_separator()
self.menu.add_command(label="Select Cell", command=lambda: self._menuHelper("select"))
self.menu.add_command(label="Select Row", command=lambda: self._menuHelper("selectRow"))
self.menu.add_command(label="Select Column", command=lambda: self._menuHelper("selectColumn"))
self.menu.bind("<FocusOut>", lambda e: self.menu.unpost())
def _configMenu(self, isHeader=False):
if isHeader:
self.menu.entryconfigure("Delete Row", state=DISABLED)
self.menu.entryconfigure("Select Cell", state=DISABLED)
self.menu.entryconfigure("Select Row", state=DISABLED)
else:
self.menu.entryconfigure("Delete Row", state=NORMAL)
self.menu.entryconfigure("Select Cell", state=NORMAL)
self.menu.entryconfigure("Select Row", state=NORMAL)
def _rightClick(self, event):
if self.lastSelected is None or not self.lastSelected.isHeader == event.widget.isHeader:
self._configMenu(event.widget.isHeader)
self.lastSelected = event.widget
self.menu.focus_set()
self.menu.post(event.x_root - 10, event.y_root - 10)
return "break"
def _menuHelper(self, action):
self.update_idletasks()
vals=self.lastSelected.gridPos.split("-")
gui.trace('Table Menu Helper: %s-%s', action, vals)
if action == "dc":
self.deleteColumn(int(vals[1]))
elif action == "dr" and vals[0] != "h":
self.deleteRow(int(vals[0]))
elif action == "cb":
self.addColumn(int(vals[1]), [])
elif action == "ca":
self.addColumn(int(vals[1])+1, [])
elif action == "select" and vals[0] != "h":
self.lastSelected.toggleSelection()
elif action == "selectRow":
self.selectRow(int(vals[0]))
elif action == "selectColumn":
self.selectColumn(int(vals[1]))
if action == "sa":
self.sort(int(vals[1]))
if action == "sd":
self.sort(int(vals[1]), descending=True)
elif action == "copy":
val=self.lastSelected.cget("text")
self.clipboard_clear()
self.clipboard_append(val)
elif action == "paste":
try: self.lastSelected.config(text=self.clipboard_get())
except: pass
elif action == "clear":
self.lastSelected.config(text="")
elif action == "edit":
val=self.lastSelected.cget("text")
defaultVar = StringVar(self)
defaultVar.set(val)
newText = TextDialog(self, "Edit", "Enter the new text", defaultVar=defaultVar).result
if newText is not None:
self.lastSelected.config(text=newText)
def addColumn(self, columnNumber, data):
if columnNumber < 0 or columnNumber > self.numColumns:
raise Exception("Invalid column number.")
else:
self._hideEntryBoxes()
gui.trace('Adding column: %s', columnNumber)
cellCount = len(self.cells)
# move the right column, if necessary
if self.action is not None:
for rowPos in range(cellCount):
self.rightColumn[rowPos].grid_forget()
self.rightColumn[rowPos].grid(row=rowPos, column=self.numColumns+1, sticky=N+E+S+W)
# move the button
self.ent_but.lab.grid_forget()
self.ent_but.lab.grid(row=cellCount, column=self.numColumns+2, sticky=N+E+S+W)
# add another entry
ent = self._createEntryBox(self.numColumns)
self.entries.append(ent)
self.entryProps.append({'disabled':False})
# move all columns including this position right one
for colPos in range(self.numColumns-1, columnNumber-1, -1):
for rowPos in range(cellCount):
cell = self.cells[rowPos][colPos]
cell.grid_forget()
cell.grid(row=rowPos, column=colPos+1, sticky=N+E+S+W)
                    val = rowPos - 1
                    if val == -1: val = 'h'
                    else: val = str(val)
                    cell.gridPos = ''.join([val, "-", str(colPos+1)])
# then add this column
dataLen = len(data)
for rowPos in range(cellCount):
if rowPos < dataLen:
val = data[rowPos]
else:
val = ""
lab = self._createCell(rowPos, columnNumber, val)
self.cells[rowPos].insert(columnNumber, lab)
self.numColumns += 1
self._showEntryBoxes()
self.canvas.event_generate("<Configure>")
def deleteColumn(self, columnNumber):
if columnNumber < 0 or columnNumber >= self.numColumns:
raise Exception("Invalid column number: %s.", columnNumber)
else:
# hide the entries
self._hideEntryBoxes()
cellCount = len(self.cells)
# delete the column
for row in self.cells:
row[columnNumber].grid_forget()
del row[columnNumber]
# update the entry boxes
if self.addRowEntries is not None and len(self.entries) >= columnNumber:
self.entries[columnNumber].grid_forget()
del self.entries[columnNumber]
del self.entryProps[columnNumber]
# move the remaining columns
for rowCount in range(cellCount):
row = self.cells[rowCount]
for colCount in range(columnNumber, len(row)):
cell = row[colCount]
cell.grid_forget()
cell.grid(row=rowCount, column=colCount, sticky=N+E+S+W)
# update the cells
                    val = rowCount - 1
                    if val == -1: val = 'h'
                    else: val = str(val)
                    cell.gridPos = ''.join([val, "-", str(colCount)])
# move the buttons
if self.action is not None:
for rowPos in range(cellCount):
self.rightColumn[rowPos].grid_forget()
self.rightColumn[rowPos].grid(row=rowPos, column=self.numColumns-1, sticky=N+E+S+W)
self.numColumns -= 1
# show the entry boxes
self._showEntryBoxes()
self.canvas.event_generate("<Configure>")
def sort(self, columnNumber, descending=False):
order = self._getSortedData(columnNumber, descending)
for k, val in enumerate(order):
for c, cell in enumerate(self.cells[k+1]):
cell.config(text=val[c])
cell.selected=False
cell.mouseLeave()
def _getSortedData(self, columnNumber, descending=False):
data = []
for pos in range(len(self.cells)-1):
row = self.getRow(pos)
data.append(row)
return sorted(data,key=lambda l:l[columnNumber], reverse=descending)
def _hideEntryBoxes(self):
if self.addRowEntries is None or len(self.entries) == 0:
return
for e in self.entries:
e.lab.grid_forget()
self.ent_but.lab.grid_forget()
def _deleteEntryBoxes(self):
self._hideEntryBoxes()
self.entries = []
self.entryProps = []
def _showEntryBoxes(self):
if self.addRowEntries is None: return
if len(self.entries) > 0:
cellCount = len(self.cells)
for pos in range(len(self.entries)):
self.entries[pos].lab.grid(row=cellCount, column=pos, sticky=N+E+S+W)
self.ent_but.lab.grid(row=cellCount, column=len(self.entries), sticky=N+E+S+W)
else:
self._createEntryBoxes()
def _configEntryBoxes(self):
if self.addRowEntries is None: return
# config the entries
for cellNum in range(self.numColumns):
if self.entryProps[cellNum]['disabled']:
self.entries[cellNum].config(state='readonly')
def disableEntry(self, pos, disabled=True):
self.entryProps[pos]['disabled'] = disabled
self._configEntryBoxes()
def _createEntryBoxes(self):
if self.addRowEntries is None: return
# add the entries
for cellNum in range(self.numColumns):
ent = self._createEntryBox(cellNum)
self.entries.append(ent)
self.entryProps.append({'disabled':False})
# add a button
lab = GridCell(self.interior, self.fonts, isHeader=True)
lab.grid(row=len(self.cells), column=self.numColumns, sticky=N+E+S+W)
self.ent_but = Button(
lab, font=self.fonts["buttonFont"],
text=self.addButton,
command=gui.MAKE_FUNC(self.addRowEntries, "newRow")
)
if gui.GET_PLATFORM() in [gui.MAC, gui.LINUX]:
self.ent_but.config(highlightbackground=lab.cget("bg"))
self.ent_but.lab = lab
self.ent_but.pack(expand=True, fill='both')
def _createEntryBox(self, cellNum):
# create the container
lab = GridCell(self.interior, self.fonts, isHeader=True)
lab.grid(row=len(self.cells), column=cellNum, sticky=N + E + S + W)
# create the entry
ent = Entry(lab, relief=FLAT, borderwidth=1, highlightbackground='black', highlightthickness=1, width=6, disabledbackground='grey')
ent.pack(expand=True, fill='both')
ent.lab = lab
return ent
def getEntries(self):
return [e.get() for e in self.entries]
def getSelectedCells(self):
selectedCells = []
for row in self.cells:
for cell in row:
if cell.selected:
selectedCells.append(cell.gridPos)
return selectedCells
def _refreshGrids(self, event):
'''Reset the scroll region to encompass the inner frame'''
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
##########################
# MicroBit Simulator
##########################
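# usage (illustrative sketch - images are 5x5 strings of brightness 0-9, rows split by ':'):
#   mb = MicroBitSimulator(root)
#   mb.pack()
#   mb.show(MicroBitSimulator.HEART)
#   mb.set_pixel(2, 2, 9)
##########################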
class MicroBitSimulator(Frame, object):
COLOURS = {0:'#000000',1:'#110000',2:'#220000',3:'#440000',4:'#660000',5:'#880000',6:'#aa0000',7:'#cc0000',8:'#ee0000',9:'#ff0000'}
SIZE = 5
HEART = "09090:90909:90009:09090:00900"
def __init__(self, parent, **opts):
super(MicroBitSimulator, self).__init__(parent, **opts)
self.matrix = []
for i in range(self.SIZE):
self.matrix.append([])
for i in range(self.SIZE):
for j in range(self.SIZE):
self.matrix[i].append('')
for y in range(self.SIZE):
for x in range(self.SIZE):
self.matrix[x][y] = Label(self, bg='#000000', width=5, height=2)
self.matrix[x][y].grid(column=x, row=y, padx=5, pady=5)
self.update_idletasks()
def set_pixel(self, x, y, brightness):
self.matrix[x][y].config(bg=self.COLOURS[brightness])
self.update_idletasks()
def show(self, image):
rows = image.split(':')
for y in range(len(rows)):
for x in range(len(rows[0])):
self.matrix[x][y].config(bg=self.COLOURS[int(rows[y][x])])
self.update_idletasks()
def clear(self):
for y in range(self.SIZE):
for x in range(self.SIZE):
self.matrix[x][y].config(bg='#000000')
self.update_idletasks()
##########################
# Simple SplashScreen
##########################
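# usage (illustrative sketch - fills the screen until explicitly destroyed):
#   splash = SplashScreen(root, text="loading...", fill="#336699")
#   root.after(3000, splash.destroy)
##########################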
class SplashScreen(Toplevel, object):
def __init__(self, parent, text="appJar", fill="#FF0000", stripe="#000000", fg="#FFFFFF", font=44):
super(SplashScreen, self).__init__(parent)
lab = Label(self, bg=stripe, fg=fg, text=text, height=3, width=50)
lab.config(font=("Courier", font))
lab.place(relx=0.5, rely=0.5, anchor=CENTER)
width = str(self.winfo_screenwidth())
height = str(self.winfo_screenheight())
self.geometry("%sx%s" % (width, height))
self.config(bg=fill)
self.attributes("-alpha", 0.95)
self.attributes("-fullscreen", True)
self.overrideredirect(1)
self.update()
##########################
# CopyAndPaste Organiser
##########################
class CopyAndPaste():
def __init__(self, topLevel, gui):
self.topLevel = topLevel
self.inUse = False
self.gui = gui
def setUp(self, widget):
self.inUse = True
# store globals
w = widget
wt = gui.GET_WIDGET_TYPE(widget)
if wt != "Menu":
self.widget = w
self.widgetType = wt
# query widget
self.canCut = False
self.canCopy = False
self.canSelect = False
self.canUndo = False
self.canRedo = False
self.canFont = False
try:
self.canPaste = len(self.topLevel.clipboard_get()) > 0
except:
self.canPaste = False
try:
if self.widgetType in ["Entry", "AutoCompleteEntry"]:
if widget.selection_present():
self.canCut = self.canCopy = True
if not self.widget.showingDefault and widget.index(END) > 0:
self.canSelect = True
elif self.widgetType in ["ScrolledText", "Text", "AjText", "AjScrolledText"]:
if widget.tag_ranges("sel"):
self.canCut = self.canCopy = True
self.canFont = True
if widget.index("end-1c") != "1.0":
self.canSelect = True
# if widget.edit_modified():
self.canUndo = True
self.canRedo = True
elif self.widgetType == "OptionMenu":
self.canCopy = True
self.canPaste = False
except Exception as e:
gui.warn("Error in EDIT menu: %s", self,widgetType)
gui.exception(e)
def copy(self):
if self.widgetType == "OptionMenu":
self.topLevel.clipboard_clear()
self.topLevel.clipboard_append(self.widget.var.get())
else:
self.widget.event_generate('<<Copy>>')
self.widget.selection_clear()
def cut(self):
if self.widgetType == "OptionMenu":
self.topLevel.bell()
else:
self.widget.event_generate('<<Cut>>')
self.widget.selection_clear()
def paste(self):
if self.widgetType in ["Entry", "AutoCompleteEntry"]:
# horrible hack to clear default text
name = self.gui._getWidgetName(self.widget)
self.gui._updateEntryDefault(name, mode="in")
self.widget.event_generate('<<Paste>>')
self.widget.selection_clear()
def undo(self):
self.widget.event_generate("<<Undo>>")
def redo(self):
self.widget.event_generate("<<Redo>>")
def clearClipboard(self):
self.topLevel.clipboard_clear()
def font(self, tag):
if tag in self.widget.tag_names(SEL_FIRST):
self.widget.tag_remove(tag, SEL_FIRST, SEL_LAST)
else:
self.widget.tag_add(tag, SEL_FIRST, SEL_LAST)
def clearText(self):
try:
            self.widget.delete("1.0", END) # TEXT - text indices are strings, starting at "1.0"
except:
try:
self.widget.delete(0, END) # ENTRY
except:
self.topLevel.bell()
def selectAll(self):
try:
self.widget.select_range(0, END) # ENTRY
except:
try:
self.widget.tag_add("sel", "1.0", "end") # TEXT
except:
self.topLevel.bell()
# clear the undo/redo stack
def resetStack(self):
self.widget.edit_reset()
#####################################
# class to temporarily pause logging
#####################################
# usage:
# with PauseLogger():
# doSomething()
#####################################
class PauseLogger():
def __enter__(self):
# disable all warning of CRITICAL & below
logging.disable(logging.CRITICAL)
def __exit__(self, a, b, c):
logging.disable(logging.NOTSET)
#####################################
# class to temporarily pause function calling
#####################################
# usage:
# with PauseCallFunction(callFunction, widg):
# doSomething()
# relies on 3 variables in widg:
# var - the thing being traced
# cmd_id - linking to the trace
# cmd - the function called by the trace
#####################################
class PauseCallFunction():
def __init__(self, callFunction, widg, useVar=True):
self.callFunction = callFunction
self.widg = widg
if useVar:
self.tracer = self.widg.var
else:
self.tracer = self.widg
gui.trace("PauseCallFunction: callFunction=%s, useVar=%s", callFunction, useVar)
def __enter__(self):
if not self.callFunction and hasattr(self.widg, 'cmd'):
self.tracer.trace_vdelete('w', self.widg.cmd_id)
gui.trace("callFunction paused")
def __exit__(self, a, b, c):
if not self.callFunction and hasattr(self.widg, 'cmd'):
self.widg.cmd_id = self.tracer.trace('w', self.widg.cmd)
gui.trace("callFunction resumed")
#####################################
# classes to work with image maps
#####################################
class AjPoint(object):
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __str__(self):
return "({},{})".format(self.x, self.y)
class AjRectangle(object):
def __init__(self, name, posn, w, h):
self.name = name
self.corner = posn
self.width = w
self.height = h
def __str__(self):
return "{3}:({0},{1},{2})".format(self.corner, self.width, self.height, self.name)
def contains(self, point):
return (self.corner.x <= point.x <= self.corner.x + self.width and
self.corner.y <= point.y <= self.corner.y + self.height)
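# usage sketch - hit-testing with the image-map helpers:
# r = AjRectangle("button1", AjPoint(10, 10), 50, 20)
# r.contains(AjPoint(30, 20))   # True - inside the rectangle
# r.contains(AjPoint(70, 40))   # False - outside on both axes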
class GoogleMap(LabelFrame, object):
""" Class to wrap a GoogleMap tile download into a widget"""
def __init__(self, parent, app, defaultLocation="Marlborough, UK", proxyString=None, useTtk=False, font=None):
super(GoogleMap, self).__init__(parent, text="GoogleMaps")
self.alive = True
self.API_KEY = ""
self.parent = parent
self.imageQueue = Queue.Queue()
self.defaultLocation = defaultLocation
self.currentLocation = None
self.app = app
self.proxyString = proxyString
if font is not None:
self.config(font=font)
self.TERRAINS = ("Roadmap", "Satellite", "Hybrid", "Terrain")
self.MAP_URL = "http://maps.google.com/maps/api/staticmap?"
self.GEO_URL = "https://maps.googleapis.com/maps/api/geocode/json?"
self.LOCATION_URL = "http://freegeoip.net/json/"
# self.LOCATION_URL = "http://ipinfo.io/json"
self.setCurrentLocation()
# the parameters that we store
# keeps getting updated, then sent to GoogleMaps
self.params = {}
self._setMapParams()
imgObj = None
self.rawData = None
self.mapData = None
self.request = None
self.app.thread(self.getMapData)
self.updateMapId = self.parent.after(500, self.updateMap)
# if we got some map data then load it
if self.mapData is not None:
try:
imgObj = PhotoImage(data=self.mapData)
self.h = imgObj.height()
self.w = imgObj.width()
# python 3.3 fails to load data
except Exception as e:
gui.exception(e)
if imgObj is None:
self.w = self.params['size'].split("x")[0]
self.h = self.params['size'].split("x")[1]
self.canvas = Canvas(self, width=self.w, height=self.h)
self.canvas.pack()  # expand=YES, fill=BOTH
self.image_on_canvas = self.canvas.create_image(1, 1, image=imgObj, anchor=NW)
self.canvas.img = imgObj
# will store the buttons (and the "@" link) in an array
# they are actually labels - to hide the border
# makes it easier to configure them
self.buttons = [
Label(self.canvas, text="-"),
Label(self.canvas, text="+"),
Label(self.canvas, text="H"),
gui._makeLink()(self.canvas, text="@", useTtk=useTtk)
]
B_FONT = tkFont.Font(family='Helvetica', size=10)
for b in self.buttons:
b.configure(width=3, relief=GROOVE, font=B_FONT)
if not useTtk:
b.configure(width=3, activebackground="#D2D2D2", relief=GROOVE, font=B_FONT)
if gui.GET_PLATFORM() == gui.MAC:
b.configure(cursor="pointinghand")
elif gui.GET_PLATFORM() in [gui.WINDOWS, gui.LINUX]:
b.configure(cursor="hand2")
#make it look like it's pressed
self.buttons[0].bind("<Button-1>",lambda e: self.buttons[0].config(relief=SUNKEN), add="+")
self.buttons[0].bind("<ButtonRelease-1>",lambda e: self.buttons[0].config(relief=GROOVE), add="+")
self.buttons[0].bind("<ButtonRelease-1>",lambda e: self.zoom("-"), add="+")
self.buttons[1].bind("<Button-1>",lambda e: self.buttons[1].config(relief=SUNKEN), add="+")
self.buttons[1].bind("<ButtonRelease-1>",lambda e: self.buttons[1].config(relief=GROOVE), add="+")
self.buttons[1].bind("<ButtonRelease-1>",lambda e: self.zoom("+"), add="+")
self.buttons[2].bind("<Button-1>",lambda e: self.buttons[2].config(relief=SUNKEN), add="+")
self.buttons[2].bind("<ButtonRelease-1>",lambda e: self.buttons[2].config(relief=GROOVE), add="+")
self.buttons[2].bind("<ButtonRelease-1>",lambda e: self.changeLocation(""), add="+")
# an optionMenu of terrains
self.terrainType = StringVar(self.parent)
self.terrainType.set(self.TERRAINS[0])
self.terrainOption = OptionMenu(self.canvas, self.terrainType, *self.TERRAINS, command=lambda e: self.changeTerrain(self.terrainType.get().lower()))
self.terrainOption.config(highlightthickness=0)
self.terrainOption.config(font=B_FONT)
# an entry for searching locations
self.locationEntry = Entry(self.canvas)
self.locationEntry.bind('<Return>', lambda e: self.changeLocation(self.location.get()))
self.location = StringVar(self.parent)
self.locationEntry.config(textvariable=self.location)
self.locationEntry.config(highlightthickness=0)
self._placeControls()
def setProxyString(self, proxyString):
self.proxyString = proxyString
def destroy(self):
self.stopUpdates()
super(GoogleMap, self).destroy()
def _removeControls(self):
self.locationEntry.place_forget()
self.terrainOption.place_forget()
self.buttons[0].place_forget()
self.buttons[1].place_forget()
self.buttons[2].place_forget()
self.buttons[3].place_forget()
def stopUpdates(self):
self.alive = False
self.parent.after_cancel(self.updateMapId)
def _placeControls(self):
self.locationEntry.place(rely=0, relx=0, x=8, y=8, anchor=NW)
self.terrainOption.place(rely=0, relx=1.0, x=-8, y=8, anchor=NE)
self.buttons[0].place(rely=1.0, relx=1.0, x=-5, y=-20, anchor=SE)
self.buttons[1].place(rely=1.0, relx=1.0, x=-5, y=-38, anchor=SE)
self.buttons[2].place(rely=1.0, relx=1.0, x=-5, y=-56, anchor=SE)
self.buttons[3].place(rely=1.0, relx=1.0, x=-5, y=-74, anchor=SE)
if self.request is not None:
self.buttons[3].registerWebpage(self.request)
self._addTooltip(self.buttons[3], self.request)
def _addTooltip(self, but, text):
# generate a tooltip
if ToolTip is not False:
tt = ToolTip(
but,
text,
delay=1000,
follow_mouse=1)
def _setMapParams(self):
if "center" not in self.params or self.params["center"] is None or self.params["center"] == "":
self.params["center"] = self.currentLocation
if "zoom" not in self.params:
self.params["zoom"] = 16
if "size" not in self.params:
self.params["size"] = "500x500"
if "format" not in self.params:
self.params["format"] = "gif"
if "maptype" not in self.params:
self.params["maptype"] = self.TERRAINS[0]
# self.params["mobile"] = "true" # optional: mobile=true will assume the image is shown on a small screen (mobile device)
self.params["sensor"] = "false" # must be given, deals with getting loction from mobile device
self.markers = []
def removeMarkers(self):
self.markers = []
self.app.thread(self.getMapData)
def removeMarker(self, label):
for p, v in enumerate(self.markers):
if v.get("label") == label:
del self.markers[p]
self.app.thread(self.getMapData)
return
def addMarker(self, location, size=None, colour=None, label=None, replace=False):
""" function to add markers, format:
&markers=color:blue|label:Z|size:tiny|location_string
"""
if size is not None:
size = size.lower().strip()
if size not in ["tiny", "mid", "small"]:
gui.warn("Invalid size: %s, for marker %s, ignoring", size, location)
size = None
if label is not None:
label = label.upper().strip()
if label not in "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789":
gui.warn("Invalid label: %s, for marker %s, must be a single character.", label, location)
label = None
if len(self.markers) == 0 or not replace:
self.markers.append( {"location":location, "size":size, "colour":colour, "label":label} )
else:
self.markers[-1] = {"location":location, "size":size, "colour":colour, "label":label}
self.app.thread(self.getMapData)
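# illustrative example of the marker string later built by _buildQueryURL
# for addMarker("51.4,-1.7", size="mid", colour="blue", label="A"):
#   color:blue|size:mid|label:A|51.4,-1.7
# (URL-encoded and appended to the request as &markers=...)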
def saveTile(self, location):
if self.rawData is not None:
try:
with open(location, "wb") as fh:
fh.write(self.rawData)
gui.info("Map data written to file: %s", location)
return True
except Exception as e:
gui.exception(e)
return False
else:
gui.error("Unable to save map data - no data available")
return False
def setSize(self, size):
if size != self.params["size"]:
self.params["size"] = str(size).lower()
self.app.thread(self.getMapData)
def changeTerrain(self, terrainType):
terrainType = terrainType.title()
if terrainType in self.TERRAINS:
self.terrainType.set(terrainType)
if self.params["maptype"] != self.terrainType.get().lower():
self.params["maptype"] = self.terrainType.get().lower()
self.app.thread(self.getMapData)
def changeLocation(self, location):
self.location.set(location) # update the entry
if self.params["center"] != location:
self.params["center"] = location
self.app.thread(self.getMapData)
def setZoom(self, zoom):
if 0 <= zoom <= 22:
self.params["zoom"] = zoom
self.app.thread(self.getMapData)
def zoom(self, mod):
if mod == "+" and self.params["zoom"] < 22:
self.params["zoom"] += 1
self.app.thread(self.getMapData)
elif mod == "-" and self.params["zoom"] > 0:
self.params["zoom"] -= 1
self.app.thread(self.getMapData)
def updateMap(self):
if not self.alive: return
if not self.imageQueue.empty():
self.rawData = self.imageQueue.get()
self.mapData = base64.encodestring(self.rawData) # NB: encodestring is a deprecated alias of encodebytes (removed in Python 3.9)
try:
imgObj = PhotoImage(data=self.mapData)
except:
gui.error("Error parsing image data")
else:
self.canvas.itemconfig(self.image_on_canvas, image=imgObj)
self.canvas.img = imgObj
h = imgObj.height()
w = imgObj.width()
if h != self.h or w != self.w:
self._removeControls()
self.h = h
self.w = w
self.canvas.config(width=self.w, height=self.h)
self._placeControls()
if self.request is not None:
self.buttons[3].registerWebpage(self.request)
self._addTooltip(self.buttons[3], self.request)
self.updateMapId = self.parent.after(200, self.updateMap)
def _buildQueryURL(self):
self.request = self.MAP_URL + urlencode(self.params)
if len(self.markers) > 0:
m = ""
for mark in self.markers:
if mark["colour"] is not None: m += "color:" + str(mark["colour"])
if mark["size"] is not None: m += "|size:" + str(mark["size"])
if mark["label"] is not None: m += "|label:" + str(mark["label"])
m += "|" + str(mark["location"])
m = quote_plus(m)
self.request += "&markers=" + m
gui.trace("GoogleMap search URL: %s", self.request)
def _buildGeoURL(self, location):
""" for future use - gets the location
"""
p = {}
p["address"] = location
p["key"] = self.API_KEY
req = self.GEO_URL + urlencode(p)
return req
def getMapData(self):
""" will query GoogleMaps & download the image data as a blob """
if self.params['center'] == "":
self.params["center"] = self.currentLocation
self._buildQueryURL()
gotMap = False
while not gotMap:
if self.request is not None:
if self.proxyString is not None:
gui.error("Proxy set, but not enabled.")
try:
u = urlopen(self.request)
rawData = u.read()
u.close()
self.imageQueue.put(rawData)
gotMap = True
except Exception as e:
gui.error("Unable to contact GoogleMaps")
time.sleep(1)
else:
gui.trace("No request")
time.sleep(.25)
def getMapFile(self, fileName):
""" will query GoogleMaps & download the image into the named file """
self._buildQueryURL()
self.buttons[3].registerWebpage(self.request)
try:
urlretrieve(self.request, fileName)
return fileName
except Exception as e:
gui.error("Unable to contact GoogleMaps")
return None
def setCurrentLocation(self):
gui.trace("Location request URL: %s", self.LOCATION_URL)
try:
self.currentLocation = self._locationLookup()
except Exception as e:
gui.error("Unable to contact location server, using default: %s", self.defaultLocation)
self.currentLocation = self.defaultLocation
def _locationLookup(self):
u = urlopen(self.LOCATION_URL)
data = u.read().decode("utf-8")
u.close()
gui.trace("Location data: %s", data)
data = json.loads(data)
# location = data["loc"]
location = str(data["latitude"]) + "," + str(data["longitude"])
return location
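# illustrative freegeoip-style response (field values are assumptions):
#   {"latitude": 51.42, "longitude": -1.73, ...}
# which the code above flattens to the string "51.42,-1.73"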
#####################################
class CanvasDnd(Canvas, object):
"""
A canvas to which we have added those methods necessary so it can
act as both a TargetWidget and a TargetObject.
Use (or derive from) this drag-and-drop enabled canvas to create anything
that needs to be able to receive a dragged object.
"""
def __init__(self, Master, cnf={}, **kw):
if cnf:
kw.update(cnf)
super(CanvasDnd, self).__init__(Master, kw)
self.config(bd=0, highlightthickness=0)
#----- TargetWidget functionality -----
def dnd_accept(self, source, event):
#Tkdnd is asking us (the TargetWidget) if we want to tell it about a
# TargetObject. Since CanvasDnd is also acting as TargetObject we
# return 'self', saying that we are willing to be the TargetObject.
gui.trace("<<%s .dnd_accept>> %s", type(self), source)
return self
#----- TargetObject functionality -----
# This is called when the mouse pointer goes from outside the
# Target Widget to inside the Target Widget.
def dnd_enter(self, source, event):
gui.trace("<<%s .dnd_enter>> %s", type(self), source)
XY = gui.MOUSE_POS_IN_WIDGET(self, event)
# show the dragged object
source.appear(self, XY)
# This is called when the mouse pointer goes from inside the
# Target Widget to outside the Target Widget.
def dnd_leave(self, source, event):
gui.trace("<<%s .dnd_leave>> %s", type(self), source)
# hide the dragged object
source.vanish()
# This is called when the mouse pointer moves within the TargetWidget.
def dnd_motion(self, source, event):
gui.trace("<<%s .dnd_motion>> %s", type(self), source)
XY = gui.MOUSE_POS_IN_WIDGET(self,event)
# move the dragged object
source.move(self, XY)
#This is called if the DraggableWidget is being dropped on us.
def dnd_commit(self, source, event):
gui.trace("<<%s .dnd_commit>> %s", type(self), source)
# A canvas specifically for deleting dragged objects.
class TrashBin(CanvasDnd, object):
def __init__(self, master, **kw):
if "width" not in kw:
kw['width'] = 150
if "height" not in kw:
kw['height'] = 25
super(TrashBin, self).__init__(master, kw)
self.config(relief="sunken", bd=2)
x = kw['width'] / 2
y = kw['height'] / 2
self.textId = self.create_text(x, y, text='TRASH', anchor="center")
def dnd_commit(self, source, event):
gui.trace("<<TRASH_BIN.dnd_commit>> vanishing source")
source.vanish(True)
def config(self, **kw):
self.configure(**kw)
def configure(self, **kw):
kw = gui.CLEAN_CONFIG_DICTIONARY(**kw)
if "fg" in kw:
fg=kw.pop('fg')
self.itemconfigure(self.textId, fill=fg)
super(TrashBin, self).config(**kw)
# This is a prototype thing to be dragged and dropped.
class DraggableWidget(object):
discardDragged = False
def dnd_accept(self, source, event):
return None
def __init__(self, parent, title, name, XY, widg=None):
self.parent = parent
gui.trace("<<DRAGGABLE_WIDGET.__init__>>")
#When created we are not on any canvas
self.Canvas = None
self.OriginalCanvas = None
self.widg = widg
#This sets where the mouse cursor will be with respect to our label
self.OffsetCalculated = False
self.OffsetX = XY[0]
self.OffsetY = XY[1]
# give ourself a name
self.Name = name
self.Title = title
self.OriginalID = None
self.dropTarget = None
# this gets called when we are dropped
def dnd_end(self, target, event):
gui.trace("<<DRAGGABLE_WIDGET.dnd_end>> %s target=%s", self.Name, target)
# from somewhere, dropped nowhere - self destruct, or put back
if self.Canvas is None:
gui.trace("<<DRAGGABLE_WIDGET.dnd_end>> dropped with Canvas (None)")
if DraggableWidget.discardDragged:
gui.trace("<<DRAGGABLE_WIDGET.dnd_end>> DISCARDING under order")
else:
if self.OriginalCanvas is not None:
gui.trace("<<DRAGGABLE_WIDGET.dnd_end>> RESTORING")
self.restoreOldData()
self.Canvas.dnd_enter(self, event)
else:
gui.trace("<<DRAGGABLE_WIDGET.dnd_end>> DISCARDING as nowhere to go")
# have been dropped somewhere
else:
gui.trace("<<DRAGGABLE_WIDGET.dnd_end>> dropped with Canvas(%s) Target=%s", self.Canvas, self.dropTarget)
if not self.dropTarget:
# make the dragged object re-draggable
self.Label.bind('<ButtonPress>', self.press)
else:
if self.dropTarget.keepWidget(self.Title, self.Name):
self.Label.bind('<ButtonPress>', self.press)
else:
self.vanish(True)
# delete any old widget
if self.OriginalCanvas:
self.OriginalCanvas.delete(self.OriginalID)
self.OriginalCanvas = None
self.OriginalID = None
self.OriginalLabel = None
# put a label representing this DraggableWidget instance on Canvas.
def appear(self, canvas, XY):
if not isinstance(canvas, CanvasDnd):
self.dropTarget = canvas
canvas = canvas.dnd_canvas
# else:
# self.dropTarget = None
if self.Canvas:
gui.trace("<<DRAGGABLE_WIDGET.appear> - ignoring, as we already exist?: %s %s", canvas, XY)
return
else:
gui.trace("<<DRAGGABLE_WIDGET.appear> - appearing: %s %s", canvas, XY)
self.Canvas = canvas
self.X, self.Y = XY
self.Label = Label(self.Canvas, text=self.Name, borderwidth=2, relief=RAISED)
# Offsets are received as percentages from initial button press
# so calculate Offset from a percentage
if not self.OffsetCalculated:
self.OffsetX = self.Label.winfo_reqwidth() * self.OffsetX
self.OffsetY = self.Label.winfo_reqheight() * self.OffsetY
self.OffsetCalculated = True
self.ID = self.Canvas.create_window(self.X-self.OffsetX, self.Y-self.OffsetY, window=self.Label, anchor="nw")
gui.trace("<<DRAGGABLE_WIDGET.appear> - created: %s %s", self.Label, self.Canvas)
# if there is a label representing us on a canvas, make it go away.
def vanish(self, all=False):
# if we had a canvas, delete us
if self.Canvas:
gui.trace("<<DRAGGABLE_WIDGET.vanish> - vanishing")
self.storeOldData()
self.Canvas.delete(self.ID)
self.Canvas = None
del self.ID
del self.Label
else:
gui.trace("<<DRAGGABLE_WIDGET.vanish>> ignoring")
if all and self.OriginalCanvas:
gui.trace("<<DRAGGABLE_WIDGET.vanish>> restore original")
self.OriginalCanvas.delete(self.OriginalID)
self.OriginalCanvas = None
del self.OriginalID
del self.OriginalLabel
# if we have a label on a canvas, then move it to the specified location.
def move(self, widget, XY):
gui.trace("<<DRAGGABLE_WIDGET.move>> %s %s", self.Canvas, XY)
if self.Canvas:
self.X, self.Y = XY
self.Canvas.coords(self.ID, self.X-self.OffsetX, self.Y-self.OffsetY)
else:
gui.error("<<DRAGGABLE_WIDGET.move>> unable to move - NO CANVAS!")
def press(self, event):
gui.trace("<<DRAGGABLE_WIDGET.press>>")
self.storeOldData()
self.ID = None
self.Canvas = None
self.Label = None
#Ask Tkdnd to start the drag operation
if INTERNAL_DND.dnd_start(self, event):
self.OffsetX, self.OffsetY = gui.MOUSE_POS_IN_WIDGET(self.OriginalLabel, event, False)
XY = gui.MOUSE_POS_IN_WIDGET(self.OriginalCanvas, event, False)
self.appear(self.OriginalCanvas, XY)
def storeOldData(self, phantom=False):
gui.trace("<<DRAGGABLE_WIDGET.storeOldData>>")
self.OriginalID = self.ID
self.OriginalLabel = self.Label
self.OriginalText = self.Label['text']
self.OriginalCanvas = self.Canvas
if phantom:
gui.trace("<<DRAGGABLE_WIDGET.storeOldData>> keeping phantom")
self.OriginalLabel["text"] = "<Phantom>"
self.OriginalLabel["relief"] = RAISED
else:
gui.trace("<<DRAGGABLE_WIDGET.storeOldData>> hiding phantom")
self.OriginalCanvas.delete(self.OriginalID)
def restoreOldData(self):
if self.OriginalID:
gui.trace("<<DRAGGABLE_WIDGET.restoreOldData>>")
self.ID = self.OriginalID
self.Label = self.OriginalLabel
self.Label['text'] = self.OriginalText
self.Label['relief'] = RAISED
self.Canvas = self.OriginalCanvas
self.OriginalCanvas.itemconfigure(self.OriginalID, state='normal')
self.Label.bind('<ButtonPress>', self.press)
else:
gui.trace("<<DRAGGABLE_WIDGET.restoreOldData>> unable to restore - NO OriginalID")
#########################################
# Enum & WidgetManager - used to store widget lists
#########################################
class WidgetManager(object):
""" used to keep track of all widgets in the GUI
creates a dictionary for each widget type on demand
provides functions for accessing widgets """
WIDGETS = "widgets"
VARS = "vars"
def __init__(self):
self.widgets = {}
self.vars = {}
def reset(self, keepers):
newWidg = {}
newVar = {}
for key in keepers:
if key in self.widgets:
newWidg[key] = self.widgets[key]
if key in self.vars:
newVar[key] = self.vars[key]
self.widgets = newWidg
self.vars = newVar
def group(self, widgetType, group=None, array=False):
"""
returns the list/dictionary containing the specified widget type
will create a new group if none exists
"""
if group is None: container = self.widgets
elif group == WidgetManager.VARS: container = self.vars
try:
widgGroup = container[widgetType]
except KeyError:
if array: widgGroup = []
else: widgGroup = {}
container[widgetType] = widgGroup
return widgGroup
def add(self, widgetType, widgetName, widget, group=None):
""" adds items to the specified dictionary """
widgGroup = self.group(widgetType, group)
if widgetName in widgGroup:
raise ItemLookupError("Duplicate key: '" + widgetName + "' already exists")
else:
widgGroup[widgetName] = widget
widget.APPJAR_TYPE = widgetType
def log(self, widgetType, widget, group=None):
""" Used for adding items to an array """
widgGroup = self.group(widgetType, group, array=True)
widgGroup.append(widget)
try: widget.APPJAR_TYPE = widgetType
except AttributeError: pass # not available on some classes
def verify(self, widgetType, widgetName, group=None, array=False):
""" checks for duplicatres """
if widgetName in self.group(widgetType, group, array):
raise ItemLookupError("Duplicate widgetName: " + widgetName)
def get(self, widgetType, widgetName, group=None):
""" gets the specified item """
try:
return self.group(widgetType, group)[widgetName]
except KeyError:
raise ItemLookupError("Invalid widgetName: " + widgetName)
def update(self, widgetType, widgetName, widget, group=None):
""" gets the specified item """
try:
self.group(widgetType, group)[widgetName] = widget
except KeyError:
raise ItemLookupError("Invalid widgetName: '" + widgetName)
def check(self, widgetType, widgetName, group=None):
""" used for arrays - checks if the item is in the array """
try:
if widgetName in self.group(widgetType, group): return True
else: raise ItemLookupError("Invalid widgetName: '" + widgetName + "'")
except KeyError:
raise ItemLookupError("Invalid widgetName: '" + widgetName)
def remove(self, widgetType, widgetName, group=None):
widgGroup = self.group(widgetType, group)
if type(widgGroup) == list:
widgGroup.remove(widgetName)
else:
del widgGroup[widgetName]
# delete a linked var
if group != self.VARS:
try: del self.group(widgetType, self.VARS)[widgetName]
except: pass
def clear(self, widgetType, group=None):
if group is None: container = self.widgets
elif group == WidgetManager.VARS: container = self.vars
if isinstance(self.group(widgetType, group), dict):
container[widgetType] = {}
else:
container[widgetType] = []
# function to loop through a config dict/list and remove matching object
def destroyWidget(self, widgType, widget):
widgets = self.widgets[widgType]
# just a list, remove matching obj - no vars
if type(widgets) in (list, tuple):
for obj in widgets:
if widget == obj:
obj.destroy()
widgets.remove(obj)
gui.trace("Matched and removed")
return True
else:
for name, obj in widgets.items():
if type(obj) in (list, tuple):
# nested list of widgets - search it directly for a match
if widget in obj:
widget.destroy()
obj.remove(widget)
if len(obj) == 0:
del widgets[name]
try: del self.vars[widgType][name]
except: pass
gui.trace("Matched and destroyed")
return True
elif widget == obj:
obj.destroy()
del widgets[name]
try: del self.vars[widgType][name]
except: pass # no var
gui.trace("Matched and destroyed")
return True
gui.trace("Failed to destory widget")
return False
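# usage sketch (widget/type names are illustrative):
# wm = WidgetManager()
# wm.add("labels", "title", someLabelWidget)   # raises ItemLookupError on duplicates
# wm.get("labels", "title")                    # -> someLabelWidget
# wm.remove("labels", "title")                 # also drops any linked var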
class Enum(object):
""" class to emulate enum type - works in all python versions
also provides some extra functions """
__initialized = False
def __init__(self, widgets, excluded, keepers):
self.widgets = widgets
self.excluded = excluded
self.keepers = []
for k in keepers:
self.keepers.append(self.get(k))
self.funcList = []
for w in self.widgets:
if w not in self.excluded:
self.funcList.append(w)
self.__initialized = True
def __getattr__(self, name):
return self.get(name)
def get(self, name):
try: return self.widgets.index(name)
except: raise KeyError("Invalid key: " + str(name))
def getIgnoreCase(self, name):
name = name.upper()
for w in self.widgets:
if w.upper() == name:
return self.widgets.index(w)
else:
raise KeyError("Invalid key: " + str(name))
def __setattr__(self, name, value):
if self.__initialized: raise Exception("Unable to change Widget store")
else: super(Enum, self).__setattr__(name, value)
def __delattr__(self, name):
raise Exception("Unable to delete from Widget store")
def name(self, i):
"""Get the real name of the widget"""
return self.widgets[i]
def funcs(self):
""" Get a list of names to use as functions """
return self.funcList
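# usage sketch (illustrative values):
# W = Enum(["LABEL", "ENTRY", "BUTTON"], ["BUTTON"], ["LABEL"])
# W.LABEL     # -> 0, the index of the name
# W.name(1)   # -> "ENTRY"
# W.funcs()   # -> ["LABEL", "ENTRY"], the non-excluded names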
#####################################
# MAIN - for testing
#####################################
if __name__ == "__main__":
print("This is a library class and cannot be executed.")
sys.exit()
|
main_window.py
|
#!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import csv
from decimal import Decimal as PyDecimal # Qt 5.12 also exports Decimal
import base64
from functools import partial
from collections import OrderedDict
from typing import List
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from electroncash import keystore, get_config
from electroncash.address import Address, ScriptOutput
from electroncash.bitcoin import COIN, TYPE_ADDRESS, TYPE_SCRIPT, MIN_AMOUNT
from electroncash import networks
from electroncash.plugins import run_hook
from electroncash.i18n import _, ngettext
from electroncash.util import (format_time, format_spocks, PrintError,
format_spocks_plain, NotEnoughFunds,
ExcessiveFee, UserCancelled, InvalidPassword,
bh2u, bfh, format_fee_spocks, Weak,
print_error)
import electroncash.web as web
from electroncash import Transaction
from electroncash import util, bitcoin, commands, cashacct
from electroncash import paymentrequest
from electroncash.wallet import Multisig_Wallet, sweep_preparations
from electroncash.contacts import Contact
try:
from electroncash.plot import plot_history
except:
plot_history = None
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, BTCkBEdit, BTCSatsByteEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .popup_widget import ShowPopupLabel, KillPopupLabel, PopupWidget
from . import cashacctqt
from .util import *
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(Qt.PointingHandCursor)
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electroncash.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
# Note: self.clean_up_connections automatically detects signals named XXX_signal and disconnects them on window close.
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
history_updated_signal = pyqtSignal()
labels_updated_signal = pyqtSignal() # note this signal occurs when an explicit update_labels() call happens. Interested GUIs should also listen for history_updated_signal as well which also indicates labels may have changed.
on_timer_signal = pyqtSignal() # functions wanting to be executed from timer_actions should connect to this signal, preferably via Qt.DirectConnection
ca_address_default_changed_signal = pyqtSignal(object) # passes cashacct.Info object to slot, which is the new default. Mainly emitted by address_list and address_dialog
status_icon_dict = dict() # app-global cache of "status_*" -> QIcon instances (for update_status() speedup)
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.wallet = wallet
self.config = config = gui_object.config
assert self.wallet and self.config and self.gui_object
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.op_return_toolong = False
self.internalpluginsdialog = None
self.externalpluginsdialog = None
self.hardwarewalletdialog = None
self.require_fee_update = False
self.cashaddr_toggled_signal = self.gui_object.cashaddr_toggled_signal # alias for backwards compatibility for plugins -- this signal used to live in each window and has since been refactored to gui-object where it belongs (since it's really an app-global setting)
self.force_use_single_change_addr = None # this is set by the CashShuffle plugin to a single string that will go into the tool-tip explaining why this preference option is disabled (see self.settings_dialog)
self.tl_windows = []
self.tx_external_keypairs = {}
self._tx_dialogs = Weak.Set()
self.tx_update_mgr = TxUpdateMgr(self) # manages network callbacks for 'new_transaction' and 'verified2', and collates GUI updates from said callbacks as a performance optimization
self.is_schnorr_enabled = self.wallet.is_schnorr_enabled # This is a function -- Support for plugins that may be using the 4.0.3 & 4.0.4 API -- this function used to live in this class, before being moved to Abstract_Wallet.
self.send_tab_opreturn_widgets, self.receive_tab_opreturn_widgets = [], [] # defaults to empty list
self._shortcuts = Weak.Set() # keep track of shortcuts and disable them on close
self.create_status_bar()
self.need_update = threading.Event()
self.labels_need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 8)
self.fee_unit = config.get('fee_unit', 0)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
# clears/inits the opreturn widgets
self.on_toggled_opreturn(bool(self.config.get('enable_opreturn')))
def add_optional_tab(tabs, tab, icon, description, name, default=False):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), default):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.init_menubar()
wrtabs = Weak.ref(tabs) # We use a weak reference here to help along python gc of QShortcut children: prevent the lambdas below from holding a strong ref to self.
self._shortcuts.add( QShortcut(QKeySequence("Ctrl+W"), self, self.close) )
self._shortcuts.add( QShortcut(QKeySequence("Ctrl+Q"), self, self.close) )
# Below is now added to the menu as Ctrl+R but we'll also support F5 like browsers do
self._shortcuts.add( QShortcut(QKeySequence("F5"), self, self.update_wallet) )
self._shortcuts.add( QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs() and wrtabs().setCurrentIndex((wrtabs().currentIndex() - 1)%wrtabs().count())) )
self._shortcuts.add( QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs() and wrtabs().setCurrentIndex((wrtabs().currentIndex() + 1)%wrtabs().count())) )
for i in range(tabs.count()):
self._shortcuts.add( QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs() and wrtabs().setCurrentIndex(i)) )
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.gui_object.update_available_signal.connect(self.on_update_available) # shows/hides the update_available_button, emitted by update check mechanism when a new version is available
self.history_list.setFocus(True)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['blockchain_updated', 'wallet_updated',
'new_transaction', 'status', 'banner', 'verified2',
'fee', 'ca_verified_tx', 'ca_verification_failed']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
_first_shown = True
def showEvent(self, event):
super().showEvent(event)
if event.isAccepted() and self._first_shown:
self._first_shown = False
weakSelf = Weak.ref(self)
#
#try:
# # Amaury's recommendation -- only remind a subset of users to enable it.
# self.remind_cashshuffle_enabled = bool(int.from_bytes(bytes.fromhex(self.wallet.get_public_key(self.wallet.get_addresses()[0])), byteorder='big') & 0x3)
#except (AttributeError, ValueError, TypeError):
# # wallet lacks the get_public_key method
# self.remind_cashshuffle_enabled = False
self.remind_cashshuffle_enabled = False # For now globally disabled
#QTimer.singleShot(300, lambda: weakSelf() and weakSelf().do_cash_shuffle_reminder())
#
# do this immediately after this event handler finishes -- noop on everything but linux
QTimer.singleShot(0, lambda: weakSelf() and weakSelf().gui_object.lin_win_maybe_show_highdpi_caveat_msg(weakSelf()))
def on_history(self, event, *args):
# NB: event should always be 'on_history'
if not args or args[0] is self.wallet:
self.new_fx_history_signal.emit()
@rate_limited(3.0) # Rate limit to no more than once every 3 seconds
def on_fx_history(self):
if self.cleaned_up: return
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
@rate_limited(3.0) # Rate limit to no more than once every 3 seconds
def on_fx_quotes(self):
if self.cleaned_up: return
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def toggle_tab(self, tab):
show = self.tabs.indexOf(tab) == -1
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_format = _("Hide {tab_description}") if show else _("Show {tab_description}")
item_text = item_format.format(tab_description=tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
return self.top_level_window_recurse(override)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self), self.wallet.basename())
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
try:
traceback.print_exception(*exc_info)
except OSError:
# Issue #662, user got IO error.
# We want them to still get the error displayed to them.
pass
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
#self.print_error("on_network:", event, *args)
if event == 'wallet_updated':
if args[0] is self.wallet:
self.need_update.set()
elif event == 'blockchain_updated':
self.need_update.set()
elif event == 'new_transaction':
self.tx_update_mgr.notif_add(args) # added only if this wallet's tx
if args[1] is self.wallet:
self.network_signal.emit(event, args)
elif event == 'verified2':
self.tx_update_mgr.verif_add(args) # added only if this wallet's tx
if args[0] is self.wallet:
self.network_signal.emit(event, args)
elif event in ['status', 'banner', 'fee']:
# Handle in GUI thread
self.network_signal.emit(event, args)
elif event in ('ca_verified_tx', 'ca_verification_failed'):
if args[0] is self.wallet.cashacct:
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
if self.cleaned_up: return
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'fee':
pass
elif event == 'new_transaction':
self.check_and_reset_receive_address_if_needed()
elif event in ('ca_verified_tx', 'ca_verification_failed'):
pass
elif event == 'verified2':
pass
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def _close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
self.wallet.thread = None
run_hook('close_wallet', self.wallet)
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error, name = wallet.diagnostic_name() + '/Wallet')
self.update_recently_visited(wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.tray.isVisible():
self.hide()
else:
self.show()
if self._is_invalid_testnet_wallet():
self.gui_object.daemon.stop_wallet(self.wallet.storage.path)
self._rebuild_history_action.setEnabled(False)
self._warn_if_invalid_testnet_wallet()
self.watching_only_changed()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
title = '%s %s - %s' % (networks.net.TITLE,
self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.can_change_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend DeVault with it."),
_("Make sure you own the seed phrase or the private keys, before you request DeVault to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def _is_invalid_testnet_wallet(self):
if not networks.net.TESTNET:
return False
is_old_bad = False
xkey = ((hasattr(self.wallet, 'get_master_public_key') and self.wallet.get_master_public_key())
or None)
if xkey:
from electroncash.bitcoin import deserialize_xpub, InvalidXKeyFormat
try:
xp = deserialize_xpub(xkey)
except InvalidXKeyFormat:
is_old_bad = True
return is_old_bad
def _warn_if_invalid_testnet_wallet(self):
''' This was added after the upgrade from the bad xpub testnet wallets
to the good tpub testnet wallet format in version 3.3.6. See #1164.
We warn users if they are using the bad wallet format and instruct
them on how to upgrade their wallets.'''
is_old_bad = self._is_invalid_testnet_wallet()
if is_old_bad:
msg = ' '.join([
_("This testnet wallet has an invalid master key format."),
_("(Old versions of DeLight before 3.3.6 produced invalid testnet wallets)."),
'<br><br>',
_("In order to use this wallet without errors with this version of EC, please <b>re-generate this wallet from seed</b>."),
"<br><br><em><i>~SPV stopped~</i></em>"
])
self.show_critical(msg, title=_('Invalid Master Key'), rich_text=True)
return is_old_bad
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
if not os.path.exists(wallet_folder):
wallet_folder = None
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
if filename.lower().endswith('.txn'):
# they did File -> Open on a .txn, just do that.
self.do_process_from_file(fileName=filename)
return
self.gui_object.new_window(filename)
def backup_wallet(self):
self.wallet.storage.write() # make sure file is committed to disk
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
# Copy file contents
shutil.copyfile(path, new_path)
# Copy file attributes if possible
# (not supported on targets like Flatpak documents)
try:
shutil.copystat(path, new_path)
except (IOError, os.error):
pass
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except (IOError, os.error) as reason:
self.show_critical(_("DeLight was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent2 = []
for k in recent:
if os.path.exists(k):
recent2.append(k)
recent = recent2[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
gui_object = self.gui_object
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return self.gui_object.get_wallet_folder()
def new_wallet(self):
try:
full_path = self.gui_object.get_new_wallet_path()
except FileNotFoundError as e:
self.show_error(str(e))
return
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = self.menuBar()
menubar.setObjectName(self.diagnostic_name() + ".QMenuBar")
destroyed_print_error(menubar)
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys, QKeySequence("Ctrl+I"))
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
self._rebuild_history_action = wallet_menu.addAction(_("&Rebuild history"), self.rebuild_history)
self._scan_beyond_gap_action = wallet_menu.addAction(_("&Scan beyond gap..."), self.scan_beyond_gap)
self._scan_beyond_gap_action.setEnabled(bool(self.wallet.is_deterministic() and self.network))
wallet_menu.addSeparator()
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
hist_menu = wallet_menu.addMenu(_("&History"))
#hist_menu.addAction(_("Plot"), self.plot_history_dialog).setEnabled(plot_history is not None)
hist_menu.addAction(_("Export"), self.export_history_dialog)
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search, QKeySequence("Ctrl+F"))
wallet_menu.addAction(_("&Refresh GUI"), self.update_wallet, QKeySequence("Ctrl+R"))
def add_toggle_action(view_menu, tab):
is_shown = self.tabs.indexOf(tab) > -1
item_format = _("Hide {tab_description}") if is_shown else _("Show {tab_description}")
item_name = item_format.format(tab_description=tab.tab_description)
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# Settings / Preferences are reserved keywords on macOS - using this title as a work-around
prefs_tit = _("DeLight preferences") if sys.platform == 'darwin' else _("Preferences")
tools_menu.addAction(prefs_tit, self.settings_dialog, QKeySequence("Ctrl+,") )
gui_object = self.gui_object
weakSelf = Weak.ref(self)
tools_menu.addAction(_("&Network"), lambda: gui_object.show_network_dialog(weakSelf()), QKeySequence("Ctrl+K"))
tools_menu.addAction(_("Optional &Features"), self.internal_plugins_dialog, QKeySequence("Shift+Ctrl+P"))
tools_menu.addAction(_("Installed &Plugins"), self.external_plugins_dialog, QKeySequence("Ctrl+P"))
if sys.platform.startswith('linux'):
tools_menu.addSeparator()
tools_menu.addAction(_("&Hardware wallet support..."), self.hardware_wallet_support)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany, QKeySequence("Ctrl+M"))
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("From &file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("From &text"), self.do_process_from_text, QKeySequence("Ctrl+T"))
raw_transaction_menu.addAction(_("From the &blockchain"), self.do_process_from_txid, QKeySequence("Ctrl+B"))
raw_transaction_menu.addAction(_("From &QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
tools_menu.addSeparator()
if ColorScheme.dark_scheme and sys.platform != 'darwin': # use dark icon in menu except for on macOS where we can't be sure it will look right due to the way menus work on macOS
icon = QIcon(":icons/cashacct-button-darkmode.png")
else:
icon = QIcon(":icons/cashacct-logo.png")
tools_menu.addAction(icon, _("Lookup &DeVault ID..."), self.lookup_cash_account_dialog, QKeySequence("Ctrl+L"))
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("About Qt"), self.app.aboutQt)
help_menu.addAction(_("&Check for updates..."), lambda: self.gui_object.show_update_checker(self))
help_menu.addAction(_("&Official website"), lambda: webopen("https://devault.cc"))
help_menu.addSeparator()
# help_menu.addAction(_("Documentation"), lambda: webopen("http://electroncash.readthedocs.io/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
# The message is intentionally untranslated, leave it like that
self.pay_to_URI('{}:{}?message=donation for {}'
.format(networks.net.CASHADDR_PREFIX, d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "DeLight",
"<p><font size=+3><b>DeLight</b></font></p><p>" + _("Version") + f" {self.wallet.electrum_version}" + "</p>" +
'<p><span style="font-size:11pt; font-weight:500;">' + "Copyright © 2019<br>The DeVault Developers" + "</span></p>" +
'<p><span style="font-weight:200;">' +
'<p><span style="font-size:11pt; font-weight:500;">' + "Copyright © 2017-2019<br>Electron Cash LLC & The Electron Cash Developers" + "</span></p>" +
'<p><span style="font-weight:200;">' +
_("DeLight's focus is speed, with low resource usage and simplifying DeVault. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the DeVault system.") +
"</span></p>"
)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/devaultcrypto/DeLight/issues\">https://github.com/devaultcrypto/DeLight/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of DeLight (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="DeLight - " + _("Reporting Bugs"), rich_text = True)
def notify(self, message):
self.gui_object.notify(message)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
return __class__.static_getOpenFileName(title=title, filter=filter, config=self.config, parent=self)
def getSaveFileName(self, title, filename, filter = ""):
return __class__.static_getSaveFileName(title=title, filename=filename, filter=filter, config=self.config, parent=self)
@staticmethod
def static_getOpenFileName(*, title, parent=None, config=None, filter=""):
if not config:
config = get_config()
userdir = os.path.expanduser('~')
directory = config.get('io_dir', userdir) if config else userdir
fileName, __ = QFileDialog.getOpenFileName(parent, title, directory, filter)
if fileName and directory != os.path.dirname(fileName) and config:
config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
@staticmethod
def static_getSaveFileName(*, title, filename, parent=None, config=None, filter=""):
if not config:
config = get_config()
userdir = os.path.expanduser('~')
directory = config.get('io_dir', userdir) if config else userdir
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(parent, title, path, filter)
if fileName and directory != os.path.dirname(fileName) and config:
config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self._update_wallet() # will clear flag when it runs. (also clears labels_need_update as well)
if self.labels_need_update.is_set():
self._update_labels() # will clear flag when it runs.
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
# hook for other classes to be called here. For example the tx_update_mgr is called here (see TxUpdateMgr.do_check).
self.on_timer_signal.emit()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_spocks(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount, is_diff=False):
text = self.format_amount(amount, is_diff=is_diff) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount, is_diff=is_diff)
if text and x:
text += ' (%s)'%x
return text
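# illustrative example (unit name, rate and decimal_point=8 are assumptions):
#   format_amount_and_units(150000000) might yield "1.5 DVT (300.00 USD)";
#   the fiat part is omitted when no exchange rate is available.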
def format_fee_rate(self, fee_rate):
sats_per_byte = format_fee_spocks(fee_rate/1000, max(self.num_zeros, 1))
return _('{sats_per_byte} sat/byte').format(sats_per_byte=sats_per_byte)
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
if self.decimal_point in util.inv_base_units:
return util.inv_base_units[self.decimal_point]
raise Exception('Unknown base unit')
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else None
if rate is None or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / PyDecimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * PyDecimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
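    # Worked example of edit_changed above (illustrative rate): with an
    # exchange rate of 100.00, typing '5.00' into fiat_e sets btc_e to
    # int(5.00 / 100.00 * COIN) base units; editing btc_e instead runs the
    # inverse, amount * rate / COIN, formatted via fx.ccy_amount_str().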
_network_status_tip_dict = dict()
def update_status(self):
if not self.wallet:
return
icon_dict = ElectrumWindow.status_icon_dict
if not icon_dict:
# cache the icons to save on CPU overhead per update_status call
icon_dict.update({
"status_disconnected" : QIcon(":icons/status_disconnected.svg"),
"status_waiting" : QIcon(":icons/status_waiting.svg"),
"status_lagging" : QIcon(":icons/status_lagging.svg"),
"status_lagging_fork" : QIcon(":icons/status_lagging_fork.svg"),
"status_connected" : QIcon(":icons/status_connected.svg"),
"status_connected_fork" : QIcon(":icons/status_connected_fork.svg"),
"status_connected_proxy" : QIcon(":icons/status_connected_proxy.svg"),
"status_connected_proxy_fork" : QIcon(":icons/status_connected_proxy_fork.svg"),
})
status_tip_dict = ElectrumWindow._network_status_tip_dict
if not status_tip_dict:
# Since we're caching stuff, might as well cache this too
status_tip_dict.update({
"status_disconnected" : _('Network Status') + " - " + _("Offline"),
"status_waiting" : _('Network Status') + " - " + _("Updating..."),
"status_lagging" : _('Network Status') + " - " + '',
"status_lagging_fork" : _('Network Status') + " - " + _("Chain fork(s) detected"),
"status_connected" : _('Network Status') + " - " + _("Connected"),
"status_connected_fork" : _('Network Status') + " - " + _("Chain fork(s) detected"),
"status_connected_proxy" : _('Network Status') + " - " + _("Connected via proxy"),
"status_connected_proxy_fork" : _('Network Status') + " - " + _("Connected via proxy") + "; " + _("Chain fork(s) detected"),
})
status_tip = ''
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = icon_dict["status_disconnected"]
status_tip = status_tip_dict['status_disconnected']
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
num_chains = len(self.network.get_blockchains())
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = icon_dict["status_waiting"]
status_tip = status_tip_dict["status_waiting"]
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
if num_chains <= 1:
icon = icon_dict["status_lagging"]
status_tip = status_tip_dict["status_lagging"] + text
else:
icon = icon_dict["status_lagging_fork"]
status_tip = status_tip_dict["status_lagging_fork"] + "; " + text
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, True).strip())
extra = run_hook("balance_label_extra", self)
if isinstance(extra, str) and extra:
text += " [{}]".format(extra)
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
n_unverif = self.wallet.get_unverified_tx_pending_count()
if n_unverif >= 10:
# if there are lots left to verify, display this informative text
text += " " + ( _("[%d unverified TXs]") % n_unverif )
if not self.network.proxy:
icon = icon_dict["status_connected"] if num_chains <= 1 else icon_dict["status_connected_fork"]
status_tip = status_tip_dict["status_connected"] if num_chains <= 1 else status_tip_dict["status_connected_fork"]
else:
icon = icon_dict["status_connected_proxy"] if num_chains <= 1 else icon_dict["status_connected_proxy_fork"]
status_tip = status_tip_dict["status_connected_proxy"] if num_chains <= 1 else status_tip_dict["status_connected_proxy_fork"]
else:
text = _("Not connected")
icon = icon_dict["status_disconnected"]
status_tip = status_tip_dict["status_disconnected"]
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
self.status_button.setStatusTip( status_tip )
self.update_cashshuffle_icon()
def update_wallet(self):
self.need_update.set() # will enqueue an _update_wallet() call in at most 0.5 seconds from now.
def _update_wallet(self):
''' Called by self.timer_actions every 0.5 secs if need_update flag is set.
Note that the flag is actually cleared by update_tabs.'''
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
@rate_limited(1.0, classlevel=True, ts_after=True) # Limit tab updates to no more than 1 per second, app-wide. Multiple calls across instances will be collated into 1 deferred series of calls (1 call per extant instance)
def update_tabs(self):
if self.cleaned_up: return
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history, also clears self.tx_update_mgr.verif_q
self.need_update.clear() # clear flag
if self.labels_need_update.is_set():
# if flag was set, might as well declare the labels updated since they necessarily were due to a full update.
self.labels_updated_signal.emit() # just in case client code was waiting for this signal to proceed.
self.labels_need_update.clear() # clear flag
def update_labels(self):
self.labels_need_update.set() # will enqueue an _update_labels() call in at most 0.5 seconds from now
@rate_limited(1.0)
def _update_labels(self):
''' Called by self.timer_actions every 0.5 secs if labels_need_update flag is set. '''
if self.cleaned_up: return
self.history_list.update_labels()
self.address_list.update_labels()
self.utxo_list.update_labels()
self.update_completions()
self.labels_updated_signal.emit()
self.labels_need_update.clear() # clear flag
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
return l
def show_address(self, addr, *, parent=None):
parent = parent or self.top_level_window()
from . import address_dialog
d = address_dialog.AddressDialog(self, addr, windowParent=parent)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
d = show_transaction(tx, self, tx_desc)
self._tx_dialogs.add(d)
def on_toggled_opreturn(self, b):
''' toggles opreturn-related widgets for both the receive and send
tabs'''
b = bool(b)
self.config.set_key('enable_opreturn', b)
# send tab
if not b:
self.message_opreturn_e.setText("")
self.op_return_toolong = False
for x in self.send_tab_opreturn_widgets:
x.setVisible(b)
# receive tab
for x in self.receive_tab_opreturn_widgets:
x.setVisible(b)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address = None
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton()
self.receive_address_e.setReadOnly(True)
msg = _('DeVault address where the payment should be received. Note that each payment request uses a different DeVault address.')
label = HelpLabel(_('&Receiving address'), msg)
label.setBuddy(self.receive_address_e)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.gui_object.cashaddr_toggled_signal.connect(self.update_receive_address_widget)
grid.addWidget(label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
# DeVault ID for this address (if any)
msg = _("The DeVault ID (if any) associated with this address. It doesn't get saved with the request, but it is shown here for your convenience.\n\nYou may use the DeVault IDs button to register a new DeVault ID for this address.")
label = HelpLabel(_('DeVault ID'), msg)
class CashAcctE(ButtonsLineEdit):
my_network_signal = pyqtSignal(str, object)
            ''' Inner class encapsulating the DeVault ID Edit.
            Note:
                - `slf` in this class is this instance.
                - `self` is the wrapping class instance. '''
def __init__(slf, *args):
super().__init__(*args)
slf.font_default_size = slf.font().pointSize()
icon = ":icons/cashacct-button-darkmode.png" if ColorScheme.dark_scheme else ":icons/cashacct-logo.png"
slf.ca_but = slf.addButton(icon, self.register_new_cash_account, _("Register a new DeVault ID for this address"))
slf.ca_copy_b = slf.addCopyButton()
slf.setReadOnly(True)
slf.info = None
slf.cleaned_up = False
self.network_signal.connect(slf.on_network_qt)
slf.my_network_signal.connect(slf.on_network_qt)
if self.wallet.network:
self.wallet.network.register_callback(slf.on_network, ['ca_updated_minimal_chash'])
def clean_up(slf):
slf.cleaned_up = True
if self.wallet.network:
self.wallet.network.unregister_callback(slf.on_network)
def set_cash_acct(slf, info: cashacct.Info = None, minimal_chash = None):
if not info and self.receive_address:
minimal_chash = None
ca_list = self.wallet.cashacct.get_cashaccounts(domain=[self.receive_address])
ca_list.sort(key=lambda x: ((x.number or 0), str(x.collision_hash)))
info = self.wallet.cashacct.get_address_default(ca_list)
if info:
slf.ca_copy_b.setDisabled(False)
f = slf.font(); f.setItalic(False); f.setPointSize(slf.font_default_size); slf.setFont(f)
slf.setText(info.emoji + " " + self.wallet.cashacct.fmt_info(info, minimal_chash=minimal_chash))
else:
slf.setText(_("None"))
f = slf.font(); f.setItalic(True); f.setPointSize(slf.font_default_size-1); slf.setFont(f)
slf.ca_copy_b.setDisabled(True)
slf.info = info
def on_copy(slf):
''' overrides super class '''
QApplication.instance().clipboard().setText(slf.text()[3:] + ' ' + slf.text()[:1]) # cut off the leading emoji, and add it to the end
QToolTip.showText(QCursor.pos(), _("DeVault ID copied to clipboard"), slf)
def on_network_qt(slf, event, args=None):
''' pick up cash account changes and update receive tab. Called
from GUI thread. '''
if not args or self.cleaned_up or slf.cleaned_up or args[0] != self.wallet.cashacct:
return
if event == 'ca_verified_tx' and self.receive_address and self.receive_address == args[1].address:
slf.set_cash_acct()
elif event == 'ca_updated_minimal_chash' and slf.info and slf.info.address == args[1].address:
slf.set_cash_acct()
def on_network(slf, event, *args):
if event == 'ca_updated_minimal_chash' and args[0] == self.wallet.cashacct:
slf.my_network_signal.emit(event, args)
def showEvent(slf, e):
super().showEvent(e)
if e.isAccepted():
slf.set_cash_acct()
self.cash_account_e = CashAcctE()
label.setBuddy(self.cash_account_e)
grid.addWidget(label, 1, 0)
grid.addWidget(self.cash_account_e, 1, 1, 1, -1)
self.receive_message_e = QLineEdit()
label = QLabel(_('&Description'))
label.setBuddy(self.receive_message_e)
grid.addWidget(label, 2, 0)
grid.addWidget(self.receive_message_e, 2, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
# OP_RETURN requests
self.receive_opreturn_e = QLineEdit()
msg = _("You may optionally append an OP_RETURN message to the payment URI and/or QR you generate.\n\nNote: Not all wallets yet support OP_RETURN parameters, so make sure the other party's wallet supports OP_RETURN URIs.")
self.receive_opreturn_label = label = HelpLabel(_('&OP_RETURN'), msg)
label.setBuddy(self.receive_opreturn_e)
self.receive_opreturn_rawhex_cb = QCheckBox(_('Raw &hex script'))
self.receive_opreturn_rawhex_cb.setToolTip(_('If unchecked, the textbox contents are UTF8-encoded into a single-push script: <tt>OP_RETURN PUSH <text></tt>. If checked, the text contents will be interpreted as a raw hexadecimal script to be appended after the OP_RETURN opcode: <tt>OP_RETURN <script></tt>.'))
grid.addWidget(label, 3, 0)
grid.addWidget(self.receive_opreturn_e, 3, 1, 1, 3)
grid.addWidget(self.receive_opreturn_rawhex_cb, 3, 4, Qt.AlignLeft)
self.receive_opreturn_e.textChanged.connect(self.update_receive_qr)
self.receive_opreturn_rawhex_cb.clicked.connect(self.update_receive_qr)
self.receive_tab_opreturn_widgets = [
self.receive_opreturn_e,
self.receive_opreturn_rawhex_cb,
self.receive_opreturn_label,
]
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
label = QLabel(_('Requested &amount'))
label.setBuddy(self.receive_amount_e)
grid.addWidget(label, 4, 0)
grid.addWidget(self.receive_amount_e, 4, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 4, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding DeVault addresses.'),
_('The DeVault address never expires and will always be part of this DeLight wallet.'),
])
label = HelpLabel(_('Request &expires'), msg)
label.setBuddy(self.expires_combo)
grid.addWidget(label, 5, 0)
grid.addWidget(self.expires_combo, 5, 1)
self.expires_label = QLineEdit('')
        self.expires_label.setReadOnly(True)
self.expires_label.hide()
grid.addWidget(self.expires_label, 5, 1)
self.save_request_button = QPushButton(_('&Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('&Clear'))
self.new_request_button.clicked.connect(self.new_payment_request)
weakSelf = Weak.ref(self)
class MyQRCodeWidget(QRCodeWidget):
def mouseReleaseEvent(slf, e):
''' to make the QRWidget clickable '''
weakSelf() and weakSelf().show_qr_window()
self.receive_qr = MyQRCodeWidget(fixedSize=200)
self.receive_qr.setCursor(QCursor(Qt.PointingHandCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
buttons.addStretch(1)
grid.addLayout(buttons, 6, 2, 1, -1)
self.receive_requests_label = QLabel(_('Re&quests'))
from .request_list import RequestList
self.request_list = RequestList(self)
self.request_list.chkVisible()
self.receive_requests_label.setBuddy(self.request_list)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
vbox2 = QVBoxLayout()
vbox2.setContentsMargins(0,0,0,0)
vbox2.setSpacing(4)
vbox2.addWidget(self.receive_qr, Qt.AlignHCenter|Qt.AlignTop)
self.receive_qr.setToolTip(_('Receive request QR code (click for details)'))
but = uribut = QPushButton(_('Copy &URI'))
def on_copy_uri():
if self.receive_qr.data:
uri = str(self.receive_qr.data)
self.copy_to_clipboard(uri, _('Receive request URI copied to clipboard'), uribut)
but.clicked.connect(on_copy_uri)
but.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
but.setToolTip(_('Click to copy the receive request URI to the clipboard'))
vbox2.addWidget(but)
vbox2.setAlignment(but, Qt.AlignHCenter|Qt.AlignVCenter)
hbox.addLayout(vbox2)
class ReceiveTab(QWidget):
def showEvent(slf, e):
super().showEvent(e)
if e.isAccepted():
wslf = weakSelf()
if wslf:
wslf.check_and_reset_receive_address_if_needed()
w = ReceiveTab()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.address_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr.to_storage_string(), '')
amount = req['amount']
op_return = req.get('op_return')
op_return_raw = req.get('op_return_raw') if not op_return else None
URI = web.create_URI(addr, amount, message, op_return=op_return, op_return_raw=op_return_raw)
        if req.get('time'):
            URI += "&time=%d" % req.get('time')
        if req.get('exp'):
            URI += "&exp=%d" % req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
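    # Sketch of a generated URI (illustrative; query names per web.create_URI):
    #   devault:<address>?amount=0.5&message=consulting&time=1577836800&exp=3600
    # with '&name=...&sig=...' appended when the request was signed.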
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = self.password_dialog(msg)
if password:
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
else:
return
def save_payment_request(self):
if not self.receive_address:
            self.show_error(_('No receiving address'))
            return
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
        expiration = expiration_values[i][1]
kwargs = {}
opr = self.receive_opreturn_e.text().strip()
if opr:
# save op_return, if any
arg = 'op_return'
if self.receive_opreturn_rawhex_cb.isChecked():
arg = 'op_return_raw'
kwargs[arg] = opr
req = self.wallet.make_payment_request(self.receive_address, amount,
message, expiration, **kwargs)
self.wallet.add_payment_request(req, self.config)
self.sign_payment_request(self.receive_address)
self.request_list.update()
self.request_list.select_item_by_address(req.get('address')) # when adding items to the view the current selection may not reflect what's in the UI. Make sure it's selected.
self.address_list.update()
self.save_request_button.setEnabled(False)
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self.top_level_window(), title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests[addr]
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address(frozen_ok=False)
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
# New! Since the button is called 'Clear' now, we let them proceed with a re-used address
addr = self.wallet.get_receiving_address()
else:
# Warn if past gap limit.
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.setCurrentItem(None) # We want the current item to always reflect what's in the UI. So if new, clear selection.
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address = addr
self.receive_message_e.setText('')
self.receive_opreturn_rawhex_cb.setChecked(False)
self.receive_opreturn_e.setText('')
self.receive_amount_e.setAmount(None)
self.update_receive_address_widget()
def update_receive_address_widget(self):
text = ''
if self.receive_address:
text = self.receive_address.to_full_ui_string()
self.receive_address_e.setText(text)
self.cash_account_e.set_cash_acct()
@rate_limited(0.250, ts_after=True) # this function potentially re-computes the QR widget, so it's rate limited to once every 250ms
def check_and_reset_receive_address_if_needed(self):
''' Check to make sure the receive tab is kosher and doesn't contain
an already-used address. This should be called from the showEvent
for the tab. '''
if not self.wallet.use_change or self.cleaned_up:
# if they don't care about change addresses, they are ok
# with re-using addresses, so skip this check.
return
# ok, they care about anonymity, so make sure the receive address
# is always an unused address.
if (not self.receive_address # this should always be defined but check anyway
or self.receive_address in self.wallet.frozen_addresses # make sure it's not frozen
or (self.wallet.get_address_history(self.receive_address) # make a new address if it has a history
and not self.wallet.get_payment_request(self.receive_address, self.config))): # and if they aren't actively editing one in the request_list widget
addr = self.wallet.get_unused_address(frozen_ok=False) # try unused, not frozen
if addr is None:
if self.wallet.is_deterministic():
                    # create a new one if deterministic
addr = self.wallet.create_new_address(False)
else:
# otherwise give up and just re-use one.
addr = self.wallet.get_receiving_address()
self.receive_address = addr
self.update_receive_address_widget()
def clear_receive_tab(self):
self.expires_label.hide()
self.expires_combo.show()
self.request_list.setCurrentItem(None)
self.set_receive_address(self.wallet.get_receiving_address(frozen_ok=False))
def show_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window()
self.qr_window.setAttribute(Qt.WA_DeleteOnClose, True)
weakSelf = Weak.ref(self)
def destroyed_clean(x):
if weakSelf():
weakSelf().qr_window = None
weakSelf().print_error("QR Window destroyed.")
self.qr_window.destroyed.connect(destroyed_clean)
self.update_receive_qr()
if self.qr_window.isMinimized():
self.qr_window.showNormal()
else:
self.qr_window.show()
self.qr_window.raise_()
self.qr_window.activateWindow()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
self.receive_address = addr
self.show_receive_tab()
self.update_receive_address_widget()
def update_receive_qr(self):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
kwargs = {}
if self.receive_opreturn_e.isVisible():
# set op_return if enabled
arg = 'op_return'
if self.receive_opreturn_rawhex_cb.isChecked():
arg = 'op_return_raw'
opret = self.receive_opreturn_e.text()
if opret:
kwargs[arg] = opret
# Special case hack -- see #1473. Omit devault: prefix from
# legacy address if no other params present in receive request.
if Address.FMT_UI == Address.FMT_LEGACY and not kwargs and not amount and not message:
uri = self.receive_address.to_ui_string()
else:
# Otherwise proceed as normal, prepending devault: to URI
uri = web.create_URI(self.receive_address, amount, message, **kwargs)
self.receive_qr.setData(uri)
if self.qr_window:
self.qr_window.set_content(self, self.receive_address_e.text(), amount,
message, uri, **kwargs)
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
# NB: the translators hopefully will not have too tough a time with this
# *fingers crossed* :)
msg = "<span style=\"font-weight:400;\">" + _('Recipient of the funds.') + " " + \
_("You may enter:"
"<ul>"
"<li> DeVault <b>Address</b> <b>★</b>"
"<li> <b>DeVault ID</b> <b>★</b> e.g. <i>satoshi#123</i>"
"<li> <b>Contact name</b> <b>★</b> from the Contacts tab"
"<li> <b>CoinText</b> e.g. <i>cointext:+1234567</i>"
"<li> <b>OpenAlias</b> e.g. <i>satoshi@domain.com</i>"
"</ul><br>"
" <b>★</b> = Supports <b>pay-to-many</b>, where"
" you may optionally enter multiple lines of the form:"
"</span><br><pre>"
" recipient1, amount1 \n"
" recipient2, amount2 \n"
" etc..."
"</pre>")
self.payto_label = payto_label = HelpLabel(_('Pay &to'), msg)
payto_label.setBuddy(self.payto_e)
qmark = ":icons/question-mark-dark.svg" if ColorScheme.dark_scheme else ":icons/question-mark-light.svg"
self.payto_e.addButton(icon_name = qmark, on_click = payto_label.show_help,
tooltip = _('Show help'), index = 0)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter(self.payto_e)
completer.setCaseSensitivity(False)
self.payto_e.setCompleter(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('&Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
description_label.setBuddy(self.message_e)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg_opreturn = ( _('OP_RETURN data (optional).') + '\n\n'
+ _('Posts a PERMANENT note to the DVT blockchain as part of this transaction.')
+ '\n\n' + _('If you specify OP_RETURN text, you may leave the \'Pay to\' field blank.') )
self.opreturn_label = HelpLabel(_('&OP_RETURN'), msg_opreturn)
grid.addWidget(self.opreturn_label, 3, 0)
self.message_opreturn_e = MyLineEdit()
self.opreturn_label.setBuddy(self.message_opreturn_e)
hbox = QHBoxLayout()
hbox.addWidget(self.message_opreturn_e)
self.opreturn_rawhex_cb = QCheckBox(_('&Raw hex script'))
self.opreturn_rawhex_cb.setToolTip(_('If unchecked, the textbox contents are UTF8-encoded into a single-push script: <tt>OP_RETURN PUSH <text></tt>. If checked, the text contents will be interpreted as a raw hexadecimal script to be appended after the OP_RETURN opcode: <tt>OP_RETURN <script></tt>.'))
hbox.addWidget(self.opreturn_rawhex_cb)
grid.addLayout(hbox, 3 , 1, 1, -1)
self.send_tab_opreturn_widgets = [
self.message_opreturn_e,
self.opreturn_rawhex_cb,
self.opreturn_label,
]
self.from_label = QLabel(_('&From'))
grid.addWidget(self.from_label, 4, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_label.setBuddy(self.from_list)
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 4, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('&Amount'), msg)
amount_label.setBuddy(self.amount_e)
grid.addWidget(amount_label, 5, 0)
grid.addWidget(self.amount_e, 5, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 5, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("&Max"), self.spend_max)
self.max_button.setFixedWidth(140)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 5, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 5, 4)
msg = _('DeVault transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('F&ee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
            if self.max_button.isChecked():
                self.spend_max()
            else:
                self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_e_label.setBuddy(self.fee_slider)
self.fee_slider.setFixedWidth(140)
self.fee_custom_lbl = HelpLabel(self.get_custom_fee_text(),
_('This is the fee rate that will be used for this transaction.')
+ "\n\n" + _('It is calculated from the Custom Fee Rate in preferences, but can be overridden from the manual fee edit on this form (if enabled).')
+ "\n\n" + _('Generally, a fee of 1.0 sats/B is a good minimal rate to ensure your transaction will make it into the next block.'))
self.fee_custom_lbl.setFixedWidth(140)
self.fee_slider_mogrifier()
self.fee_e = BTCAmountEdit(self.get_decimal_point)
if not self.config.get('show_fee', False):
self.fee_e.setVisible(False)
self.fee_e.textEdited.connect(self.update_fee)
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
self.fee_e.editingFinished.connect(self.update_fee)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
grid.addWidget(self.fee_e_label, 6, 0)
grid.addWidget(self.fee_slider, 6, 1)
grid.addWidget(self.fee_custom_lbl, 6, 1)
grid.addWidget(self.fee_e, 6, 2)
self.preview_button = EnterButton(_("&Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transactions before signing it.'))
self.send_button = EnterButton(_("&Send"), self.do_send)
self.cointext_button = EnterButton(_("Coin&Text"), self.do_cointext)
self.cointext_button.setToolTip(_('Process CoinText, transforming it into a BIP70 payment request.'))
self.clear_button = EnterButton(_("&Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
buttons.addWidget(self.cointext_button)
grid.addLayout(buttons, 7, 1, 1, 3)
self.payto_e.textChanged.connect(self.update_buttons_on_seed) # hide/unhide cointext button, etc
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
self.message_opreturn_e.textEdited.connect(self.update_fee)
self.message_opreturn_e.textChanged.connect(self.update_fee)
self.message_opreturn_e.editingFinished.connect(self.update_fee)
self.opreturn_rawhex_cb.stateChanged.connect(self.update_fee)
def reset_max(text):
self.max_button.setChecked(False)
enabled = not bool(text) and not self.amount_e.isReadOnly()
self.max_button.setEnabled(enabled)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
extra = run_hook("not_enough_funds_extra", self)
if isinstance(extra, str) and extra:
text += " ({})".format(extra)
elif self.fee_e.isModified():
amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.DEFAULT
elif self.amount_e.isModified():
amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.BLUE
else:
amt_color, fee_color = ColorScheme.BLUE, ColorScheme.BLUE
opret_color = ColorScheme.DEFAULT
if self.op_return_toolong:
opret_color = ColorScheme.RED
text = _("OP_RETURN message too large, needs to be no longer than 220 bytes") + (", " if text else "") + text
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.message_opreturn_e.setStyleSheet(opret_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.message_opreturn_e.textChanged.connect(entry_changed)
self.message_opreturn_e.textEdited.connect(entry_changed)
self.message_opreturn_e.editingFinished.connect(entry_changed)
self.opreturn_rawhex_cb.stateChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
self.invoice_list.chkVisible()
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
self.max_button.setChecked(True)
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def get_custom_fee_text(self, fee_rate = None):
if not self.config.has_custom_fee_rate():
return ""
else:
if fee_rate is None: fee_rate = self.config.custom_fee_rate() / 1000.0
return str(round(fee_rate*100)/100) + " sats/B"
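    # Worked example (assuming the stored custom rate is in sats/kB): a rate
    # of 1234 gives fee_rate = 1234 / 1000.0 = 1.234, and round(1.234 * 100)
    # / 100 = 1.23, so this returns '1.23 sats/B'.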
@staticmethod
def output_for_opreturn_stringdata(op_return):
if not isinstance(op_return, str):
raise OPReturnError('OP_RETURN parameter needs to be of type str!')
op_return_code = "OP_RETURN "
op_return_encoded = op_return.encode('utf-8')
if len(op_return_encoded) > 220:
raise OPReturnTooLarge(_("OP_RETURN message too large, needs to be no longer than 220 bytes"))
op_return_payload = op_return_encoded.hex()
script = op_return_code + op_return_payload
amount = 0
return (TYPE_SCRIPT, ScriptOutput.from_string(script), amount)
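    # Worked example: op_return='hi' encodes to b'hi' (2 bytes <= 220) whose
    # hex is '6869', producing the script string 'OP_RETURN 6869', which
    # ScriptOutput.from_string() turns into the actual output script.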
@staticmethod
def output_for_opreturn_rawhex(op_return):
if not isinstance(op_return, str):
raise OPReturnError('OP_RETURN parameter needs to be of type str!')
if op_return == 'empty':
op_return = ''
try:
op_return_script = b'\x6a' + bytes.fromhex(op_return.strip())
except ValueError:
raise OPReturnError(_('OP_RETURN script expected to be hexadecimal bytes'))
if len(op_return_script) > 223:
raise OPReturnTooLarge(_("OP_RETURN script too large, needs to be no longer than 223 bytes"))
amount = 0
return (TYPE_SCRIPT, ScriptOutput.protocol_factory(op_return_script), amount)
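    # Worked example: op_return='04deadbeef' yields the raw script bytes
    # b'\x6a\x04\xde\xad\xbe\xef' (OP_RETURN followed by a 4-byte push),
    # while the literal string 'empty' produces a bare b'\x6a' script.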
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = (self.fee_e.isModified()
and (self.fee_e.text() or self.fee_e.hasFocus()))
amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
fee_rate = None
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee = self.fee_e.get_amount() if freeze_fee else None
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
try:
opreturn_message = self.message_opreturn_e.text() if self.config.get('enable_opreturn') else None
if opreturn_message:
if self.opreturn_rawhex_cb.isChecked():
outputs.append(self.output_for_opreturn_rawhex(opreturn_message))
else:
outputs.append(self.output_for_opreturn_stringdata(opreturn_message))
tx = self.wallet.make_unsigned_transaction(self.get_coins(), outputs, self.config, fee)
self.not_enough_funds = False
self.op_return_toolong = False
except NotEnoughFunds:
self.not_enough_funds = True
if not freeze_fee:
self.fee_e.setAmount(None)
return
except OPReturnTooLarge:
self.op_return_toolong = True
return
except OPReturnError as e:
self.statusBar().showMessage(str(e))
return
except BaseException:
return
if not freeze_fee:
fee = None if self.not_enough_funds else tx.get_fee()
self.fee_e.setAmount(fee)
if self.max_button.isChecked():
amount = tx.output_value()
self.amount_e.setAmount(amount)
if fee is not None:
fee_rate = fee / tx.estimated_size()
self.fee_slider_mogrifier(self.get_custom_fee_text(fee_rate))
def fee_slider_mogrifier(self, text = None):
fee_slider_hidden = self.config.has_custom_fee_rate()
self.fee_slider.setHidden(fee_slider_hidden)
self.fee_custom_lbl.setHidden(not fee_slider_hidden)
if text is not None: self.fee_custom_lbl.setText(text)
def from_list_delete(self, name):
item = self.from_list.currentItem()
if (item and item.data(0, Qt.UserRole) == name
and not item.data(0, Qt.UserRole+1) ):
i = self.from_list.indexOfTopLevelItem(item)
try:
self.pay_from.pop(i)
except IndexError:
            # The list may contain items that are not in self.pay_from, if they
            # were added by a plugin using the spendable_coin_filter hook
pass
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
if not item:
return
menu = QMenu()
name = item.data(0, Qt.UserRole)
action = menu.addAction(_("Remove"), lambda: self.from_list_delete(name))
if item.data(0, Qt.UserRole+1):
action.setText(_("Not Removable"))
action.setDisabled(True)
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self, *, spendable=None):
''' Optional kwarg spendable indicates *which* of the UTXOs in the
        self.pay_from list are actually spendable. If this arg is specified,
coins in the self.pay_from list that aren't also in the 'spendable' list
will be grayed out in the UI, to indicate that they will not be used.
Otherwise all coins will be non-gray (default).
(Added for CashShuffle 02/23/2019) '''
sel = self.from_list.currentItem() and self.from_list.currentItem().data(0, Qt.UserRole)
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def name(x):
return "{}:{}".format(x['prevout_hash'], x['prevout_n'])
def format(x):
h = x['prevout_hash']
return '{}...{}:{:d}\t{}'.format(h[0:10], h[-10:],
x['prevout_n'], x['address'])
def grayify(twi):
b = twi.foreground(0)
b.setColor(Qt.gray)
for i in range(twi.columnCount()):
twi.setForeground(i, b)
def new(item, is_unremovable=False):
ret = QTreeWidgetItem( [format(item), self.format_amount(item['value']) ])
ret.setData(0, Qt.UserRole, name(item))
ret.setData(0, Qt.UserRole+1, is_unremovable)
return ret
for item in self.pay_from:
twi = new(item)
if spendable is not None and item not in spendable:
grayify(twi)
self.from_list.addTopLevelItem(twi)
if name(item) == sel:
self.from_list.setCurrentItem(twi)
if spendable is not None: # spendable may be None if no plugin filtered coins.
for item in spendable:
# append items added by the plugin to the spendable list
# at the bottom. These coins are marked as "not removable"
# in the UI (the plugin basically insisted these coins must
# be spent with the other coins in the list for privacy).
if item not in self.pay_from:
twi = new(item, True)
self.from_list.addTopLevelItem(twi)
if name(item) == sel:
self.from_list.setCurrentItem(twi)
def get_contact_payto(self, contact : Contact) -> str:
assert isinstance(contact, Contact)
_type, label = contact.type, contact.name
emoji_str = ''
mod_type = _type
mine_str = ''
if _type.startswith('cashacct'): # picks up cashacct and the cashacct_W pseudo-contacts
if _type == 'cashacct_T':
# temporary "pending verification" registration pseudo-contact. Never offer it as a completion!
return None
mod_type = 'cashacct'
info = self.wallet.cashacct.get_verified(label)
if info:
emoji_str = f' {info.emoji}'
if _type == 'cashacct_W':
mine_str = ' [' + _('Mine') + '] '
else:
self.print_error(label, "not found")
# could not get verified contact, don't offer it as a completion
return None
elif _type == 'openalias':
return contact.address
        return (label + emoji_str + ' ' + mine_str + '<' + contact.address + '>') if mod_type in ('address', 'cashacct') else None
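    # Example return value (illustrative): a verified DeVault ID contact named
    # 'bob#101' yields something like 'bob#101 <emoji> <address>' (with
    # ' [Mine] ' inserted for the wallet's own IDs); contact types that should
    # never auto-complete yield None.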
def update_completions(self):
l = []
for contact in self.contact_list.get_full_contacts(include_pseudo=True):
s = self.get_contact_payto(contact)
if s is not None: l.append(s)
l.sort(key=lambda x: x.lower()) # case-insensitive sort
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
on_pw_cancel = kwargs.pop('on_pw_cancel', None)
while self.wallet.has_password():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
if callable(on_pw_cancel):
on_pw_cancel()
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
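    # Typical usage of @protected (see sign_tx below): the decorated method
    # gains a 'password' keyword argument filled in by request_password, e.g.
    #   @protected
    #   def sign_tx(self, tx, callback, password): ...
    # Callers may also pass on_pw_cancel=<callable> to be told of cancellation.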
def read_send_tab(self):
        isInvoice = False
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
            isInvoice = True
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
if self.payto_e.is_alias and not self.payto_e.validated:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
try:
# handle op_return if specified and enabled
opreturn_message = self.message_opreturn_e.text()
if opreturn_message:
if self.opreturn_rawhex_cb.isChecked():
outputs.append(self.output_for_opreturn_rawhex(opreturn_message))
else:
outputs.append(self.output_for_opreturn_stringdata(opreturn_message))
except OPReturnTooLarge as e:
self.show_error(str(e))
return
except OPReturnError as e:
self.show_error(str(e))
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if amount is None:
self.show_error(_('Invalid Amount'))
return
freeze_fee = self.fee_e.isVisible() and self.fee_e.isModified() and (self.fee_e.text() or self.fee_e.hasFocus())
fee = self.fee_e.get_amount() if freeze_fee else None
coins = self.get_coins(isInvoice)
return outputs, fee, label, coins
_cointext_popup_kill_tab_changed_connection = None
def do_cointext(self):
''' This is called by the cointext button 'clicked' signal and it
initiates the processing of the cointext URL. This should only be
called if self.payto_e.cointext is not None, otherwise it will do
nothing. '''
if self.payto_e.cointext and not self.payment_request:
if self.gui_object.warn_if_no_network(self):
return
phone = self.payto_e.cointext
sats = self.amount_e.get_amount()
if sats:
url = "https://pay.cointext.io/p/{}/{}".format(phone, sats)
def get_cointext_pr():
# Runs in thread
self.print_error("CoinText URL", url)
pr = paymentrequest.get_payment_request(url) # raises on error
return pr
def on_success(pr):
# Runs in main thread
if pr:
if pr.error:
self.print_error("CoinText ERROR", pr.error)
self.show_error(_("There was an error processing the CoinText. Please check the phone number and try again."))
return
self.print_error("CoinText RESULT", repr(pr))
self.prepare_for_payment_request()
def show_popup():
if not self.send_button.isVisible():
# likely a watching-only wallet, in which case
# showing the popup label for the send button
# leads to unspecified position for the button
return
show_it = partial(
ShowPopupLabel,
text=_("Please review payment before sending CoinText"),
target=self.send_button, timeout=15000.0,
name="CoinTextPopup",
pointer_position=PopupWidget.LeftSide,
activation_hides=True, track_target=True,
dark_mode = ColorScheme.dark_scheme
)
if not self._cointext_popup_kill_tab_changed_connection:
# this ensures that if user changes tabs, the popup dies
# ... it is only connected once per instance lifetime
self._cointext_popup_kill_tab_changed_connection = self.tabs.currentChanged.connect(lambda: KillPopupLabel("CoinTextPopup"))
QTimer.singleShot(0, show_it)
pr.request_ok_callback = show_popup
self.on_pr(pr)
def on_error(exc):
self.print_error("CoinText EXCEPTION", repr(exc))
self.on_error(exc)
WaitingDialog(self.top_level_window(),
_("Retrieving CoinText info, please wait ..."),
get_cointext_pr, on_success, on_error)
else:
self.show_error(_('CoinText: Please specify an amount'))
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee, tx_desc, coins = r
try:
tx = self.wallet.make_unsigned_transaction(coins, outputs, self.config, fee)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except ExcessiveFee:
self.show_message(_("Your fee is too high. Max is 500 sat/byte."))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
        if fee < MIN_AMOUNT:
            fee = MIN_AMOUNT
#if fee < self.wallet.relayfee() * tx.estimated_size() / 1000 and tx.requires_fee(self.wallet):
#self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
#return
if preview:
self.show_transaction(tx, tx_desc)
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = 2 * self.config.max_fee_rate()
# IN THE FUTURE IF WE WANT TO APPEND SOMETHING IN THE MSG ABOUT THE FEE, CODE IS COMMENTED OUT:
#if fee > confirm_rate * tx.estimated_size() / 1000:
# msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
        if fee < tx.estimated_size():
msg.append(_('Warning') + ': ' + _("You're using a fee of less than 1.0 sats/B. It may take a very long time to confirm."))
tx.ephemeral['warned_low_fee_already'] = True
if self.config.get('enable_opreturn') and self.message_opreturn_e.text():
msg.append(_("You are using an OP_RETURN message. This gets permanently written to the blockchain."))
if self.wallet.has_password():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx, tx_desc)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
if self.tx_external_keypairs:
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
status = False
msg = "Failed"
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
if pr:
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_payment(str(tx), refund_address)
msg = ack_msg
if ack_status:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
status = True
else:
status, msg = self.network.broadcast_transaction(tx)
return status, msg
# Check fee and warn if it's below 1.0 sats/B (and not warned already)
fee = None
        try:
            fee = tx.get_fee()
        except Exception:
            pass  # no fee info available for tx
# Check fee >= size otherwise warn. FIXME: If someday network relay
# rules change to be other than 1.0 sats/B minimum, this code needs
# to be changed.
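        # Worked example: a 226-byte tx serializes to 452 hex characters, so
        # len(str(tx)) // 2 == 226; any fee below 226 sats is then under
        # 1.0 sats/B and triggers the warning dialog below.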
if (isinstance(fee, int) and tx.is_complete() and fee < len(str(tx))//2
and not tx.ephemeral.get('warned_low_fee_already')):
msg = _('Warning') + ': ' + _("You're using a fee of less than 1.0 sats/B. It may take a very long time to confirm.") + "\n\n" + _("Proceed?")
if not self.question(msg, title = _("Low Fee")):
return
# /end fee check
# Capture current TL window; override might be removed on return
parent = self.top_level_window()
if self.gui_object.warn_if_no_network(self):
# Don't allow a useless broadcast when in offline mode. Previous to this we were getting an exception on broadcast.
return
elif not self.network.is_connected():
# Don't allow a potentially very slow broadcast when obviously not connected.
parent.show_error(_("Not connected"))
return
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
buttons, copy_index, copy_link = [ _('Ok') ], None, ''
                    try:
                        txid = tx.txid()  # returns None if not is_complete, but may also raise
                    except Exception:
                        txid = None
if txid is not None:
if tx_desc is not None:
self.wallet.set_label(txid, tx_desc)
copy_link = web.BE_URL(self.config, 'tx', txid)
if copy_link:
# tx is complete and there is a copy_link
buttons.insert(0, _("Copy link"))
copy_index = 0
if parent.show_message(_('Payment sent.') + '\n' + msg,
buttons = buttons,
defaultButton = buttons[-1],
escapeButton = buttons[-1]) == copy_index:
# There WAS a 'Copy link' and they clicked it
self.copy_to_clipboard(copy_link, _("Block explorer link copied to clipboard"), self.top_level_window())
self.invoice_list.update()
self.do_clear()
else:
if msg.startswith("error: "):
msg = msg.split(" ", 1)[-1] # take the last part, sans the "error: " prefix
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
result = dialog.exec_()
dialog.setParent(None)
if not result:
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.cointext = None
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.max_button.setDisabled(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_spocks_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
# New! Payment requests have an optional (may not be there!) attribute
# 'request_ok_callback' which takes 0 args and is called on request ok
# This facility was needed to do the CoinTextPopup label properly.
cb = getattr(self.payment_request, 'request_ok_callback', None)
if callable(cb):
cb()
def payment_request_error(self):
request_error = self.payment_request and self.payment_request.error
self.payment_request = None
self.print_error("PaymentRequest error:", request_error)
self.show_error(_("There was an error processing the payment request"), rich_text=False)
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = web.parse_URI(URI, self.on_pr)
except Exception as e:
self.show_error(_('Invalid devault URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
op_return = out.get('op_return')
op_return_raw = out.get('op_return_raw')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
if op_return:
self.message_opreturn_e.setText(op_return)
self.message_opreturn_e.setHidden(False)
self.opreturn_rawhex_cb.setHidden(False)
self.opreturn_rawhex_cb.setChecked(False)
self.opreturn_label.setHidden(False)
elif op_return_raw is not None:
# 'is not None' allows blank value.
# op_return_raw is secondary precedence to op_return
if not op_return_raw:
op_return_raw='empty'
self.message_opreturn_e.setText(op_return_raw)
self.message_opreturn_e.setHidden(False)
self.opreturn_rawhex_cb.setHidden(False)
self.opreturn_rawhex_cb.setChecked(True)
self.opreturn_label.setHidden(False)
elif not self.config.get('enable_opreturn'):
self.message_opreturn_e.setText('')
self.message_opreturn_e.setHidden(True)
self.opreturn_rawhex_cb.setHidden(True)
self.opreturn_label.setHidden(True)
def do_clear(self):
        ''' Clears the send tab, resetting its UI state to its initial state. '''
KillPopupLabel("CoinTextPopup") # just in case it was alive
self.max_button.setChecked(False)
self.not_enough_funds = False
self.op_return_toolong = False
self.payment_request = None
self.payto_e.cointext = None
self.payto_e.is_pr = False
self.payto_e.is_alias, self.payto_e.validated = False, False # clear flags to avoid bad things
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e, self.fee_e, self.message_opreturn_e]:
e.setText('')
e.setFrozen(False)
self.payto_e.setHidden(False)
self.payto_label.setHidden(False)
self.max_button.setDisabled(False)
self.opreturn_rawhex_cb.setChecked(False)
self.opreturn_rawhex_cb.setDisabled(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.message_opreturn_e.setVisible(self.config.get('enable_opreturn', False))
self.opreturn_rawhex_cb.setVisible(self.config.get('enable_opreturn', False))
self.opreturn_label.setVisible(self.config.get('enable_opreturn', False))
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_coin_state(self, utxos, freeze):
self.wallet.set_frozen_coin_state(utxos, freeze)
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, list_header=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if list_header:
hbox = QHBoxLayout()
for b in list_header:
hbox.addWidget(b)
hbox.addStretch()
vbox.addLayout(hbox)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
self.gui_object.cashaddr_toggled_signal.connect(l.update)
return self.create_list_tab(l)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
self.gui_object.cashaddr_toggled_signal.connect(l.update)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
self.gui_object.cashaddr_toggled_signal.connect(l.update)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?"
.format(addr.to_ui_string()))):
self.wallet.delete_address(addr)
self.update_tabs()
self.update_status()
self.clear_receive_tab()
def get_coins(self, isInvoice = False):
coins = []
if self.pay_from:
coins = self.pay_from.copy()
else:
coins = self.wallet.get_spendable_coins(None, self.config, isInvoice)
run_hook("spendable_coin_filter", self, coins) # may modify coins -- used by CashShuffle if in shuffle = ENABLED mode.
if self.pay_from:
# coins may have been filtered, so indicate this in the UI
self.redraw_from_list(spendable=coins)
return coins
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
run_hook('on_spend_coins', self, coins) # CashShuffle: will set the mode of send tab to coins[0]'s shuffled/unshuffled state
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.do_clear()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, contacts : List[Contact]):
paytos = []
for contact in contacts:
s = self.get_contact_payto(contact)
if s is not None: paytos.append(s)
self.payto_payees(paytos)
def payto_payees(self, payees : List[str]):
''' Like payto_contacts except it accepts a list of free-form strings
rather than requiring a list of Contacts objects '''
self.show_send_tab()
if len(payees) == 1:
self.payto_e.setText(payees[0])
self.amount_e.setFocus()
else:
text = "\n".join([payee + ", 0" for payee in payees])
self.payto_e.setText(text)
self.payto_e.setFocus()
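    # Example (hypothetical payees): self.payto_payees(['qr12xy...', 'bob#101'])
    # A single payee fills 'Pay to' directly; multiple payees become one
    # "payee, 0" line each, ready for the amounts to be edited.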
def resolve_cashacct(self, name):
''' Throws up a WaitingDialog while it resolves a DeVault ID.
Goes out to network, verifies all tx's.
Returns: a tuple of: (Info, Minimally_Encoded_Formatted_AccountName)
Argument `name` should be a DeVault ID name string of the form:
name#number.123
name#number
name#number.; etc
        If the result would be ambiguous, that is considered an error, so enough
of the account name#number.collision_hash needs to be specified to
unambiguously resolve the DeVault ID.
On failure throws up an error window and returns None.'''
return cashacctqt.resolve_cashacct(self, name)
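    # Illustrative (hypothetical name) usage:
    #   tup = self.resolve_cashacct('jonald#123')
    #   if tup:
    #       info, minimal_name = tup   # info.address is the resolved Address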
def set_contact(self, label, address, typ='address', replace=None) -> Contact:
''' Returns a reference to the newly inserted Contact object.
replace is optional and if specified, replace an existing contact,
otherwise add a new one.
Note that duplicate contacts will not be added multiple times, but in
that case the returned value would still be a valid Contact.
Returns None on failure.'''
assert typ in ('address', 'cashacct')
contact = None
if typ == 'cashacct':
tup = self.resolve_cashacct(label) # this displays an error message for us
if not tup:
self.contact_list.update() # Displays original
return
info, label = tup
address = info.address.to_ui_string()
contact = Contact(name=label, address=address, type=typ)
elif not Address.is_valid(address):
# Bad 'address' code path
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return
else:
# Good 'address' code path...
contact = Contact(name=label, address=address, type=typ)
assert contact
if replace != contact:
if self.contacts.has(contact):
self.show_error(_(f"A contact named {contact.name} with the same address and type already exists."))
self.contact_list.update()
return replace or contact
self.contacts.add(contact, replace_old=replace, unique=True)
self.contact_list.update()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.update_completions()
# The contact has changed, update any addresses that are displayed with the old information.
run_hook('update_contact2', contact, replace)
return contact
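    # Sketch of both code paths (hypothetical values):
    #   self.set_contact('Alice', 'qp123...', typ='address')   # plain address
    #   self.set_contact('alice#100', '', typ='cashacct')      # the 'address'
    #                                      # arg is resolved from the name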
def delete_contacts(self, contacts):
n = len(contacts)
qtext = ''
if n <= 3:
def fmt(contact):
if len(contact.address) > 20:
addy = contact.address[:10] + '…' + contact.address[-10:]
else:
addy = contact.address
return f"{contact.name} <{addy}>"
names = [fmt(contact) for contact in contacts]
contact_str = ", ".join(names)
qtext = _("Remove {list_of_contacts} from your contact list?").format(list_of_contacts = contact_str)
else:
# Note: we didn't use ngettext here for plural check because n > 1 in this branch
qtext = _("Remove {number_of_contacts} contacts from your contact list?").format(number_of_contacts=n)
if not self.question(qtext):
return
removed_entries = []
for contact in contacts:
if self.contacts.remove(contact):
removed_entries.append(contact)
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.contact_list.update()
self.update_completions()
run_hook('delete_contacts2', removed_entries)
def show_invoice(self, key):
pr = self.invoices.get(key)
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self.top_level_window(), _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
        outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2]) + self.base_unit() + ' @ ' + x[1].to_ui_string(), pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
weakD = Weak.ref(d)
def do_export():
fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
if not fn:
return
            with open(fn, 'wb') as f:
                f.write(pr.raw)
            self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.invoice_list.update()
d = weakD()
if d: d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
d.setParent(None) # So Python can GC
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console(wallet=self.wallet)
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'devault':bitcoin})
set_json = Weak(self.console.set_json)
c = commands.Commands(self.config, self.wallet, self.network, lambda: set_json(True))
methods = {}
password_getter = Weak(self.password_dialog)
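        # mkfunc is a factory so each closure binds its own `method`; a bare
        # lambda in the loop below would late-bind and always call the last one.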
def mkfunc(f, method):
return lambda *args, **kwargs: f(method, *args, password_getter=password_getter,
**kwargs)
for m in dir(c):
            if m[0] == '_' or m in ['network', 'wallet', 'config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
sb.addWidget(self.balance_label)
self._search_box_spacer = QWidget()
self._search_box_spacer.setFixedWidth(6) # 6 px spacer
self.search_box = QLineEdit()
self.search_box.setPlaceholderText(_("Search wallet, {key}+F to hide").format(key='Ctrl' if sys.platform != 'darwin' else '⌘'))
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box, 1)
self.update_available_button = StatusBarButton(QIcon(":icons/electron-cash-update.svg"), _("Update available, click for details"), lambda: self.gui_object.show_update_checker(self, skip_check=True))
self.update_available_button.setStatusTip(_("A DeLight update is available"))
sb.addPermanentWidget(self.update_available_button)
self.update_available_button.setVisible(bool(self.gui_object.new_version_available)) # if hidden now gets unhidden by on_update_available when a new version comes in
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
self.cashshuffle_status_button = StatusBarButton(
self.cashshuffle_icon(),
'', # ToolTip will be set in update_cashshuffle code
self.cashshuffle_icon_leftclick
)
self.cashshuffle_toggle_action = QAction("", self.cashshuffle_status_button) # action text will get set in update_cashshuffle_icon()
self.cashshuffle_toggle_action.triggered.connect(self.toggle_cashshuffle)
self.cashshuffle_settings_action = QAction("", self.cashshuffle_status_button)
self.cashshuffle_settings_action.triggered.connect(self.show_cashshuffle_settings)
self.cashshuffle_viewpools_action = QAction(_("View pools..."), self.cashshuffle_status_button)
self.cashshuffle_viewpools_action.triggered.connect(self.show_cashshuffle_pools)
self.cashshuffle_status_button.addAction(self.cashshuffle_viewpools_action)
self.cashshuffle_status_button.addAction(self.cashshuffle_settings_action)
self.cashshuffle_separator_action = sep = QAction(self.cashshuffle_status_button); sep.setSeparator(True)
self.cashshuffle_status_button.addAction(sep)
self.cashshuffle_status_button.addAction(self.cashshuffle_toggle_action)
self.cashshuffle_status_button.setContextMenuPolicy(Qt.ActionsContextMenu)
sb.addPermanentWidget(self.cashshuffle_status_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.svg"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
weakSelf = Weak.ref(self)
gui_object = self.gui_object
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.svg"), _("Network"), lambda: gui_object.show_network_dialog(weakSelf()))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def on_update_available(self, b):
self.update_available_button.setVisible(bool(b))
        # The popup label won't really be shown unless this window is
        # on top... but regardless, we give each label a unique internal name
        # so they don't interfere with each other.
lblName = "UpdateAvailable_" + self.diagnostic_name()
if b:
ShowPopupLabel(name = lblName,
text="<center><b>{}</b><br><small>{}</small></center>".format(_("Update Available"),_("Click for details")),
target=self.update_available_button,
timeout=20000, onClick=self.update_available_button.click,
onRightClick=self.update_available_button.click,
dark_mode = ColorScheme.dark_scheme)
else:
# Immediately kills any extant labels
KillPopupLabel(lblName)
def update_lock_icon(self):
icon = QIcon(":icons/lock.svg") if self.wallet.has_password() else QIcon(":icons/unlock.svg")
tip = _('Wallet Password') + ' - '
tip += _('Enabled') if self.wallet.has_password() else _('Disabled')
self.password_button.setIcon(icon)
self.password_button.setStatusTip(tip)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.can_change_password())
self.send_button.setVisible(not self.wallet.is_watching_only() and not self.payto_e.cointext)
self.preview_button.setVisible(not self.payto_e.cointext)
self.cointext_button.setVisible(bool(self.payto_e.cointext))
def change_password_dialog(self):
from .password_dialog import ChangePasswordDialog
d = ChangePasswordDialog(self.top_level_window(), self.wallet)
ok, password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(password, new_password, encrypt_file)
run_hook("on_new_password", self, password, new_password)
        except InvalidPassword as e:
self.show_error(str(e))
return
except:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if new_password else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.balance_label.setHidden(True)
self.statusBar().insertWidget(0, self._search_box_spacer)
self._search_box_spacer.show()
self.search_box.setFocus(1)
if self.search_box.text():
self.do_search(self.search_box.text())
else:
self._search_box_spacer.hide()
self.statusBar().removeWidget(self._search_box_spacer)
self.balance_label.setHidden(False)
self.do_search('')
def do_search(self, t):
'''Apply search text to all tabs. FIXME: if a plugin later is loaded
it will not receive the search filter -- but most plugins I know about
do not support searchable_list anyway, so hopefully it's a non-issue.'''
for i in range(self.tabs.count()):
tab = self.tabs.widget(i)
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
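    # Note: a tab opts into search simply by exposing a `searchable_list`
    # attribute whose object implements .filter(text), as create_list_tab() does.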
def new_contact_dialog(self):
d = WindowModalDialog(self.top_level_window(), _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(350)
line2 = QLineEdit()
line2.setFixedWidth(350)
grid.addWidget(QLabel(_("Name")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Address")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
name = line1.text().strip()
address = line2.text().strip()
prefix = networks.net.CASHADDR_PREFIX.lower() + ':'
if address.lower().startswith(prefix):
address = address[len(prefix):]
self.set_contact(name, address)
def lookup_cash_account_dialog(self):
blurb = "<br><br>" + _('Enter a string of the form <b>name#<i>number</i></b>')
cashacctqt.lookup_cash_account_dialog(self, self.wallet, blurb=blurb,
add_to_contacts_button = True, pay_to_button = True)
def show_master_public_keys(self):
dialog = WindowModalDialog(self.top_level_window(), _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton()
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path) # implicitly also calls stop_wallet
self.update_recently_visited(wallet_path) # this ensures it's deleted from the menu
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
self.close()
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self.top_level_window(), seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
d.setParent(None) # Help Python GC this sooner rather than later
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self.top_level_window(), _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel('{}: {}'.format(_("Address"), address)))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton()
vbox.addWidget(keys_e)
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=address.to_script().hex())
rds_e.addCopyButton()
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in DeLight, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
try:
addr = Address.from_string(address)
except:
self.show_message(_('Invalid DeVault address.'))
return
        if addr.kind != addr.ADDR_P2PKH:
            self.show_message(_('Cannot sign messages with this type of address.') + '\n\n' + self.msg_sign)
            return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(addr):
self.show_message(_('Address not in wallet.'))
return
task = partial(self.wallet.sign_message, addr, message, password)
def show_signed_message(sig):
signature.setText(base64.b64encode(sig).decode('ascii'))
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
try:
address = Address.from_string(address.text().strip())
except:
self.show_message(_('Invalid DeVault address.'))
return
message = message.toPlainText().strip().encode('utf-8')
try:
# This can throw on invalid base64
sig = base64.b64decode(signature.toPlainText())
verified = bitcoin.verify_message(address, sig, message)
except:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=None):
d = WindowModalDialog(self.top_level_window(), _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address.to_ui_string() if address else '')
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
self.wallet.thread.add(task, on_success=lambda text: message_e.setText(text.decode('utf-8')))
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, pubkey_e.text())
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def encrypt_message(self, address=None):
d = WindowModalDialog(self.top_level_window(), _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
if not isinstance(pubkey, str):
pubkey = pubkey.to_ui_string()
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
return PasswordDialog(parent, msg).run()
def tx_from_text(self, txt):
from electroncash.transaction import tx_from_str
try:
txt_tx = tx_from_str(txt)
tx = Transaction(txt_tx, sign_schnorr=self.wallet.is_schnorr_enabled())
tx.deserialize()
if self.wallet:
my_coins = self.wallet.get_spendable_coins(None, self.config)
my_outpoints = [vin['prevout_hash'] + ':' + str(vin['prevout_n']) for vin in my_coins]
for i, txin in enumerate(tx.inputs()):
outpoint = txin['prevout_hash'] + ':' + str(txin['prevout_n'])
if outpoint in my_outpoints:
my_index = my_outpoints.index(outpoint)
tx._inputs[i]['value'] = my_coins[my_index]['value']
return tx
except:
traceback.print_exc(file=sys.stdout)
self.show_critical(_("DeLight was unable to parse your transaction"))
return
# Due to the asynchronous nature of the qr reader we need to keep the
# dialog instance as member variable to prevent reentrancy/multiple ones
# from being presented at once.
_qr_dialog = None
def read_tx_from_qrcode(self):
if self._qr_dialog:
# Re-entrancy prevention -- there is some lag between when the user
# taps the QR button and the modal dialog appears. We want to
# prevent multiple instances of the dialog from appearing, so we
# must do this.
self.print_error("Warning: QR dialog is already presented, ignoring.")
return
if self.gui_object.warn_if_cant_import_qrreader(self):
return
from electroncash import get_config
from .qrreader import QrReaderCameraDialog
data = ''
self._qr_dialog = None
try:
self._qr_dialog = QrReaderCameraDialog(parent=self.top_level_window())
def _on_qr_reader_finished(success: bool, error: str, result):
if self._qr_dialog:
self._qr_dialog.deleteLater(); self._qr_dialog = None
if not success:
if error:
self.show_error(error)
return
if not result:
return
# if the user scanned a devault URI
if result.lower().startswith(networks.net.CASHADDR_PREFIX + ':'):
self.pay_to_URI(result)
return
# else if the user scanned an offline signed tx
try:
result = bh2u(bitcoin.base_decode(result, length=None, base=43))
tx = self.tx_from_text(result) # will show an error dialog on error
if not tx:
return
except BaseException as e:
self.show_error(str(e))
return
self.show_transaction(tx)
self._qr_dialog.qr_finished.connect(_on_qr_reader_finished)
self._qr_dialog.start_scan(get_config().get_video_device())
except BaseException as e:
if util.is_verbose:
import traceback
traceback.print_exc()
self._qr_dialog = None
self.show_error(str(e))
def read_tx_from_file(self, *, fileName = None):
fileName = fileName or self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r", encoding='utf-8') as f:
file_content = f.read()
file_content = file_content.strip()
tx_file_dict = json.loads(str(file_content))
except (ValueError, IOError, OSError, json.decoder.JSONDecodeError) as reason:
self.show_critical(_("DeLight was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
tx = self.tx_from_text(file_content)
return tx
def do_process_from_text(self):
from electroncash.transaction import SerializationError
text = text_dialog(self.top_level_window(), _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
try:
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("DeLight was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_file(self, *, fileName = None):
from electroncash.transaction import SerializationError
try:
tx = self.read_tx_from_file(fileName=fileName)
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("DeLight was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_txid(self, *, txid=None, parent=None, tx_desc=None):
parent = parent or self
if self.gui_object.warn_if_no_network(parent):
return
from electroncash import transaction
ok = txid is not None
if not ok:
txid, ok = QInputDialog.getText(parent, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
ok, r = self.network.get_raw_tx_for_txid(txid, timeout=10.0)
if not ok:
parent.show_message(_("Error retrieving transaction") + ":\n" + r)
return
tx = transaction.Transaction(r, sign_schnorr=self.wallet.is_schnorr_enabled()) # note that presumably the tx is already signed if it comes from blockchain so this sign_schnorr parameter is superfluous, but here to satisfy my OCD -Calin
self.show_transaction(tx, tx_desc=tx_desc)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                              _('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self.top_level_window(), _('Private keys'))
d.setMinimumSize(850, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electron-cash-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
try:
privkey = self.wallet.export_private_key(addr, password)
except InvalidPassword:
# See #921 -- possibly a corrupted wallet or other strangeness
privkey = 'INVALID_PASSWORD'
private_keys[addr.to_ui_string()] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join('{}\t{}'.format(addr, privkey)
for addr, privkey in private_keys.items())
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText(_("Please wait... {num}/{total}").format(num=len(private_keys),total=len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
res = d.exec_()
d.setParent(None) # for python GC
if not res:
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("DeLight was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+", encoding='utf-8') as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
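    # Resulting CSV shape (illustrative):
    #   address,private_key
    #   <address padded to 34 chars>,<WIF, or 'INVALID_PASSWORD' on keystore trouble>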
def do_import_labels(self):
labelsFile = self.getOpenFileName(_("Open labels file"), "*.json")
if not labelsFile: return
try:
with open(labelsFile, 'r', encoding='utf-8') as f: # always ensure UTF-8. See issue #1453.
data = f.read()
data = json.loads(data)
if type(data) is not dict or not len(data) or not all(type(v) is str and type(k) is str for k,v in data.items()):
self.show_critical(_("The file you selected does not appear to contain labels."))
return
for key, value in data.items():
self.wallet.set_label(key, value)
self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile))
except (IOError, OSError, json.decoder.JSONDecodeError) as reason:
self.show_critical(_("DeLight was unable to import your labels.") + "\n" + str(reason))
self.address_list.update()
self.history_list.update()
self.utxo_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def do_export_labels(self):
labels = self.wallet.labels
try:
fileName = self.getSaveFileName(_("Select file to save your labels"), 'electron-cash_labels.json', "*.json")
if fileName:
with open(fileName, 'w+', encoding='utf-8') as f: # always ensure UTF-8. See issue #1453.
json.dump(labels, f, indent=4, sort_keys=True)
self.show_message(_("Your labels were exported to") + " '%s'" % str(fileName))
except (IOError, os.error) as reason:
self.show_critical(_("DeLight was unable to export your labels.") + "\n" + str(reason))
def export_history_dialog(self):
d = WindowModalDialog(self.top_level_window(), _('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/electron-cash-history.csv')
select_msg = _('Select file to export your wallet transactions to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
vbox.addStretch(1)
hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
vbox.addLayout(hbox)
run_hook('export_history_dialog', self, hbox)
self.update()
res = d.exec_()
d.setParent(None) # for python GC
if not res:
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_history(self.wallet, filename, csv_button.isChecked())
except (IOError, os.error) as reason:
export_error_label = _("DeLight was unable to produce a transaction export.")
self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
return
self.show_message(_("Your wallet history has been successfully exported."))
def plot_history_dialog(self):
if plot_history is None:
return
wallet = self.wallet
history = wallet.get_history()
if len(history) > 0:
plt = plot_history(self.wallet, history)
plt.show()
def do_export_history(self, wallet, fileName, is_csv):
history = wallet.export_history(fx=self.fx)
ccy = (self.fx and self.fx.get_currency()) or ''
has_fiat_columns = history and self.fx and self.fx.show_history() and 'fiat_value' in history[0] and 'fiat_balance' in history[0]
lines = []
for item in history:
if is_csv:
cols = [item['txid'], item.get('label', ''), item['confirmations'], item['value'], item['date']]
if has_fiat_columns:
cols += [item['fiat_value'], item['fiat_balance']]
lines.append(cols)
else:
if has_fiat_columns and ccy:
item['fiat_currency'] = ccy # add the currency to each entry in the json. this wastes space but json is bloated anyway so this won't hurt too much, we hope
elif not has_fiat_columns:
# No need to include these fields as they will always be 'No Data'
item.pop('fiat_value', None)
item.pop('fiat_balance', None)
lines.append(item)
with open(fileName, "w+", encoding="utf-8") as f: # ensure encoding to utf-8. Avoid Windows cp1252. See #1453.
if is_csv:
transaction = csv.writer(f, lineterminator='\n')
cols = ["transaction_hash","label", "confirmations", "value", "timestamp"]
if has_fiat_columns:
cols += [f"fiat_value_{ccy}", f"fiat_balance_{ccy}"] # in CSV mode, we use column names eg fiat_value_USD, etc
transaction.writerow(cols)
for line in lines:
transaction.writerow(line)
else:
f.write(json.dumps(lines, indent=4))
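    # Illustrative CSV header when fiat columns are active and ccy == 'USD':
    #   transaction_hash,label,confirmations,value,timestamp,fiat_value_USD,fiat_balance_USD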
def sweep_key_dialog(self):
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
if not addresses:
self.show_warning(_('Wallet has no address to sweep to'))
return
d = WindowModalDialog(self.top_level_window(), title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
        bip38_warn_label = QLabel(_("<b>BIP38 support is disabled because a requisite library is not installed.</b> Please install 'pycryptodomex' or omit BIP38 private keys (private keys starting in 6P...). Decrypt keys to WIF format (starting with 5, K, or L) in order to sweep."))
bip38_warn_label.setWordWrap(True)
bip38_warn_label.setHidden(True)
vbox.addWidget(bip38_warn_label)
extra = ""
if bitcoin.is_bip38_available():
extra += " " + _('or BIP38 keys')
vbox.addWidget(QLabel(_("Enter private keys") + extra + " :"))
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
h, addr_combo = address_combo(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
sweep_button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), sweep_button))
def get_address_text():
return addr_combo.currentText()
def get_priv_keys():
return keystore.get_private_keys(keys_e.toPlainText(), allow_bip38=True)
def has_bip38_keys_but_no_bip38():
if bitcoin.is_bip38_available():
return False
keys = [k for k in keys_e.toPlainText().split() if k]
return any(bitcoin.is_bip38_key(k) for k in keys)
def enable_sweep():
bad_bip38 = has_bip38_keys_but_no_bip38()
sweepok = bool(get_address_text() and not bad_bip38 and get_priv_keys())
sweep_button.setEnabled(sweepok)
bip38_warn_label.setHidden(not bad_bip38)
keys_e.textChanged.connect(enable_sweep)
enable_sweep()
res = d.exec_()
d.setParent(None)
if not res:
return
try:
self.do_clear()
keys = get_priv_keys()
bip38s = {}
for i, k in enumerate(keys):
if bitcoin.is_bip38_key(k):
bip38s[k] = i
if bip38s:
# For all the BIP38s detected, prompt for password
from .bip38_importer import Bip38Importer
d2 = Bip38Importer(bip38s.keys(), parent=self.top_level_window())
d2.exec_()
d2.setParent(None)
if d2.decoded_keys:
for k,tup in d2.decoded_keys.items():
wif, adr = tup
# rewrite the keys they specified with the decrypted WIF in the keys list for sweep_preparations to work below...
i = bip38s[k]
keys[i] = wif
else:
self.show_message(_("User cancelled"))
return
coins, keypairs = sweep_preparations(keys, self.network)
self.tx_external_keypairs = keypairs
self.payto_e.setText(get_address_text())
self.spend_coins(coins)
self.spend_max()
except BaseException as e:
self.show_message(str(e))
return
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, msg, func):
text = text_dialog(self.top_level_window(), title, msg + ' :', _('Import'),
allow_multi=True)
if not text:
return
bad, bad_info = [], []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
bad_info.append("{}: {}".format(key, str(e)))
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_warning(_("The following could not be imported") + ':\n' + '\n'.join(bad), detail_text='\n\n'.join(bad_info))
self.address_list.update()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
def import_addr(addr):
if self.wallet.import_address(Address.from_string(addr)):
return addr
return ''
self._do_import(title, msg, import_addr)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
if bitcoin.is_bip38_available():
msg += " " + _('or BIP38 keys')
def func(key):
if bitcoin.is_bip38_available() and bitcoin.is_bip38_key(key):
from .bip38_importer import Bip38Importer
d = Bip38Importer([key], parent=self.top_level_window(),
message = _('A BIP38 key was specified, please enter a password to decrypt it'),
show_count = False)
d.exec_()
d.setParent(None) # python GC quicker if this happens
if d.decoded_keys:
wif, adr = d.decoded_keys[key]
return self.wallet.import_private_key(wif, password)
else:
raise util.UserCancelled()
else:
return self.wallet.import_private_key(key, password)
self._do_import(title, msg, func)
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def toggle_cashaddr_status_bar(self):
self.gui_object.toggle_cashaddr()
self.statusBar().showMessage(self.cashaddr_status_tip(), 2000)
def toggle_cashaddr_settings(self, state):
self.gui_object.toggle_cashaddr(state == Qt.Checked)
def toggle_cashaddr(self, on):
self.print_error('*** WARNING ElectrumWindow.toggle_cashaddr: This function is deprecated. Please do not call it!')
self.gui_object.toggle_cashaddr(on)
def cashshuffle_plugin_if_loaded(self):
return self.gui_object.plugins.get_internal_plugin("shuffle", force_load = False)
def is_cashshuffle_enabled(self):
plugin = self.cashshuffle_plugin_if_loaded()
return bool(plugin and plugin.is_enabled() and plugin.window_has_cashshuffle(self))
def cashshuffle_icon(self):
if self.is_cashshuffle_enabled():
if self._cash_shuffle_flag == 1:
return QIcon(":icons/cashshuffle_on_error.svg")
else:
return QIcon(":icons/cashshuffle_on.svg")
else:
self._cash_shuffle_flag = 0
return QIcon(":icons/cashshuffle_off.svg")
def update_cashshuffle_icon(self):
self.cashshuffle_status_button.setIcon(self.cashshuffle_icon())
loaded = bool(self.cashshuffle_plugin_if_loaded())
en = self.is_cashshuffle_enabled()
if self._cash_shuffle_flag == 0:
self.cashshuffle_status_button.setStatusTip(_("CashShuffle") + " - " + _("ENABLED") if en else _("CashShuffle") + " - " + _("Disabled"))
rcfcm = _("Right-click for context menu")
self.cashshuffle_status_button.setToolTip(
(_("Toggle CashShuffle") + "\n" + rcfcm)
#(_("Left-click to view pools") + "\n" + rcfcm) if en
#else (_("Toggle CashShuffle") + "\n" + rcfcm)
)
self.cashshuffle_toggle_action.setText(_("Enable CashShuffle") if not en else _("Disable CashShuffle"))
self.cashshuffle_settings_action.setText(_("CashShuffle Settings..."))
self.cashshuffle_viewpools_action.setEnabled(True)
elif self._cash_shuffle_flag == 1: # Network server error
self.cashshuffle_status_button.setStatusTip(_('CashShuffle Error: Could not connect to server'))
self.cashshuffle_status_button.setToolTip(_('Right-click to select a different CashShuffle server'))
self.cashshuffle_settings_action.setText(_("Resolve Server Problem..."))
self.cashshuffle_viewpools_action.setEnabled(False)
self.cashshuffle_settings_action.setVisible(en or loaded)
self.cashshuffle_viewpools_action.setVisible(en)
if en:
# ensure 'Disable CashShuffle' appears at the end of the context menu
self.cashshuffle_status_button.removeAction(self.cashshuffle_separator_action)
self.cashshuffle_status_button.removeAction(self.cashshuffle_toggle_action)
self.cashshuffle_status_button.addAction(self.cashshuffle_separator_action)
self.cashshuffle_status_button.addAction(self.cashshuffle_toggle_action)
else:
# ensure 'Enable CashShuffle' appears at the beginning of the context menu
self.cashshuffle_status_button.removeAction(self.cashshuffle_separator_action)
self.cashshuffle_status_button.removeAction(self.cashshuffle_toggle_action)
actions = self.cashshuffle_status_button.actions()
self.cashshuffle_status_button.insertAction(actions[0] if actions else None, self.cashshuffle_separator_action)
self.cashshuffle_status_button.insertAction(self.cashshuffle_separator_action, self.cashshuffle_toggle_action)
def show_cashshuffle_settings(self):
p = self.cashshuffle_plugin_if_loaded()
if p:
msg = None
if self._cash_shuffle_flag == 1:
# had error
msg = _("There was a problem connecting to this server.\nPlease choose a different CashShuffle server.")
p.settings_dialog(self, msg)
#else: # commented-out. Enable this if you want to use the non-modal network settings as the destination for this action
# # no error -- use the free-floating non-modal network dialog
# if not p.show_cashshuffle_tab_in_network_dialog(self):
# # Huh. Network dialog creation/show failed. Fall back to modal window
# p.settings_dialog(self, msg)
def show_cashshuffle_pools(self):
p = self.cashshuffle_plugin_if_loaded()
if p:
p.view_pools(self)
def cashshuffle_icon_leftclick(self):
self.toggle_cashshuffle()
return
# delete the above 2 lines if we want the left-click to revert to
# Josh's suggestion (leaving the code in here for now)
if self.is_cashshuffle_enabled():
if self._cash_shuffle_flag != 0:
# Jump to settings.
self.cashshuffle_settings_action.trigger()
return
if self.cashshuffle_viewpools_action.isVisible():
# New! We just let this icon be the "View pools..." action when
# the plugin is already loaded and enabled. This hopefully will
# discourage disabling. Also it's been found that "View pools..."
# is the most popular action anyway -- might as well make it
# convenient to access with 1-click. (@zquestz suggested this)
self.cashshuffle_viewpools_action.trigger()
return
#else... in all other cases just toggle cashshuffle
self.toggle_cashshuffle()
def toggle_cashshuffle(self):
if not self.is_wallet_cashshuffle_compatible():
self.show_warning(_("This wallet type cannot be used with CashShuffle."), parent=self)
return
plugins = self.gui_object.plugins
p0 = self.cashshuffle_plugin_if_loaded()
p = p0 or plugins.enable_internal_plugin("shuffle")
if not p:
raise RuntimeError("Could not find CashShuffle plugin")
was_enabled = p.window_has_cashshuffle(self)
if was_enabled and not p.warn_if_shuffle_disable_not_ok(self):
# user at nag screen said "no", so abort
self.update_cashshuffle_icon()
return
enable_flag = not was_enabled
self._cash_shuffle_flag = 0
KillPopupLabel("CashShuffleError")
if not p0:
# plugin was not loaded -- so flag window as wanting cashshuffle and do init
p.window_set_wants_cashshuffle(self, enable_flag)
p.init_qt(self.gui_object)
else:
# plugin was already started -- just add the window to the plugin
p.window_set_cashshuffle(self, enable_flag)
self.update_cashshuffle_icon()
self.statusBar().showMessage(self.cashshuffle_status_button.statusTip(), 3000)
if enable_flag and self.config.get("show_utxo_tab") is None:
self.toggle_tab(self.utxo_tab) # toggle utxo tab to 'on' if user never specified it should be off.
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self.top_level_window(), _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
global_tx_widgets, per_wallet_tx_widgets = [], []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electroncash.i18n import languages, get_system_language_match, match_language
language_names = []
language_keys = []
for (lang_code, lang_def) in languages.items():
language_keys.append(lang_code)
lang_name = []
lang_name.append(lang_def.name)
if lang_code == '':
# System entry in languages list (==''), gets system setting
sys_lang = get_system_language_match()
if sys_lang:
lang_name.append(f' [{languages[sys_lang].name}]')
language_names.append(''.join(lang_name))
lang_combo.addItems(language_names)
conf_lang = self.config.get("language", '')
if conf_lang:
# The below code allows us to rename languages in saved config and
# have them still line up with languages in our languages dict.
# For example we used to save English as en_UK but now it's en_US
# and it will still match
conf_lang = match_language(conf_lang)
try: index = language_keys.index(conf_lang)
except ValueError: index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]:
w.setEnabled(False)
def on_lang(x):
lang_request = language_keys[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.update_tabs()
self.update_status()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
def on_customfee(x):
amt = customfee_e.get_amount()
m = int(amt * 1000.0) if amt is not None else None
self.config.set_key('customfee', m)
self.fee_slider.update()
self.fee_slider_mogrifier()
customfee_e = BTCSatsByteEdit()
customfee_e.setAmount(self.config.custom_fee_rate() / 1000.0 if self.config.has_custom_fee_rate() else None)
customfee_e.textChanged.connect(on_customfee)
customfee_label = HelpLabel(_('Custom Fee Rate'), _('Custom Fee Rate in Satoshis per byte'))
fee_widgets.append((customfee_label, customfee_e))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_e.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link/']) + '\n\n'\
+ _('For more information, see http://openalias.org')
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = util.base_unit_labels # ( 'DVT', 'mDVT', 'bits' )
msg = _('Base unit of your wallet.')\
+ '\n1 DVT = 1,000 mDVT = 1,000,000 bits.\n' \
              + _('These settings affect the fields in the Send tab') + ' '
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
dp = util.base_units.get(unit_result)
if dp is not None:
self.decimal_point = dp
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_tabs()
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = web.BE_sorted_list()
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(web.BE_from_config(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
qr_combo = QComboBox()
qr_combo.addItem(_("Default"),"default")
system_cameras = []
try:
from PyQt5.QtMultimedia import QCameraInfo
system_cameras = QCameraInfo.availableCameras()
qr_label = HelpLabel(_('Video Device') + ':', _("For scanning Qr codes."))
except ImportError as e:
# Older Qt or missing libs -- disable GUI control and inform user why
qr_combo.setEnabled(False)
qr_combo.setToolTip(_("Unable to probe for cameras on this system. QtMultimedia is likely missing."))
qr_label = HelpLabel(_('Video Device') + ' ' + _('(disabled)') + ':', qr_combo.toolTip() + "\n\n" + str(e))
qr_label.setToolTip(qr_combo.toolTip())
for cam in system_cameras:
qr_combo.addItem(cam.description(), cam.deviceName())
video_device = self.config.get("video_device")
video_device_index = 0
if video_device:
video_device_index = qr_combo.findData(video_device)
qr_combo.setCurrentIndex(video_device_index)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
theme_name = self.config.get('qt_gui_color_theme', 'default')
dark_theme_available = self.gui_object.is_dark_theme_available()
if theme_name == 'dark' and not dark_theme_available:
theme_name = 'default'
index = colortheme_combo.findData(theme_name)
if index < 0: index = 0
colortheme_combo.setCurrentIndex(index)
msg = ( _("Dark theme support requires the package 'QDarkStyle' (typically installed via the 'pip3' command on Unix & macOS).")
if not dark_theme_available
else '' )
lbltxt = _('Color theme') + ':'
colortheme_label = HelpLabel(lbltxt, msg) if msg else QLabel(lbltxt)
def on_colortheme(x):
item_data = colortheme_combo.itemData(x)
if not dark_theme_available and item_data == 'dark':
self.show_error(_("Dark theme is not available. Please install QDarkStyle to access this feature."))
colortheme_combo.setCurrentIndex(0)
return
self.config.set_key('qt_gui_color_theme', item_data, True)
if theme_name != item_data:
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
if sys.platform not in ('darwin',):
# Enable/Disable HighDPI -- this option makes no sense for macOS
# and thus does not appear on that platform
hidpi_chk = QCheckBox(_('Automatic high DPI scaling'))
if sys.platform in ('linux',):
hidpi_chk.setToolTip(_("Enable/disable this option if you experience graphical glitches (such as overly large status bar icons)"))
else: # windows
hidpi_chk.setToolTip(_("Enable/disable this option if you experience graphical glitches (such as dialog box text being cut off"))
hidpi_chk.setChecked(bool(self.config.get('qt_enable_highdpi', True)))
if self.config.get('qt_disable_highdpi'):
hidpi_chk.setToolTip(_('Automatic high DPI scaling was disabled from the command-line'))
hidpi_chk.setChecked(False)
hidpi_chk.setDisabled(True)
def on_hi_dpi_toggle():
self.config.set_key('qt_enable_highdpi', hidpi_chk.isChecked())
self.need_restart = True
hidpi_chk.stateChanged.connect(on_hi_dpi_toggle)
gui_widgets.append((hidpi_chk, None))
gui_widgets.append((None, None)) # spacer
updatecheck_cb = QCheckBox(_("Automatically check for updates"))
updatecheck_cb.setChecked(self.gui_object.has_auto_update_check())
updatecheck_cb.setToolTip(_("Enable this option if you wish to be notified as soon as a new version of DeLight becomes available"))
def on_set_updatecheck(v):
self.gui_object.set_auto_update_check(v == Qt.Checked)
updatecheck_cb.stateChanged.connect(on_set_updatecheck)
gui_widgets.append((updatecheck_cb, None))
notify_tx_cb = QCheckBox(_('Notify when receiving funds'))
notify_tx_cb.setToolTip(_('If enabled, a system notification will be presented when you receive funds to this wallet.'))
notify_tx_cb.setChecked(bool(self.wallet.storage.get('gui_notify_tx', True)))
def on_notify_tx(b):
self.wallet.storage.put('gui_notify_tx', bool(b))
notify_tx_cb.stateChanged.connect(on_notify_tx)
per_wallet_tx_widgets.append((notify_tx_cb, None))
usechange_cb = QCheckBox(_('Use change addresses'))
if self.force_use_single_change_addr:
usechange_cb.setChecked(True)
usechange_cb.setEnabled(False)
if isinstance(self.force_use_single_change_addr, str):
usechange_cb.setToolTip(self.force_use_single_change_addr)
else:
usechange_cb.setChecked(self.wallet.use_change)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
per_wallet_tx_widgets.append((usechange_cb, None))
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
if self.force_use_single_change_addr:
multiple_cb.setEnabled(False)
multiple_cb.setChecked(False)
if isinstance(self.force_use_single_change_addr, str):
multiple_cb.setToolTip(self.force_use_single_change_addr)
else:
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
                _('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_cb.stateChanged.connect(on_multiple)
per_wallet_tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
global_tx_widgets.append((unconf_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
enable_opreturn = bool(self.config.get('enable_opreturn'))
opret_cb = QCheckBox(_('Enable OP_RETURN output'))
opret_cb.setToolTip(_('Enable posting messages with OP_RETURN.'))
opret_cb.setChecked(enable_opreturn)
opret_cb.stateChanged.connect(self.on_toggled_opreturn)
global_tx_widgets.append((opret_cb,None))
# Schnorr
use_schnorr_cb = QCheckBox(_("Enable Schnorr signatures"))
use_schnorr_cb.setChecked(self.wallet.is_schnorr_enabled())
use_schnorr_cb.stateChanged.connect(self.wallet.set_schnorr_enabled)
no_schnorr_reason = []
if self.wallet.is_schnorr_possible(no_schnorr_reason):
use_schnorr_cb.setEnabled(True)
use_schnorr_cb.setToolTip(_("Sign all transactions using Schnorr signatures."))
else:
# not possible (wallet type not supported); show reason in tooltip
use_schnorr_cb.setEnabled(False)
use_schnorr_cb.setToolTip(no_schnorr_reason[0])
per_wallet_tx_widgets.append((use_schnorr_cb, None))
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
c = self.fx.get_currency()
h = self.fx.get_history_config()
else:
c, h = self.fx.default_currency, False
exchanges = self.fx.get_exchanges_by_ccy(c, h)
conf_exchange = self.fx.config_exchange()
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
idx = ex_combo.findText(conf_exchange) # try and restore previous exchange if in new list
if idx < 0:
                # previous exchange not available under the new history setting; try the default exchange
idx = ex_combo.findText(self.fx.default_exchange)
idx = 0 if idx < 0 else idx # if still no success (idx < 0) -> default to the first exchange in combo
if exchanges: # don't set index if no exchanges, as any index is illegal. this shouldn't happen.
ex_combo.setCurrentIndex(idx) # note this will emit a currentIndexChanged signal if it's changed
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
changed = bool(self.fx.get_history_config()) != bool(checked)
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
if changed:
self.history_list.update() # this won't happen too often as it's rate-limited
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(gui_widgets, _('General')),
(fee_widgets, _('Fees')),
(OrderedDict([
( _("App-Global Options") , global_tx_widgets ),
( _("Per-Wallet Options") , per_wallet_tx_widgets),
]), _('Transactions')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
def add_tabs_info_to_tabs(tabs, tabs_info):
def add_widget_pair(a,b,grid):
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
if a:
grid.addWidget(a, i, 0, 1, 2)
else:
grid.addItem(QSpacerItem(15, 15), i, 0, 1, 2)
for thing, name in tabs_info:
tab = QWidget()
if isinstance(thing, dict):
# This Prefs tab is laid out as groupboxes one atop another...
d = thing
vbox = QVBoxLayout(tab)
for groupName, widgets in d.items():
gbox = QGroupBox(groupName)
grid = QGridLayout(gbox)
grid.setColumnStretch(0,1)
for a,b in widgets:
add_widget_pair(a,b,grid)
vbox.addWidget(gbox, len(widgets))
else:
# Standard layout.. 1 tab has just a grid of widgets
widgets = thing
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
add_widget_pair(a,b,grid)
tabs.addTab(tab, name)
# / add_tabs_info_to_tabs
add_tabs_info_to_tabs(tabs, tabs_info)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
d.setParent(None) # for Python GC
if self.fx:
self.fx.timeout = 0
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_message(_('Please restart DeLight to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def is_alive(self): return bool(not self.cleaned_up)
def clean_up_connections(self):
def disconnect_signals():
for attr_name in dir(self):
if attr_name.endswith("_signal") and attr_name != "cashaddr_toggled_signal":
sig = getattr(self, attr_name)
if isinstance(sig, pyqtBoundSignal):
try: sig.disconnect()
except TypeError: pass # no connections
elif attr_name.endswith("__RateLimiter"): # <--- NB: this needs to match the attribute name in util.py rate_limited decorator
rl_obj = getattr(self, attr_name)
if isinstance(rl_obj, RateLimiter):
rl_obj.kill_timer()
try: self.disconnect()
except TypeError: pass
def disconnect_network_callbacks():
if self.network:
self.network.unregister_callback(self.on_network)
self.network.unregister_callback(self.on_quotes)
self.network.unregister_callback(self.on_history)
# /
disconnect_network_callbacks()
disconnect_signals()
def clean_up_children(self):
# Status bar holds references to self, so clear it to help GC this window
self.setStatusBar(None)
# Note that due to quirks on macOS and the shared menu bar, we do *NOT*
# clear the menuBar. Instead, doing this causes the object to get
# deleted and/or its actions (and more importantly menu action hotkeys)
# to go away immediately.
self.setMenuBar(None)
# Disable shortcuts immediately to prevent them from accidentally firing
# on us after we are closed. They will get deleted when this QObject
# is finally deleted by Qt.
for shortcut in self._shortcuts:
shortcut.setEnabled(False)
del shortcut
self._shortcuts.clear()
# Reparent children to 'None' so python GC can clean them up sooner rather than later.
# This also hopefully helps accelerate this window's GC.
children = [c for c in self.children()
if (isinstance(c, (QWidget, QAction, TaskThread))
and not isinstance(c, (QStatusBar, QMenuBar, QFocusFrame, QShortcut)))]
for c in children:
try: c.disconnect()
except TypeError: pass
c.setParent(None)
def clean_up(self):
self.wallet.thread.stop()
self.wallet.thread.wait() # Join the thread to make sure it's really dead.
for w in [self.address_list, self.history_list, self.utxo_list, self.cash_account_e, self.contact_list]:
if w: w.clean_up() # tell relevant widget to clean itself up, unregister callbacks, etc
# We catch these errors with the understanding that there is no recovery at
        # this point, given the user has likely performed an action we cannot recover
# cleanly from. So we attempt to exit as cleanly as possible.
try:
self.config.set_key("is_maximized", self.isMaximized())
self.config.set_key("console-history", self.console.history[-50:], True)
except (OSError, PermissionError) as e:
self.print_error("unable to write to config (directory removed?)", e)
if not self.isMaximized():
try:
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),g.width(),g.height()])
except (OSError, PermissionError) as e:
self.print_error("unable to write to wallet storage (directory removed?)", e)
# Should be no side-effects in this function relating to file access past this point.
if self.qr_window:
self.qr_window.close()
self.qr_window = None # force GC sooner rather than later.
for d in list(self._tx_dialogs):
# clean up all extant tx dialogs we opened as they hold references
# to us that will be invalidated
d.prompt_if_unsaved = False # make sure to unconditionally close
d.close()
self._close_wallet()
try: self.gui_object.timer.timeout.disconnect(self.timer_actions)
except TypeError: pass # defensive programming: this can happen if we got an exception before the timer action was connected
self.gui_object.close_window(self) # implicitly runs the hook: on_close_window
# Now, actually STOP the wallet's synchronizer and verifiers and remove
# it from the daemon. Note that its addresses will still stay
# 'subscribed' to the ElectrumX server until we connect to a new server,
# (due to ElectrumX protocol limitations).. but this is harmless.
self.gui_object.daemon.stop_wallet(self.wallet.storage.path)
# At this point all plugins should have removed any references to this window.
# Now, just to be paranoid, do some active destruction of signal/slot connections as well as
# Removing child widgets forcefully to speed up Python's own GC of this window.
self.clean_up_connections()
self.clean_up_children()
# And finally, print when we are destroyed by C++ for debug purposes
        # We must call this here as the above calls disconnected all signals
# involving this widget.
destroyed_print_error(self)
def internal_plugins_dialog(self):
if self.internalpluginsdialog:
# NB: reentrance here is possible due to the way the window menus work on MacOS.. so guard against it
self.internalpluginsdialog.raise_()
return
d = WindowModalDialog(parent=self.top_level_window(), title=_('Optional Features'))
weakD = Weak.ref(d)
gui_object = self.gui_object
plugins = gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.get_internal_plugin_count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
weakGrid = Weak.ref(grid)
w.setLayout(grid)
settings_widgets = Weak.ValueDictionary()
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
grid = weakGrid()
d = weakD()
if d and grid and not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
if not p:
# Need to delete settings widget because keeping it around causes bugs as it points to a now-dead plugin instance
settings_widgets.pop(name)
widget.hide(); widget.setParent(None); widget.deleteLater(); widget = None
def do_toggle(weakCb, name, i):
cb = weakCb()
if cb:
p = plugins.toggle_internal_plugin(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# All plugins get this whenever one is toggled.
run_hook('init_qt', gui_object)
for i, descr in enumerate(plugins.internal_plugin_metadata.values()):
name = descr['__name__']
p = plugins.get_internal_plugin(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
weakCb = Weak.ref(cb)
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_internal_plugin_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, weakCb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.internal_plugin_metadata.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
self.internalpluginsdialog = d
d.exec_()
self.internalpluginsdialog = None # Python GC please!
def external_plugins_dialog(self):
if self.externalpluginsdialog:
# NB: reentrance here is possible due to the way the window menus work on MacOS.. so guard against it
self.externalpluginsdialog.raise_()
return
from . import external_plugins_window
d = external_plugins_window.ExternalPluginsDialog(self, _('Plugin Manager'))
self.externalpluginsdialog = d
d.exec_()
self.externalpluginsdialog = None # allow python to GC
def hardware_wallet_support(self):
if not sys.platform.startswith('linux'):
self.print_error("FIXME! hardware_wallet_support is Linux only!")
return
if self.hardwarewalletdialog:
# NB: reentrance here is possible due to the way the window menus work on MacOS.. so guard against it
self.hardwarewalletdialog.raise_()
return
from .udev_installer import InstallHardwareWalletSupportDialog
d = InstallHardwareWalletSupportDialog(self.top_level_window(), self.gui_object.plugins)
self.hardwarewalletdialog = d
d.exec_()
self.hardwarewalletdialog = None # allow python to GC
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self.top_level_window(), _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel(_('{total_size} bytes').format(total_size=total_size)), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
        grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
result = d.exec_()
d.setParent(None) # So Python can GC
if not result:
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
if new_tx is None:
self.show_error(_('CPFP no longer valid'))
return
self.show_transaction(new_tx)
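    # Worked example of the fee proposal above (hedged, illustrative numbers):
    # with fee_per_kb() = 1000 base units per kB and total_size = 450 bytes,
    # the suggested fee is 1000 * 450 / 1000 = 450 base units, and the dialog
    # rejects anything above max_fee (the value of the unconfirmed output).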
def is_wallet_cashshuffle_compatible(self):
from electroncash.wallet import ImportedWalletBase, Multisig_Wallet
if (self.wallet.is_watching_only()
or self.wallet.is_hardware()
or isinstance(self.wallet, (Multisig_Wallet, ImportedWalletBase))):
# wallet is watching-only, multisig, or hardware so.. not compatible
return False
return False # display disabled (for now)
_cs_reminder_pixmap = None
def do_cash_shuffle_reminder(self):
if not self.remind_cashshuffle_enabled:
# NB: This is now disabled. We return early from this function.
# Amaury recommended we do this prompting/reminder in a future
# release after the initial public release, or we roll it out
# for a subset of users (hence this flag).
return
if self.cleaned_up or not self.wallet or not self.is_wallet_cashshuffle_compatible():
return
from electroncash_plugins.shuffle.conf_keys import ConfKeys
p = self.cashshuffle_plugin_if_loaded()
storage = self.wallet.storage
cashshuffle_flag = storage.get(ConfKeys.PerWallet.ENABLED, False)
enabled = cashshuffle_flag and p and p.is_enabled()
nagger_answer = storage.get(ConfKeys.PerWallet.MAIN_WINDOW_NAGGER_ANSWER, None)
if not enabled:
if nagger_answer is None: # nagger_answer is None if they've never said "Never ask"
if __class__._cs_reminder_pixmap is None:
# lazy init. Cache it to class level.
size = QSize(150, int(150/1.4419)) # Important to preserve aspect ratio in .svg file here
# NB: doing it this way, with a QIcon, will take into account devicePixelRatio and end up possibly producing a very hi quality image from the SVG, larger than size
__class__._cs_reminder_pixmap = QIcon(":icons/CashShuffleLogos/logo-vertical.svg").pixmap(size)
icon = __class__._cs_reminder_pixmap
message = '''
<big>{}</big></b>
<p>{}</p>
'''.format(_("CashShuffle is disabled for this wallet.") if not cashshuffle_flag else _("CashShuffle is disabled."),
_("Would you like to enable CashShuffle for this wallet?"))
info = ' '.join([_("If you enable it, DeLight will shuffle your coins for greater <b>privacy</b>. However, you will pay fractions of a penny per shuffle in transaction fees."),
_("(You can always toggle it later using the CashShuffle button.)")])
res, chkd = self.msg_box(icon=icon,
parent=self.top_level_window(),
title=_('Would you like to turn on CashShuffle?'),
text=message, rich_text=True, informative_text=info,
checkbox_text=_("Never ask for this wallet"),
buttons=(_('Enable CashShuffle'), _("Not now")),
defaultButton=_('Enable CashShuffle'), escapeButton=("Not now") )
if chkd:
# they don't want to be asked again, so just remember what they answered and apply this answer each time.
storage.put(ConfKeys.PerWallet.MAIN_WINDOW_NAGGER_ANSWER, bool(res==0))
else:
# They's specified "Never ask", so apply whatever button they pushed when they said that as the auto-setting.
res = 0 if nagger_answer else 1 # if nagge_answer was True, no prompt, just auto-enable, otherwise leave it disabled.
if res == 0:
self.toggle_cashshuffle()
def restart_cashshuffle(self, msg = None, parent = None):
if (parent or self).question("{}{}".format(msg + "\n\n" if msg else "", _("Restart the CashShuffle plugin now?")),
app_modal=True):
p = self.cashshuffle_plugin_if_loaded()
if p:
p.restart_all()
self.notify(_("CashShuffle restarted"))
else:
self.notify(_("CashShuffle could not be restarted"))
_cash_shuffle_flag = 0
def cashshuffle_set_flag(self, flag):
flag = int(flag)
changed = flag != self._cash_shuffle_flag
if not changed:
return
if flag:
def onClick():
KillPopupLabel("CashShuffleError")
self.show_cashshuffle_settings()
ShowPopupLabel(name = "CashShuffleError",
text="<center><b>{}</b><br><small>{}</small></center>".format(_("Server Error"),_("Right-click to resolve")),
target=self.cashshuffle_status_button,
timeout=20000, onClick=onClick, onRightClick=onClick,
dark_mode = ColorScheme.dark_scheme)
else:
KillPopupLabel("CashShuffleError")
self.print_error("Cash Shuffle flag is now {}".format(flag))
oldTip = self.cashshuffle_status_button.statusTip()
self._cash_shuffle_flag = flag
self.update_status()
newTip = self.cashshuffle_status_button.statusTip()
if newTip != oldTip:
self.statusBar().showMessage(newTip, 7500)
def cashshuffle_get_flag(self):
return self._cash_shuffle_flag
def rebuild_history(self):
if self.gui_object.warn_if_no_network(self):
# Don't allow if offline mode.
return
msg = ' '.join([
_('This feature is intended to allow you to rebuild a wallet if it has become corrupted.'),
"\n\n"+_('Your entire transaction history will be downloaded again from the server and verified from the blockchain.'),
_('Just to be safe, back up your wallet file first!'),
"\n\n"+_("Rebuild this wallet's history now?")
])
if self.question(msg, title=_("Rebuild Wallet History")):
try:
self.wallet.rebuild_history()
except RuntimeError as e:
self.show_error(str(e))
def scan_beyond_gap(self):
if self.gui_object.warn_if_no_network(self):
return
from .scan_beyond_gap import ScanBeyondGap
d = ScanBeyondGap(self)
d.exec_()
d.setParent(None) # help along Python by dropping refct to 0
def copy_to_clipboard(self, text, tooltip=None, widget=None):
tooltip = tooltip or _("Text copied to clipboard")
widget = widget or self
qApp.clipboard().setText(text)
QToolTip.showText(QCursor.pos(), tooltip, widget)
def register_new_cash_account(self, addr = None):
''' Initiates the "Register a new cash account" dialog.
        If addr is None, will use self.receive_address. '''
addr = addr or self.receive_address or self.wallet.get_receiving_address()
if not addr:
self.print_error("register_new_cash_account: no receive address specified")
return
def on_link(ignored):
webopen('https://www.devaultid.com/')
name, placeholder = '', 'Satoshi_Nakamoto'
while True:
lh = self.wallet.get_local_height()
name = line_dialog(self, _("Register A New DeVault ID"),
(_("You are registering a new <a href='ca'>DeVault ID</a> for your address <b><pre>{address}</pre></b>").format(address=addr.to_ui_string())
+ "<<br>" + _("How it works: <a href='ca'>DeVault IDs</a> registrations work by issuing an <b>OP_RETURN</b> transaction to yourself, costing fractions of a penny. "
"You will be offered the opportunity to review the generated transaction before broadcasting it to the blockchain.")
+ "<br><br>" + _("The current block height is <b><i>{block_height}</i></b>, so the new cash account will likely look like: <b><u><i>AccountName<i>#{number}</u></b>.")
.format(block_height=lh or '???', number=max(cashacct.bh2num(lh or 0)+1, 0) or '???')
+ "<br><br>" + _("Specify the <b>account name</b> below (limited to 99 characters):") ),
_("Proceed to Send Tab"), default=name, linkActivated=on_link,
placeholder=placeholder, disallow_empty=True,
icon=QIcon(":icons/cashacct-logo.png"))
if name is None:
# user cancel
return
name = name.strip()
if not cashacct.name_accept_re.match(name):
self.show_error(_("The specified name cannot be used for a DeVault IDs registration. You must specify 1-99 alphanumeric (ASCII) characters, without spaces (underscores are permitted as well)."))
continue
self._reg_new_cash_account(name, addr)
return
def _reg_new_cash_account(self, name, addr):
self.show_send_tab()
self.do_clear()
# Enabled OP_RETURN stuff even if disabled in prefs. Next do_clear call will reset to prefs presets.
self.message_opreturn_e.setVisible(True)
self.opreturn_rawhex_cb.setVisible(True)
self.opreturn_label.setVisible(True)
# Prevent user from modifying required fields, and hide what we
# can as well.
self.message_opreturn_e.setText(cashacct.ScriptOutput.create_registration(name, addr).script[1:].hex())
self.message_opreturn_e.setFrozen(True)
self.opreturn_rawhex_cb.setChecked(True)
self.opreturn_rawhex_cb.setDisabled(True)
self.amount_e.setAmount(0)
self.amount_e.setFrozen(True)
self.max_button.setDisabled(True)
self.payto_e.setHidden(True)
self.payto_label.setHidden(True)
# Set a default description -- this we allow them to edit
self.message_e.setText(
_("DeVault IDs Registration: '{name}' -> {address}").format(
name=name, address=addr.to_ui_string()
)
)
# set up "Helpful Window" informing user registration will
# not be accepted until at least 1 confirmation.
cashaccounts_never_show_send_tab_hint = self.config.get('cashaccounts_never_show_send_tab_hint', False)
if not cashaccounts_never_show_send_tab_hint:
msg1 = (
_("The Send Tab has been filled-in with your <b>DeVault IDs</b> registration data.")
+ "<br><br>" + _("Please review the transaction, save it, and/or broadcast it at your leisure.")
)
msg2 = ( _("After at least <i>1 confirmation</i>, you will be able to use your new <b>DeVault ID</b>, and it will be visible in DeLight in the <b>Addresses</b> tab.")
)
msg3 = _("If you wish to control which specific coins are used to "
"fund this registration transaction, feel free to use the "
"Coins and/or Addresses tabs' Spend-from facility.\n\n"
"('Spend from' is a right-click menu option in either tab.)")
res = self.msg_box(
# TODO: get SVG icon..
parent = self, icon=QIcon(":icons/cashacct-logo.png").pixmap(75, 75),
title=_('Register A New DeVault ID'), rich_text=True,
text = msg1, informative_text = msg2, detail_text = msg3,
checkbox_text=_("Never show this again"), checkbox_ischecked=False
)
if res[1]:
# never ask checked
self.config.set_key('cashaccounts_never_show_send_tab_hint', True)
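    # Hedged note on the registration flow above: the OP_RETURN field is filled
    # with the registration script minus its leading OP_RETURN opcode byte,
    # hex-encoded (hence `.script[1:].hex()`), and the amount is fixed to 0.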
class TxUpdateMgr(QObject, PrintError):
''' Manages new transaction notifications and transaction verified
notifications from the network thread. It collates them and sends them to
the appropriate GUI controls in the main_window in an efficient manner. '''
def __init__(self, main_window_parent):
assert isinstance(main_window_parent, ElectrumWindow), "TxUpdateMgr must be constructed with an ElectrumWindow as its parent"
super().__init__(main_window_parent)
self.lock = threading.Lock() # used to lock thread-shared attrs below
# begin thread-shared attributes
self.notif_q = []
self.verif_q = []
self.need_process_v, self.need_process_n = False, False
# /end thread-shared attributes
self.weakParent = Weak.ref(main_window_parent)
main_window_parent.history_updated_signal.connect(self.verifs_get_and_clear, Qt.DirectConnection) # immediately clear verif_q on history update because it would be redundant to keep the verify queue around after a history list update
main_window_parent.on_timer_signal.connect(self.do_check, Qt.DirectConnection) # hook into main_window's timer_actions function
def diagnostic_name(self):
return ((self.weakParent() and self.weakParent().diagnostic_name()) or "???") + "." + __class__.__name__
def do_check(self):
''' Called from timer_actions in main_window to check if notifs or
verifs need to update the GUI.
- Checks the need_process_[v|n] flags
- If either flag is set, call the @rate_limited process_verifs
and/or process_notifs functions which update GUI parent in a
rate-limited (collated) fashion (for decent GUI responsiveness). '''
with self.lock:
bV, bN = self.need_process_v, self.need_process_n
self.need_process_v, self.need_process_n = False, False
if bV: self.process_verifs() # rate_limited call (1 per second)
if bN: self.process_notifs() # rate_limited call (1 per 15 seconds)
def verifs_get_and_clear(self):
''' Clears the verif_q. This is called from the network
thread for the 'verified2' event as well as from the below
update_verifs (GUI thread), hence the lock. '''
with self.lock:
ret = self.verif_q
self.verif_q = []
self.need_process_v = False
return ret
def notifs_get_and_clear(self):
with self.lock:
ret = self.notif_q
self.notif_q = []
self.need_process_n = False
return ret
def verif_add(self, args):
# args: [wallet, tx_hash, height, conf, timestamp]
# filter out tx's not for this wallet
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
if args[0] is parent.wallet:
with self.lock:
self.verif_q.append(args[1:])
self.need_process_v = True
def notif_add(self, args):
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
tx, wallet = args
# filter out tx's not for this wallet
if wallet is parent.wallet:
with self.lock:
self.notif_q.append(tx)
self.need_process_n = True
@rate_limited(1.0, ts_after=True)
def process_verifs(self):
''' Update history list with tx's from verifs_q, but limit the
GUI update rate to once per second. '''
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
items = self.verifs_get_and_clear()
if items:
t0 = time.time()
parent.history_list.setUpdatesEnabled(False)
had_sorting = parent.history_list.isSortingEnabled()
if had_sorting:
parent.history_list.setSortingEnabled(False)
n_updates = 0
for item in items:
did_update = parent.history_list.update_item(*item)
n_updates += 1 if did_update else 0
self.print_error("Updated {}/{} verified txs in GUI in {:0.2f} ms"
.format(n_updates, len(items), (time.time()-t0)*1e3))
if had_sorting:
parent.history_list.setSortingEnabled(True)
parent.history_list.setUpdatesEnabled(True)
parent.update_status()
@rate_limited(5.0, classlevel=True)
def process_notifs(self):
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
if parent.network:
txns = self.notifs_get_and_clear()
if txns:
# Combine the transactions
n_ok, n_cashacct, total_amount = 0, 0, 0
last_seen_ca_name = ''
ca_txs = dict() # 'txid' -> ('name', address) -- will be given to contacts_list for "unconfirmed registrations" display
for tx in txns:
if tx:
is_relevant, is_mine, v, fee = parent.wallet.get_wallet_delta(tx)
for _typ, addr, val in tx.outputs():
# Find DeVault ID registrations that are for addresses *in* this wallet
if isinstance(addr, cashacct.ScriptOutput) and parent.wallet.is_mine(addr.address):
n_cashacct += 1
last_seen_ca_name = addr.name
txid = tx.txid_fast()
if txid: ca_txs[txid] = (addr.name, addr.address)
if not is_relevant:
continue
total_amount += v
n_ok += 1
if n_cashacct:
# Unhide the Addresses tab if cash account reg tx seen
# and user never explicitly hid it.
if parent.config.get("show_addresses_tab") is None:
# We unhide it because presumably they want to SEE
# their cash accounts now that they have them --
# and part of the UI is *IN* the Addresses tab.
parent.toggle_tab(parent.addresses_tab)
                    # Do the same for the Contacts tab
if parent.config.get("show_contacts_tab") is None:
# We unhide it because presumably they want to SEE
# their cash accounts now that they have them --
                        # and part of the UI is *IN* the Contacts tab.
parent.toggle_tab(parent.contacts_tab)
if ca_txs:
# Notify contact_list of potentially unconfirmed txs
parent.contact_list.ca_update_potentially_unconfirmed_registrations(ca_txs)
if parent.wallet.storage.get('gui_notify_tx', True):
ca_text = ''
if n_cashacct > 1:
# plural
ca_text = " + " + _("{number_of_cashaccounts} DeVault IDs registrations").format(number_of_cashaccounts = n_cashacct)
elif n_cashacct == 1:
# singular
ca_text = " + " + _("1 DeVault IDs registration ({cash_accounts_name})").format(cash_accounts_name = last_seen_ca_name)
if total_amount > 0:
self.print_error("Notifying GUI %d tx"%(max(n_ok, n_cashacct)))
if max(n_ok, n_cashacct) > 1:
parent.notify(_("{} new transactions: {}")
.format(n_ok, parent.format_amount_and_units(total_amount, is_diff=True)) + ca_text)
else:
parent.notify(_("New transaction: {}").format(parent.format_amount_and_units(total_amount, is_diff=True)) + ca_text)
elif n_cashacct:
# No total amount (was just a cashacct reg tx)
ca_text = ca_text[3:] # pop off the " + "
if n_cashacct > 1:
parent.notify(_("{} new transactions: {}")
.format(n_cashacct, ca_text))
else:
parent.notify(_("New transaction: {}").format(ca_text))
|
data_utils.py
|
"""
Miscellaneous functions to manage data.
Date: September 2018
Author: Ignacio Heredia
Email: iheredia@ifca.unican.es
Github: ignacioheredia
"""
import os
import threading
from multiprocessing import Pool
import queue
import subprocess
import warnings
import base64
import numpy as np
import requests
from tqdm import tqdm
from tensorflow.keras.utils import to_categorical, Sequence
import cv2
import albumentations
from albumentations.augmentations import transforms
from albumentations.imgaug import transforms as imgaug_transforms
def load_data_splits(splits_dir, im_dir, use_location, split_name='train'):
    """
    Load the data arrays from the [train/val/test].txt files.
    Lines of txt files have the following format:
    'relative_path_to_image' 'image_label_number' 'image_location_label_number'
    Parameters
    ----------
    splits_dir : str
        Absolute path to the directory containing the split files.
    im_dir : str
        Absolute path to the image folder.
    use_location : bool
        Whether location labels are expected alongside the class labels.
    split_name : str
        Name of the data split to load
    Returns
    -------
    X : Numpy array of strs
        First column: Contains 'absolute_path_to_file' to images.
    y : Numpy array of int32
        Image label number
    """
    if use_location:
        print("Using location data")
        # NOTE: the location-aware branch is currently identical to the default
        # workflow; location labels are not yet returned (see TODO below).
    else:
        print("Not using location data")
    if '{}.txt'.format(split_name) not in os.listdir(splits_dir):
        raise ValueError("Invalid value for the split_name parameter: there is no `{}.txt` file in the `{}` "
                         "directory.".format(split_name, splits_dir))
    # Loading splits
    print("Loading {} data...".format(split_name))
    split = np.genfromtxt(os.path.join(splits_dir, '{}.txt'.format(split_name)), dtype='str', delimiter=' ')
    X = np.array([os.path.join(im_dir, i) for i in split[:, 0]])
    # TODO: check this part of the code
    if len(split.shape) == 2:
        y = split[:, 1].astype(np.int32)
    else:  # the test file may not have labels
        y = None
    return X, y
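# Hedged usage sketch for load_data_splits; the paths below are hypothetical
# placeholders, assuming a `train.txt` with lines like `cats/001.jpg 3`.
def _example_load_data_splits():
    X_train, y_train = load_data_splits(splits_dir='/data/splits',
                                        im_dir='/data/images',
                                        use_location=False,
                                        split_name='train')
    print(len(X_train), None if y_train is None else y_train.dtype)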
def mount_nextcloud(frompath, topath):
"""
    Copy a NextCloud folder to the local machine (or vice versa). Despite the
    name, this performs an `rclone copy`, not an actual mount.
"""
command = (['rclone', 'copy', frompath, topath])
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = result.communicate()
if error:
warnings.warn("Error while mounting NextCloud: {}".format(error))
return output, error
def load_class_names(splits_dir):
"""
Load list of class names
Returns
-------
Numpy array of shape (N) containing strs with class names
"""
print("Loading class names...")
class_names = np.genfromtxt(os.path.join(splits_dir, 'classes.txt'), dtype='str', delimiter='/n')
return class_names
def load_class_info(splits_dir):
"""
    Load list of class information strings.
    Returns
    -------
    Numpy array of shape (N) containing strs with class info
"""
print("Loading class info...")
class_info = np.genfromtxt(os.path.join(splits_dir, 'info.txt'), dtype='str', delimiter='/n')
return class_info
def load_image(filename, filemode='local'):
"""
    Function to load a local image path (or a URL) into a numpy array.
Parameters
----------
filename : str
Path or url to the image
filemode : {'local','url'}
- 'local': filename is absolute path in local disk.
- 'url': filename is internet url.
Returns
-------
A numpy array
"""
if filemode == 'local':
image = cv2.imread(filename, cv2.IMREAD_COLOR)
if image is None:
raise ValueError('The local path does not exist or does not correspond to an image: \n {}'.format(filename))
elif filemode == 'url':
try:
if filename.startswith('data:image'): # base64 encoded string
data = base64.b64decode(filename.split(';base64,')[1])
else: # normal url
data = requests.get(filename).content
data = np.frombuffer(data, np.uint8)
image = cv2.imdecode(data, cv2.IMREAD_COLOR)
if image is None:
raise Exception
        except Exception:
raise ValueError('Incorrect url path: \n {}'.format(filename))
else:
raise ValueError('Invalid value for filemode.')
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # convert from OpenCV's default BGR channel order to RGB
return image
def preprocess_batch(batch, mean_RGB, std_RGB, mode='tf', channels_first=False):
"""
    Standardize batch to feed the net. Adapted from [1] to replace the default ImageNet mean and std.
[1] https://github.com/keras-team/keras-applications/blob/master/keras_applications/imagenet_utils.py
Parameters
----------
batch : list of numpy arrays
mean_RGB, std_RGB : list of floats, len=3
Mean/std RGB values for your dataset.
channels_first : bool
Use batch of shape (N, C, H, W) instead of (N, H, W, C)
Returns
-------
Numpy array
"""
assert type(batch) is list, "Your batch must be a list of numpy arrays"
mean_RGB, std_RGB = np.array(mean_RGB), np.array(std_RGB)
batch = np.array(batch) - mean_RGB[None, None, None, :] # mean centering
if mode == 'caffe':
batch = batch[:, :, :, ::-1] # switch from RGB to BGR
if mode == 'tf':
        batch /= 127.5  # scale to approximately [-1, 1]
if mode == 'torch':
batch /= std_RGB
if channels_first:
batch = batch.transpose(0, 3, 1, 2) # shape(N, 3, 224, 224)
return batch.astype(np.float32)
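# Minimal sketch of preprocess_batch in 'tf' mode; the RGB statistics below are
# illustrative values, not the statistics of any particular dataset.
def _example_preprocess_batch():
    batch = [np.full((2, 2, 3), 130.0)]  # one tiny 2x2 RGB "image"
    out = preprocess_batch(batch, mean_RGB=[107.6, 112.1, 81.0],
                           std_RGB=[52.8, 50.6, 50.9], mode='tf')
    # A pixel equal to mean_RGB maps to 0; mean_RGB + 127.5 maps to +1.
    print(out.shape, out.dtype)  # (1, 2, 2, 3) float32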
def augment(im, params=None):
"""
Perform data augmentation on some image using the albumentations package.
Parameters
----------
im : Numpy array
params : dict or None
Contains the data augmentation parameters
Mandatory keys:
            - h_flip ([0,1] float): probability of performing a horizontal left-right mirroring.
            - v_flip ([0,1] float): probability of performing a vertical up-down mirroring.
- rot ([0,1] float): probability of performing a rotation to the image.
- rot_lim (int): max degrees of rotation.
- stretch ([0,1] float): probability of randomly stretching an image.
- crop ([0,1] float): randomly take an image crop.
- zoom ([0,1] float): random zoom applied to crop_size.
            --> Therefore the effective crop size at each iteration will be a
                random fraction between crop and crop - zoom. For example:
* crop=1, zoom=0: no crop of the image
* crop=1, zoom=0.1: random crop of random size between 100% image and 90% of the image
* crop=0.9, zoom=0.1: random crop of random size between 90% image and 80% of the image
* crop=0.9, zoom=0: random crop of always 90% of the image
Image size refers to the size of the shortest side.
- blur ([0,1] float): probability of randomly blurring an image.
- pixel_noise ([0,1] float): probability of randomly adding pixel noise to an image.
- pixel_sat ([0,1] float): probability of randomly using HueSaturationValue in the image.
- cutout ([0,1] float): probability of using cutout in the image.
Returns
-------
Numpy array
"""
## 1) Crop the image
effective_zoom = np.random.rand() * params['zoom']
crop = params['crop'] - effective_zoom
ly, lx, channels = im.shape
crop_size = int(crop * min([ly, lx]))
rand_x = np.random.randint(low=0, high=lx - crop_size + 1)
rand_y = np.random.randint(low=0, high=ly - crop_size + 1)
crop = transforms.Crop(x_min=rand_x,
y_min=rand_y,
x_max=rand_x + crop_size,
y_max=rand_y + crop_size)
im = crop(image=im)['image']
## 2) Now add the transformations for augmenting the image pixels
transform_list = []
# Add random stretching
if params['stretch']:
transform_list.append(
imgaug_transforms.IAAPerspective(scale=0.1, p=params['stretch'])
)
# Add random rotation
if params['rot']:
transform_list.append(
transforms.Rotate(limit=params['rot_lim'], p=params['rot'])
)
# Add horizontal flip
if params['h_flip']:
transform_list.append(
transforms.HorizontalFlip(p=params['h_flip'])
)
# Add vertical flip
if params['v_flip']:
transform_list.append(
transforms.VerticalFlip(p=params['v_flip'])
)
# Add some blur to the image
if params['blur']:
transform_list.append(
albumentations.OneOf([
transforms.MotionBlur(blur_limit=7, p=1.),
transforms.MedianBlur(blur_limit=7, p=1.),
transforms.Blur(blur_limit=7, p=1.),
], p=params['blur'])
)
# Add pixel noise
if params['pixel_noise']:
transform_list.append(
albumentations.OneOf([
transforms.CLAHE(clip_limit=2, p=1.),
imgaug_transforms.IAASharpen(p=1.),
imgaug_transforms.IAAEmboss(p=1.),
transforms.RandomBrightnessContrast(contrast_limit=0, p=1.),
transforms.RandomBrightnessContrast(brightness_limit=0, p=1.),
transforms.RGBShift(p=1.),
transforms.RandomGamma(p=1.)#,
# transforms.JpegCompression(),
# transforms.ChannelShuffle(),
# transforms.ToGray()
], p=params['pixel_noise'])
)
# Add pixel saturation
if params['pixel_sat']:
transform_list.append(
transforms.HueSaturationValue(p=params['pixel_sat'])
)
    # Randomly remove some regions from the image
if params['cutout']:
ly, lx, channels = im.shape
scale_low, scale_high = 0.05, 0.25 # min and max size of the squares wrt the full image
scale = np.random.uniform(scale_low, scale_high)
transform_list.append(
transforms.Cutout(num_holes=8, max_h_size=int(scale*ly), max_w_size=int(scale*lx), p=params['cutout'])
)
# Compose all image transformations and augment the image
augmentation_fn = albumentations.Compose(transform_list)
im = augmentation_fn(image=im)['image']
return im
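# Hedged example: a complete parameter dict for augment() with mild settings.
# The keys are the mandatory ones listed in the docstring; the values are
# illustrative, not tuned recommendations.
def _example_augment(im):
    params = {'h_flip': 0.5, 'v_flip': 0.0, 'rot': 0.3, 'rot_lim': 20,
              'stretch': 0.1, 'crop': 1.0, 'zoom': 0.1, 'blur': 0.1,
              'pixel_noise': 0.1, 'pixel_sat': 0.1, 'cutout': 0.1}
    return augment(im, params=params)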
def resize_im(im, height, width):
resize_fn = transforms.Resize(height=height, width=width)
return resize_fn(image=im)['image']
def data_generator(inputs, targets, batch_size, mean_RGB, std_RGB, preprocess_mode, aug_params, num_classes,
im_size=224, shuffle=True):
"""
Generator to feed Keras fit function
Parameters
----------
inputs : Numpy array, shape (N, H, W, C)
targets : Numpy array, shape (N)
batch_size : int
shuffle : bool
aug_params : dict
im_size : int
        Final image size to feed the net's input (e.g. 224 for ResNet).
Returns
-------
Generator of inputs and labels
"""
assert len(inputs) == len(targets)
assert len(inputs) >= batch_size
# Create list of indices
idxs = np.arange(len(inputs))
if shuffle:
np.random.shuffle(idxs)
# # Reshape targets to the correct shape
# if len(targets.shape) == 1:
# print('reshaping targets')
# targets = targets.reshape(-1, 1)
for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
excerpt = idxs[start_idx:start_idx + batch_size]
batch_X = []
for i in excerpt:
im = load_image(inputs[i], filemode='local')
im = augment(im, params=aug_params)
im = resize_im(im, height=im_size, width=im_size)
batch_X.append(im) # shape (N, 224, 224, 3)
batch_X = preprocess_batch(batch=batch_X, mean_RGB=mean_RGB, std_RGB=std_RGB, mode=preprocess_mode)
batch_y = to_categorical(targets[excerpt], num_classes=num_classes)
yield batch_X, batch_y
def buffered_generator(source_gen, buffer_size=10):
"""
Generator that runs a slow source generator in a separate thread. Beware of the GIL!
Author: Benanne (github-kaggle/benanne/ndsb)
Parameters
----------
source_gen : generator
buffer_size: the maximal number of items to pre-generate (length of the buffer)
Returns
-------
Buffered generator
"""
if buffer_size < 2:
raise RuntimeError("Minimal buffer size is 2!")
buffer = queue.Queue(maxsize=buffer_size - 1)
# the effective buffer size is one less, because the generation process
# will generate one extra element and block until there is room in the buffer.
def _buffered_generation_thread(source_gen, buffer):
for data in source_gen:
buffer.put(data, block=True)
buffer.put(None) # sentinel: signal the end of the iterator
thread = threading.Thread(target=_buffered_generation_thread, args=(source_gen, buffer))
thread.daemon = True
thread.start()
for data in iter(buffer.get, None):
yield data
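# Hedged usage sketch: wrap a slow generator so that up to buffer_size - 1
# items are precomputed in a background thread while the consumer works.
def _example_buffered_generator():
    slow_gen = (i * i for i in range(5))
    for item in buffered_generator(slow_gen, buffer_size=3):
        print(item)  # 0, 1, 4, 9, 16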
class data_sequence(Sequence):
"""
Instance of a Keras Sequence that is safer to use with multiprocessing than a standard generator.
Check https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly
TODO: Add sample weights on request
"""
def __init__(self, inputs, targets, batch_size, mean_RGB, std_RGB, preprocess_mode, aug_params, num_classes,
im_size=224, shuffle=True):
"""
Parameters are the same as in the data_generator function
"""
assert len(inputs) == len(targets)
assert len(inputs) >= batch_size
self.inputs = inputs
self.targets = targets
self.batch_size = batch_size
self.mean_RGB = mean_RGB
self.std_RGB = std_RGB
self.preprocess_mode = preprocess_mode
self.aug_params = aug_params
self.num_classes = num_classes
self.im_size = im_size
self.shuffle = shuffle
self.on_epoch_end()
def __len__(self):
return int(np.ceil(len(self.inputs) / float(self.batch_size)))
def __getitem__(self, idx):
batch_idxs = self.indexes[idx*self.batch_size: (idx+1)*self.batch_size]
batch_X = []
for i in batch_idxs:
im = load_image(self.inputs[i])
if self.aug_params:
im = augment(im, params=self.aug_params)
im = resize_im(im, height=self.im_size, width=self.im_size)
batch_X.append(im) # shape (N, 224, 224, 3)
batch_X = preprocess_batch(batch=batch_X, mean_RGB=self.mean_RGB, std_RGB=self.std_RGB, mode=self.preprocess_mode)
batch_y = to_categorical(self.targets[batch_idxs], num_classes=self.num_classes)
return batch_X, batch_y
def on_epoch_end(self):
"""Updates indexes after each epoch"""
self.indexes = np.arange(len(self.inputs))
if self.shuffle:
np.random.shuffle(self.indexes)
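# Hedged sketch of feeding data_sequence to Keras fit(); `model`, the arrays and
# the RGB statistics below are hypothetical placeholders.
# seq = data_sequence(X_train, y_train, batch_size=32,
#                     mean_RGB=[107.6, 112.1, 81.0], std_RGB=[52.8, 50.6, 50.9],
#                     preprocess_mode='tf', aug_params=None, num_classes=10)
# model.fit(seq, epochs=10, workers=4, use_multiprocessing=True)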
def standard_tencrop_batch(im, crop_prop=0.9):
"""
Returns an ordered ten crop batch of images from an original image (corners, center + mirrors).
Parameters
----------
im : numpy array, type np.uint8
crop_prop: float, [0, 1]
Size of the crop with respect to the whole image
Returns
-------
List of 10 numpy arrays
"""
batch = []
min_side = np.amin(im.shape[:2])
im = resize_im(im, height=min_side, width=min_side) # resize to shorter border
h, w = min_side, min_side # height, width (square)
crop_size = int(crop_prop * min_side)
# Crops
c1 = transforms.Crop(x_min=0,
y_min=0,
x_max=crop_size,
y_max=crop_size)(image=im)['image'] # top-left
c2 = transforms.Crop(x_min=0,
y_min=h-crop_size,
x_max=crop_size,
y_max=h)(image=im)['image'] # bottom-left
c3 = transforms.Crop(x_min=w-crop_size,
y_min=0,
x_max=w,
y_max=crop_size)(image=im)['image'] # top-right
c4 = transforms.Crop(x_min=w-crop_size,
y_min=h-crop_size,
x_max=w,
y_max=h)(image=im)['image'] # bottom-right
c5 = transforms.Crop(x_min=np.round((w-crop_size)/2).astype(int),
y_min=np.round((h-crop_size)/2).astype(int),
x_max=np.round((w+crop_size)/2).astype(int),
y_max=np.round((h+crop_size)/2).astype(int))(image=im)['image'] # center
# Save crop and its mirror
lr_aug = albumentations.HorizontalFlip(p=1)
for image in [c1, c2, c3, c4, c5]:
batch.append(image)
batch.append(lr_aug(image=image)['image'])
return batch
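# Worked example (hedged): for a 500x600 image and crop_prop=0.9 the image is
# first resized to 500x500, crop_size = int(0.9 * 500) = 450, and the function
# returns ten 450x450 crops: four corners plus the center, each with its mirror.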
class k_crop_data_sequence(Sequence):
"""
Data sequence generator for test time to feed to predict_generator.
Each batch delivered is composed by multiple crops (default=10) of the same image.
"""
def __init__(self, inputs, mean_RGB, std_RGB, preprocess_mode, aug_params, crop_number=10, crop_mode='random',
filemode='local', im_size=224):
"""
Parameters are the same as in the data_generator function except for:
Parameters
----------
crop_number : int
Number of crops of each image to take.
        crop_mode : str, {'random', 'standard'}
            If 'random', data augmentation is performed randomly.
            If 'standard' we take the standard 10 crops (corners + center + mirrors)
filemode : {'local','url'}
- 'local': filename is absolute path in local disk.
- 'url': filename is internet url.
"""
self.inputs = inputs
self.mean_RGB = mean_RGB
self.std_RGB = std_RGB
self.preprocess_mode = preprocess_mode
self.aug_params = aug_params
self.crop_number = crop_number
self.crop_mode = crop_mode
self.filemode = filemode
self.im_size = im_size
def __len__(self):
return len(self.inputs)
def __getitem__(self, idx):
batch_X = []
im = load_image(self.inputs[idx], filemode=self.filemode)
if self.crop_mode == 'random':
for _ in range(self.crop_number):
if self.aug_params:
im_aug = augment(im, params=self.aug_params)
else:
im_aug = np.copy(im)
im_aug = resize_im(im_aug, height=self.im_size, width=self.im_size)
batch_X.append(im_aug) # shape (N, 224, 224, 3)
if self.crop_mode == 'standard':
batch_X = standard_tencrop_batch(im)
batch_X = preprocess_batch(batch=batch_X, mean_RGB=self.mean_RGB, std_RGB=self.std_RGB, mode=self.preprocess_mode)
return batch_X
def im_stats(filename):
"""
Helper for function compute_meanRGB
"""
im = load_image(filename, filemode='local')
mean = np.mean(im, axis=(0, 1))
std = np.std(im, axis=(0, 1))
return mean.tolist(), std.tolist()
def compute_meanRGB(im_list, verbose=False, workers=4):
"""
Returns the mean and std RGB values for the whole dataset.
For example in the plantnet dataset we have:
mean_RGB = np.array([ 107.59348955, 112.1047813 , 80.9982362 ])
std_RGB = np.array([ 52.78326119, 50.56163087, 50.86486131])
Parameters
----------
im_list : array of strings
        Array of image paths (or image URLs). Shape (N,).
verbose : bool
Show progress bar
workers: int
Numbers of parallel workers to perform the computation with.
References
----------
https://stackoverflow.com/questions/41920124/multiprocessing-use-tqdm-to-display-a-progress-bar
"""
print('Computing mean RGB pixel with {} workers...'.format(workers))
with Pool(workers) as p:
r = list(tqdm(p.imap(im_stats, im_list),
total=len(im_list),
                      disable=not verbose))
r = np.asarray(r)
mean, std = r[:, 0], r[:, 1]
mean, std = np.mean(mean, axis=0), np.mean(std, axis=0)
print('Mean RGB pixel: {}'.format(mean.tolist()))
print('Standard deviation of RGB pixel: {}'.format(std.tolist()))
return mean.tolist(), std.tolist()
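# Hedged usage sketch with hypothetical paths:
# mean_RGB, std_RGB = compute_meanRGB(['/data/images/a.jpg', '/data/images/b.jpg'],
#                                     verbose=True, workers=2)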
def compute_classweights(labels, max_dim=None, mode='balanced'):
"""
Compute the class weights for a set of labels to account for label imbalance.
Parameters
----------
labels : numpy array, type (ints), shape (N)
max_dim : int
Maximum number of classes. Default is the max value in labels.
mode : str, {'balanced', 'log'}
Returns
-------
    Numpy array, type (float32), shape (num_classes)
"""
if mode is None:
return None
weights = np.bincount(labels)
weights = np.sum(weights) / weights
    # Pad the counts in case the highest-numbered labels are absent from the sample
if max_dim is not None:
diff = max_dim - len(weights)
if diff != 0:
weights = np.pad(weights, pad_width=(0, diff), mode='constant', constant_values=0)
# Transform according to different modes
if mode == 'balanced':
pass
elif mode == 'log':
# do not use --> produces numerical instabilities at inference when transferring weights trained on GPU to CPU
weights = np.log(weights) # + 1
else:
raise ValueError('{} is not a valid option for parameter "mode"'.format(mode))
return weights.astype(np.float32)
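# Worked example (hedged): labels [0, 0, 0, 1] give bincounts [3, 1], so the
# 'balanced' weights are sum/count = [4/3, 4/1] ~= [1.33, 4.0].
def _example_compute_classweights():
    w = compute_classweights(np.array([0, 0, 0, 1]), mode='balanced')
    print(w)  # approx [1.33, 4.0]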
def json_friendly(d):
"""
Return a json friendly dictionary (mainly remove numpy data types)
"""
new_d = {}
for k, v in d.items():
if isinstance(v, (np.float32, np.float64)):
v = float(v)
elif isinstance(v, (np.ndarray, list)):
if isinstance(v[0], (np.float32, np.float64)):
v = np.array(v).astype(float).tolist()
else:
v = np.array(v).tolist()
new_d[k] = v
return new_d
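# Hedged example: numpy scalars and arrays are converted to plain Python types
# so the dict can be serialized with the standard json module.
def _example_json_friendly():
    import json
    d = {'acc': np.float32(0.25), 'probs': np.array([0.25, 0.75], dtype=np.float32)}
    print(json.dumps(json_friendly(d)))  # {"acc": 0.25, "probs": [0.25, 0.75]}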
|
bridge.py
|
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
# pylint: disable=protected-access
import time
try:
import queue
except ImportError:
import Queue as queue
import logging
import os
import threading
import collections
from concurrent import futures
import grpc
import google.protobuf.any_pb2
import tensorflow.compat.v1 as tf
from fedlearner.common import common_pb2 as common_pb
from fedlearner.common import trainer_worker_service_pb2 as tws_pb
from fedlearner.common import trainer_worker_service_pb2_grpc as tws_grpc
from fedlearner.proxy.channel import make_insecure_channel, ChannelType
def make_ready_client(channel, stop_event=None):
channel_ready = grpc.channel_ready_future(channel)
wait_secs = 0.5
start_time = time.time()
while (stop_event is None) or (not stop_event.is_set()):
try:
channel_ready.result(timeout=wait_secs)
break
except grpc.FutureTimeoutError:
            logging.warning('Channel still not ready after %.2f seconds',
time.time()-start_time)
if wait_secs < 5.0:
wait_secs *= 1.2
except Exception as e: # pylint: disable=broad-except
logging.warning('Waiting channel ready: %s', repr(e))
return tws_grpc.TrainerWorkerServiceStub(channel)
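# Hedged usage sketch: build an insecure channel to a placeholder address and
# block until it is ready (or until stop_event is set):
# stop_event = threading.Event()
# channel = make_insecure_channel('localhost:50051', ChannelType.REMOTE)
# stub = make_ready_client(channel, stop_event=stop_event)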
class Bridge(object):
class TrainerWorkerServicer(tws_grpc.TrainerWorkerServiceServicer):
def __init__(self, bridge):
super(Bridge.TrainerWorkerServicer, self).__init__()
self._bridge = bridge
def Transmit(self, request, context):
return self._bridge._transmit_handler(request)
def StreamTransmit(self, request_iterator, context):
for request in request_iterator:
yield self._bridge._transmit_handler(request)
def LoadDataBlock(self, request, context):
return self._bridge._data_block_handler(request)
def Connect(self, request, context):
return self._bridge._connect_handler(request)
def Heartbeat(self, request, context):
return self._bridge._heartbeat_handler(request)
def Terminate(self, request, context):
return self._bridge._terminate_handler(request)
def __init__(self,
role,
listen_port,
remote_address,
app_id=None,
rank=0,
streaming_mode=True,
compression=grpc.Compression.NoCompression):
self._role = role
self._listen_port = listen_port
self._remote_address = remote_address
if app_id is None:
app_id = 'test_trainer'
self._app_id = app_id
self._rank = rank
self._streaming_mode = streaming_mode
self._compression = compression
self._prefetch_handlers = []
self._data_block_handler_fn = None
# Connection related
self._connected = False
self._terminated = False
self._peer_terminated = False
self._identifier = '%s-%s-%d-%d' % (
app_id, role, rank, int(time.time())) # Ensure unique per run
self._peer_identifier = ''
# data transmit
self._condition = threading.Condition()
self._current_iter_id = None
self._next_iter_id = 0
self._received_data = {}
# grpc client
self._transmit_send_lock = threading.Lock()
self._grpc_options = [
('grpc.max_send_message_length', 2**31-1),
('grpc.max_receive_message_length', 2**31-1)
]
self._channel = make_insecure_channel(
remote_address, ChannelType.REMOTE,
options=self._grpc_options, compression=self._compression)
self._client = tws_grpc.TrainerWorkerServiceStub(self._channel)
self._next_send_seq_num = 0
self._transmit_queue = queue.Queue()
self._client_daemon = None
self._client_daemon_shutdown_fn = None
# server
self._transmit_receive_lock = threading.Lock()
self._next_receive_seq_num = 0
self._server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10),
options=self._grpc_options,
compression=self._compression)
tws_grpc.add_TrainerWorkerServiceServicer_to_server(
Bridge.TrainerWorkerServicer(self), self._server)
self._server.add_insecure_port('[::]:%d' % listen_port)
def __del__(self):
self.terminate()
@property
def role(self):
return self._role
def _client_daemon_fn(self):
stop_event = threading.Event()
generator = None
channel = make_insecure_channel(
self._remote_address, ChannelType.REMOTE,
options=self._grpc_options, compression=self._compression)
client = make_ready_client(channel, stop_event)
lock = threading.Lock()
resend_list = collections.deque()
def shutdown_fn():
with lock:
while len(resend_list) > 0 or not self._transmit_queue.empty():
                logging.debug(
                    "Waiting for the resend queue to drain. "
                    "Resend queue size: %d", len(resend_list))
lock.release()
time.sleep(1)
lock.acquire()
stop_event.set()
if generator is not None:
generator.cancel()
self._client_daemon_shutdown_fn = shutdown_fn
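        # Streaming loop: each attempt first replays unacknowledged
        # messages from resend_list, then pulls fresh items off the
        # transmit queue. Peer responses carry next_seq_num, which
        # acknowledges everything below it and lets resend_list shrink.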
while not stop_event.is_set():
try:
def iterator():
with lock:
resend_msgs = list(resend_list)
for item in resend_msgs:
logging.warning("Streaming resend message seq_num=%d",
item.seq_num)
yield item
while True:
item = self._transmit_queue.get()
with lock:
resend_list.append(item)
logging.debug("Streaming send message seq_num=%d",
item.seq_num)
yield item
generator = client.StreamTransmit(iterator())
for response in generator:
if response.status.code == common_pb.STATUS_SUCCESS:
logging.debug("Message with seq_num=%d is "
"confirmed", response.next_seq_num-1)
elif response.status.code == \
common_pb.STATUS_MESSAGE_DUPLICATED:
logging.debug("Resent Message with seq_num=%d is "
"confirmed", response.next_seq_num-1)
elif response.status.code == \
common_pb.STATUS_MESSAGE_MISSING:
raise RuntimeError("Message with seq_num=%d is "
"missing!" % (response.next_seq_num-1))
else:
raise RuntimeError("Trainsmit failed with %d" %
response.status.code)
with lock:
while resend_list and \
resend_list[0].seq_num < response.next_seq_num:
resend_list.popleft()
min_seq_num_to_resend = resend_list[0].seq_num \
if resend_list else "NaN"
logging.debug(
"Resend queue size: %d, starting from seq_num=%s",
len(resend_list), min_seq_num_to_resend)
except Exception as e: # pylint: disable=broad-except
if not stop_event.is_set():
logging.warning("Bridge streaming broken: %s.", repr(e))
            finally:
                if generator is not None:
                    generator.cancel()
channel.close()
logging.warning(
"Restarting streaming: resend queue size: %d, "
"starting from seq_num=%s", len(resend_list),
                    resend_list[0].seq_num if resend_list else "NaN")
channel = make_insecure_channel(
self._remote_address, ChannelType.REMOTE,
options=self._grpc_options, compression=self._compression)
client = make_ready_client(channel, stop_event)
self._check_remote_heartbeat()
def _transmit(self, msg):
assert self._connected, "Cannot transmit before connect"
with self._transmit_send_lock:
msg.seq_num = self._next_send_seq_num
self._next_send_seq_num += 1
if self._streaming_mode:
self._transmit_queue.put(msg)
return
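            # Synchronous mode: retry the unary Transmit RPC until the
            # peer acknowledges, rebuilding the channel after each failure.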
while True:
try:
rsp = self._client.Transmit(msg)
assert rsp.status.code == common_pb.STATUS_SUCCESS, \
"Transmit error with code %d."%rsp.status.code
break
except Exception as e: # pylint: disable=broad-except
logging.warning("Bridge transmit failed: %s. " \
"Retry in 1 second...", repr(e))
self._channel.close()
time.sleep(1)
self._channel = make_insecure_channel(
self._remote_address, ChannelType.REMOTE,
options=self._grpc_options,
compression=self._compression)
self._client = make_ready_client(self._channel)
self._check_remote_heartbeat()
def _transmit_handler(self, request):
assert self._connected, "Cannot transmit before connect"
with self._transmit_receive_lock:
logging.debug("Received message seq_num=%d."
" Wanted seq_num=%d.",
request.seq_num, self._next_receive_seq_num)
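            # Sequence-number handshake: a seq_num ahead of ours means
            # messages were lost (MISSING), one behind is a peer resend
            # (DUPLICATED), and only the expected seq_num is accepted.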
if request.seq_num > self._next_receive_seq_num:
return tws_pb.TrainerWorkerResponse(
status=common_pb.Status(
code=common_pb.STATUS_MESSAGE_MISSING),
next_seq_num=self._next_receive_seq_num)
if request.seq_num < self._next_receive_seq_num:
return tws_pb.TrainerWorkerResponse(
status=common_pb.Status(
code=common_pb.STATUS_MESSAGE_DUPLICATED),
next_seq_num=self._next_receive_seq_num)
# request.seq_num == self._next_receive_seq_num
self._next_receive_seq_num += 1
if request.HasField('start'):
with self._condition:
self._received_data[request.start.iter_id] = {}
elif request.HasField('commit'):
pass
elif request.HasField('data'):
with self._condition:
assert request.data.iter_id in self._received_data
self._received_data[
request.data.iter_id][
request.data.name] = request.data
                self._condition.notify_all()
elif request.HasField('prefetch'):
for func in self._prefetch_handlers:
func(request.prefetch)
else:
return tws_pb.TrainerWorkerResponse(
status=common_pb.Status(
code=common_pb.STATUS_INVALID_REQUEST),
next_seq_num=self._next_receive_seq_num)
return tws_pb.TrainerWorkerResponse(
next_seq_num=self._next_receive_seq_num)
def _data_block_handler(self, request):
assert self._connected, "Cannot load data before connect"
if not self._data_block_handler_fn:
raise RuntimeError("Received DataBlockMessage but" \
" no handler registered")
self._data_block_handler_fn(request)
return common_pb.Status(code=common_pb.STATUS_SUCCESS)
def _connect_handler(self, request):
assert request.app_id == self._app_id, \
"Connection failed. Application id mismatch: %s vs %s"%(
request.app_id, self._app_id)
assert request.worker_rank == self._rank, \
"Connection failed. Rank mismatch: %s vs %s"%(
request.worker_rank, self._rank)
assert len(request.identifier) > 0, \
"Connection failed. An identifier should be offered!"
with self._condition:
if self._connected:
                # If a duplicated request from peer, just ignore it.
# If a new connect request from peer, suicide.
if request.identifier != self._peer_identifier:
logging.error('Suicide as peer %s has restarted!',
request.identifier)
os._exit(138) # Tell Scheduler to restart myself
else:
self._peer_identifier = request.identifier
self._connected = True
            self._condition.notify_all()
return tws_pb.ConnectResponse(app_id=self._app_id,
worker_rank=self._rank)
def _heartbeat_handler(self, request):
return tws_pb.HeartbeatResponse(app_id=self._app_id,
worker_rank=self._rank,
current_iter_id=self._current_iter_id)
def _terminate_handler(self, request):
with self._condition:
self._peer_terminated = True
            self._condition.notify_all()
return tws_pb.TerminateResponse()
def _check_remote_heartbeat(self):
try:
rsp = self._client.Heartbeat(tws_pb.HeartbeatRequest())
logging.debug("Heartbeat success: %s:%d at iteration %s.",
rsp.app_id, rsp.worker_rank, rsp.current_iter_id)
return True
except Exception as e: # pylint: disable=broad-except
logging.warning("Heartbeat request failed: %s", repr(e))
return False
def connect(self):
if self._connected:
logging.warning("Bridge already connected!")
return
self._server.start()
# Get ACK from peer
msg = tws_pb.ConnectRequest(app_id=self._app_id,
worker_rank=self._rank,
identifier=self._identifier)
while True:
try:
self._client.Connect(msg)
except Exception as e: # pylint: disable=broad-except
logging.warning("Bridge failed to connect: %s. " \
"Retry in 1 second...",
repr(e))
time.sleep(1)
continue
break
        logging.debug('Connected to peer.')
# Ensure REQ from peer
with self._condition:
while not self._connected:
self._condition.wait()
        logging.debug('Received connection from peer.')
if self._streaming_mode:
            logging.debug('Entering streaming mode.')
self._client_daemon = threading.Thread(
target=self._client_daemon_fn)
self._client_daemon.start()
        logging.debug('Connect finished.')
def terminate(self, forced=False):
if not self._connected or self._terminated:
return
self._terminated = True
try:
if self._client_daemon is not None:
self._client_daemon_shutdown_fn()
self._client_daemon.join()
except Exception as e: # pylint: disable=broad-except
logging.warning(
'Error during streaming shutdown: %s', repr(e))
# Get ACK from peer
while True:
try:
self._client.Terminate(tws_pb.TerminateRequest())
break
except Exception as e: # pylint: disable=broad-except
logging.warning(
"Failed to send terminate message: %s. " \
"Retry in 1 second...", repr(e))
time.sleep(1)
continue
logging.debug('Waiting for peer to terminate.')
# Ensure REQ from peer
with self._condition:
while not self._peer_terminated:
self._condition.wait()
self._server.stop(None)
logging.debug("Bridge connection terminated")
@property
def current_iter_id(self):
return self._current_iter_id
def new_iter_id(self):
iter_id = self._next_iter_id
self._next_iter_id += 1
return iter_id
def start(self, iter_id):
assert self._current_iter_id is None, "Last iter not finished"
self._current_iter_id = iter_id
msg = tws_pb.TrainerWorkerMessage(start=tws_pb.StartMessage(
iter_id=iter_id))
self._transmit(msg)
logging.debug("Starting iter %d", iter_id)
def commit(self):
assert self._current_iter_id is not None, "Not started yet"
with self._condition:
last_iter_id = self._current_iter_id
self._current_iter_id = None
if last_iter_id in self._received_data:
del self._received_data[last_iter_id]
msg = tws_pb.TrainerWorkerMessage(commit=tws_pb.CommitMessage(
iter_id=last_iter_id))
self._transmit(msg)
logging.debug("iter %d committed", last_iter_id)
def register_data_block_handler(self, func):
assert self._data_block_handler_fn is None, \
"DataBlock handler already registered"
self._data_block_handler_fn = func
def load_data_block(self, count, block_id):
msg = tws_pb.LoadDataBlockRequest(count=count, block_id=block_id)
logging.debug("sending DataBlock with id %s", block_id)
return self._client.LoadDataBlock(msg)
def register_prefetch_handler(self, func):
self._prefetch_handlers.append(func)
def prefetch(self, iter_id, sample_ids):
msg = tws_pb.TrainerWorkerMessage(prefetch=tws_pb.PrefetchMessage(
iter_id=iter_id, sample_ids=sample_ids))
self._transmit(msg)
def send_proto(self, iter_id, name, proto):
any_proto = google.protobuf.any_pb2.Any()
any_proto.Pack(proto)
msg = tws_pb.TrainerWorkerMessage(data=tws_pb.DataMessage(
iter_id=iter_id, name=name, any_data=any_proto))
self._transmit(msg)
logging.debug('Data: send protobuf %s for iter %d. seq_num=%d.',
name, iter_id, msg.seq_num)
def send(self, iter_id, name, x):
msg = tws_pb.TrainerWorkerMessage(data=tws_pb.DataMessage(
iter_id=iter_id, name=name, tensor=tf.make_tensor_proto(x)))
self._transmit(msg)
logging.debug('Data: send %s for iter %d. seq_num=%d.',
name, iter_id, msg.seq_num)
def send_op(self, name, x):
def func(x):
assert self._current_iter_id is not None, "Bridge not started"
self.send(self._current_iter_id, name, x.numpy())
out = tf.py_function(func=func, inp=[x], Tout=[], name='send_' + name)
return out
def receive_proto(self, iter_id, name):
logging.debug('Data: Waiting to receive proto %s for iter %d.',
name, iter_id)
with self._condition:
while (iter_id not in self._received_data) \
or (name not in self._received_data[iter_id]):
self._condition.wait()
data = self._received_data[iter_id][name]
logging.debug('Data: received %s for iter %d.', name, iter_id)
return data.any_data
def receive(self, iter_id, name):
logging.debug('Data: Waiting to receive %s for iter %d.', name,
iter_id)
with self._condition:
while (iter_id not in self._received_data) \
or (name not in self._received_data[iter_id]):
self._condition.wait()
data = self._received_data[iter_id][name]
logging.debug('Data: received %s for iter %d.', name, iter_id)
return tf.make_ndarray(data.tensor)
def receive_op(self, name, dtype):
def func():
assert self._current_iter_id is not None, "Bridge not started"
x = self.receive(self._current_iter_id, name)
return tf.convert_to_tensor(x, dtype=dtype)
return tf.py_function(func=func, inp=[], Tout=[dtype])[0]
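# Minimal usage sketch (not from the original source): assumes a peer
# Bridge is live at the remote address and that `features` is a numpy
# array; names below mirror the public API defined above.
#
#   bridge = Bridge('leader', 50051, 'peer-host:50051')
#   bridge.connect()
#   iter_id = bridge.new_iter_id()
#   bridge.start(iter_id)
#   bridge.send(iter_id, 'activations', features)
#   grads = bridge.receive(iter_id, 'gradients')
#   bridge.commit()
#   bridge.terminate()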
|
client.py
|
from tkinter import *
import socket
import threading
import sys
class ChatApplication():
def __init__(self):
self.chatroom_size = '750x450+250+100'
self.chatroom_title = 'Chatroom'
        self.backgroundcolor = 'pink'
self.chatbox_size = (90, 25)
self.messagebox_width = 70
self.send_button_background_color = 'cyan'
self.send_button_foreground_color = 'black'
def get_user_name(self):
self.user_name = input("Enter User name : ")
    def get_connection_data(self):
        ip = input("Enter ip to connect [Enter blank for default]")
        # read the port as text first: int('') would raise ValueError
        port = input("Enter the port to connect [Enter blank for default]")
        if len(ip) == 0:
            ip = "0.0.0.0"
        if len(port) == 0:
            port = 8000
        else:
            port = int(port)
        self.connection_data = (ip, port)
def connect_to_server(self):
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.s.connect(self.connection_data)
        except OSError:
print("Server Not found!")
sys.exit()
def contact_server(self):
self.s.send(str.encode(self.user_name))
self.welcome_msg = self.s.recv(2048).decode()
if "ERROR" in self.welcome_msg:
print(self.welcome_msg)
sys.exit()
self.is_connected = True
    def receive_message_from_server(self):
while self.is_connected:
message = self.s.recv(2048).decode()
            parts = message.split(':', 1)
            if len(parts) == 2 and self.user_name in parts[0]:
                message = "Me :" + parts[1]
self.chatbox.insert(END, str(message))
def send_messages_to_server(self):
message = self.messagebox.get()
self.s.send(str.encode(message))
self.messagebox.delete(0, END)
def chat_screen(self):
self.root = Tk()
self.root.geometry(self.chatroom_size)
self.root.title(self.chatroom_title)
        self.root.config(bg = self.backgroundcolor)
self.chatbox = Listbox(self.root, height = self.chatbox_size[1], width = self.chatbox_size[0])
self.chatbox.grid(row = 0, column = 0, padx = (35,20), pady = 30, columnspan = 3)
self.messagebox = Entry(self.root, width = self.messagebox_width)
self.messagebox.grid(row = 1, column = 0, columnspan = 2, padx = (35, 0))
self.send_button = Button(self.root, text = 'Send',
bg = self.send_button_background_color,
fg = self.send_button_foreground_color,
command = self.send_messages_to_server)
self.send_button.grid(row = 1, column = 2)
self.chatbox.insert(END, str(self.welcome_msg))
        listen_for_messages_thread = threading.Thread(target = self.receive_message_from_server)
listen_for_messages_thread.start()
self.root.mainloop()
if __name__ == "__main__":
chat_app = ChatApplication()
chat_app.get_connection_data()
chat_app.get_user_name()
chat_app.connect_to_server()
chat_app.contact_server()
chat_app.chat_screen()
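# Example session (hypothetical; assumes a compatible chat server is
# already listening on the chosen address):
#   $ python client.py
#   Enter ip to connect [Enter blank for default]
#   Enter the port to connect [Enter blank for default]
#   Enter User name : alice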
|
pyshell.py
|
import sys
try:
from tkinter import *
except ImportError:
    print(
        "** IDLE can't import Tkinter.\n"
        "Your Python may not be configured for Tk. **",
        file=sys.__stderr__)
raise SystemExit(1)
import tkinter.messagebox as tkMessageBox
if TkVersion < 8.5:
root = Tk()
root.withdraw()
tkMessageBox.showerror('Idle Cannot Start',
'Idle requires tcl/tk 8.5+, not %s.' % TkVersion, parent=root)
raise SystemExit(1)
from code import InteractiveInterpreter
import linecache
import os
import os.path
from platform import python_version
import re
import socket
import subprocess
import threading
import time
import tokenize
import warnings
from idlelib.colorizer import ColorDelegator
from idlelib.config import idleConf
from idlelib import debugger
from idlelib import debugger_r
from idlelib.editor import EditorWindow, fixwordbreaks
from idlelib.filelist import FileList
from idlelib.outwin import OutputWindow
from idlelib import rpc
from idlelib.run import idle_formatwarning, PseudoInputFile, PseudoOutputFile
from idlelib.undo import UndoDelegator
HOST = '127.0.0.1'
PORT = 0
warning_stream = sys.__stderr__
def idle_showwarning(message, category, filename, lineno,
                     file=None, line=None):
"""Show Idle-format warning (after replacing warnings.showwarning).
The differences are the formatter called, the file=None replacement,
which can be None, the capture of the consequence AttributeError,
and the output of a hard-coded prompt.
"""
if file is None:
file = warning_stream
try:
file.write(idle_formatwarning(message, category, filename, lineno,
line=line))
file.write('>>> ')
except (AttributeError, OSError):
pass
_warnings_showwarning = None
def capture_warnings(capture):
"""Replace warning.showwarning with idle_showwarning, or reverse."""
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = idle_showwarning
elif _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
capture_warnings(True)
def extended_linecache_checkcache(filename=None,
                                  orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(skipping them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for key in list(cache):
if key[:1] + key[-1:] == '<>':
save[key] = cache.pop(key)
orig_checkcache(filename)
cache.update(save)
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
"""Regular text edit window in IDLE, supports breakpoints"""
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind('<<set-breakpoint-here>>', self.set_breakpoint_here)
self.text.bind('<<clear-breakpoint-here>>', self.clear_breakpoint_here)
self.text.bind('<<open-python-shell>>', self.flist.open_shell)
self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
'breakpoints.lst')
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
if self.io.filename:
self.restore_file_breaks()
self.color_breakpoint_text()
    rmenu_specs = [
        ('Cut', '<<cut>>', 'rmenu_check_cut'),
        ('Copy', '<<copy>>', 'rmenu_check_copy'),
        ('Paste', '<<paste>>', 'rmenu_check_paste'),
        (None, None, None),
        ('Set Breakpoint', '<<set-breakpoint-here>>', None),
        ('Clear Breakpoint', '<<clear-breakpoint-here>>', None)]
def color_breakpoint_text(self, color=True):
"""Turn colorizing of breakpoint text on or off"""
if self.io is None:
return
if color:
theme = idleConf.CurrentTheme()
cfg = idleConf.GetHighlight(theme, 'break')
else:
cfg = {'foreground': '', 'background': ''}
self.text.tag_config('BREAK', cfg)
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add('BREAK', '%d.0' % lineno, '%d.0' % (lineno + 1))
try:
self.breakpoints.index(lineno)
except ValueError:
self.breakpoints.append(lineno)
try:
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except:
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index('insert')))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index('insert')))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove('BREAK', 'insert linestart', 'insert lineend +1char')
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove('BREAK', '1.0', END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"""Save breakpoints when file is saved"""
breaks = self.breakpoints
filename = self.io.filename
try:
with open(self.breakpointPath, 'r') as fp:
lines = fp.readlines()
except OSError:
lines = []
try:
with open(self.breakpointPath, 'w') as new_file:
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
except OSError as err:
if not getattr(self.root, 'breakpoint_error_displayed', False):
self.root.breakpoint_error_displayed = True
                tkMessageBox.showerror(title='IDLE Error',
                    message='Unable to update breakpoint list:\n%s' % str(err),
                    parent=self.text)
def restore_file_breaks(self):
self.text.update()
if self.io is None:
return
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
with open(self.breakpointPath, 'r') as fp:
lines = fp.readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename) + 1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"""Retrieves all the breakpoints in the current window"""
text = self.text
ranges = text.tag_ranges('BREAK')
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index].string))
end = int(float(ranges[index + 1].string))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
def _close(self):
"""Extend base method - clear breaks when module is closed"""
self.clear_file_breaks()
EditorWindow._close(self)
class PyShellFileList(FileList):
"""Extend base class: IDLE supports a shell and breakpoints"""
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.top.wakeup()
else:
self.pyshell = PyShell(self)
if self.pyshell:
if not self.pyshell.begin():
return None
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"""Extend base class: colorizer for the shell window itself"""
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove('TODO', '1.0', 'iomark')
self.tag_add('SYNC', '1.0', 'iomark')
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.CurrentTheme()
        self.tagdefs.update({
            'stdin': {'background': None, 'foreground': None},
            'stdout': idleConf.GetHighlight(theme, 'stdout'),
            'stderr': idleConf.GetHighlight(theme, 'stderr'),
            'console': idleConf.GetHighlight(theme, 'console')})
def removecolors(self):
for tag in self.tagdefs:
self.tag_remove(tag, 'iomark', 'end')
class ModifiedUndoDelegator(UndoDelegator):
"""Extend base class: forbid insert/delete before the I/O mark"""
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, '<', 'iomark'):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, '<', 'iomark'):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"""Override the base class - just re-raise EOFError"""
raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.save_warnings_filters = None
self.restarting = False
self.subprocess_arglist = None
self.port = PORT
self.original_compiler_flags = self.compile.compiler.flags
_afterid = None
rpcclt = None
rpcsubproc = None
def spawn_subprocess(self):
if self.subprocess_arglist is None:
self.subprocess_arglist = self.build_subprocess_arglist()
self.rpcsubproc = subprocess.Popen(self.subprocess_arglist)
def build_subprocess_arglist(self):
assert self.port != 0, 'Socket should have been assigned a port number.'
w = [('-W' + s) for s in sys.warnoptions]
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
if __name__ == 'idlelib.pyshell':
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
else:
command = "__import__('run').main(%r)" % (del_exitf,)
return [sys.executable] + w + ['-c', command, str(self.port)]
def start_subprocess(self):
addr = HOST, self.port
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except OSError:
pass
else:
self.display_port_binding_error()
return None
self.port = self.rpcclt.listening_sock.getsockname()[1]
if PORT != 0:
            self.rpcclt.listening_sock.setsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.spawn_subprocess()
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except socket.timeout:
self.display_no_subprocess_error()
return None
self.rpcclt.register('console', self.tkconsole)
self.rpcclt.register('stdin', self.tkconsole.stdin)
self.rpcclt.register('stdout', self.tkconsole.stdout)
self.rpcclt.register('stderr', self.tkconsole.stderr)
self.rpcclt.register('flist', self.tkconsole.flist)
self.rpcclt.register('linecache', linecache)
self.rpcclt.register('interp', self)
self.transfer_path(with_cwd=True)
self.poll_subprocess()
return self.rpcclt
def restart_subprocess(self, with_cwd=False, filename=''):
if self.restarting:
return self.rpcclt
self.restarting = True
debug = self.getdebugger()
if debug:
try:
debugger_r.close_subprocess_debugger(self.rpcclt)
except:
pass
self.rpcclt.close()
self.terminate_subprocess()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except socket.timeout:
self.display_no_subprocess_error()
return None
self.transfer_path(with_cwd=with_cwd)
console.stop_readline()
console.text.delete('iomark', 'end-1c')
tag = 'RESTART: ' + (filename if filename else 'Shell')
halfbar = (int(console.width) - len(tag) - 4) // 2 * '='
console.write('\n{0} {1} {0}'.format(halfbar, tag))
console.text.mark_set('restart', 'end-1c')
console.text.mark_gravity('restart', 'left')
if not filename:
console.showprompt()
if debug:
debugger_r.restart_subprocess_debugger(self.rpcclt)
debug.load_breakpoints()
self.compile.compiler.flags = self.original_compiler_flags
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall('exec', 'interrupt_the_server', (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
if self._afterid is not None:
self.tkconsole.text.after_cancel(self._afterid)
try:
self.rpcclt.listening_sock.close()
except AttributeError:
pass
try:
self.rpcclt.close()
except AttributeError:
pass
self.terminate_subprocess()
self.tkconsole.executing = False
self.rpcclt = None
def terminate_subprocess(self):
"""Make sure subprocess is terminated"""
try:
self.rpcsubproc.kill()
except OSError:
return
else:
try:
self.rpcsubproc.wait()
except OSError:
return
def transfer_path(self, with_cwd=False):
if with_cwd:
path = ['']
path.extend(sys.path)
else:
path = sys.path
self.runcommand(
"""if 1:
import sys as _sys
_sys.path = %r
del _sys
"""
% (path,))
active_seq = None
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, OSError, KeyboardInterrupt):
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == 'OK':
if what is not None:
print(repr(what), file=console)
elif how == 'EXCEPTION':
if self.tkconsole.getvar('<<toggle-jit-stack-viewer>>'):
self.remote_stack_viewer()
elif how == 'ERROR':
errmsg = 'pyshell.ModifiedInterpreter: Subprocess ERROR:\n'
print(errmsg, what, file=sys.__stderr__)
print(errmsg, what, file=console)
try:
self.tkconsole.endexecuting()
except AttributeError:
pass
if not self.tkconsole.closing:
            self._afterid = self.tkconsole.text.after(
                self.tkconsole.pollinterval, self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
from idlelib import debugobj_r
oid = self.rpcclt.remotequeue('exec', 'stackviewer', ('flist',), {})
if oid is None:
self.tkconsole.root.bell()
return
item = debugobj_r.StubObjectTreeItem(self.rpcclt, oid)
from idlelib.tree import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.CurrentTheme()
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill='both')
node = TreeNode(sc.canvas, None, item)
node.expand()
gid = 0
def execsource(self, source):
"""Like runsource() but assumes complete exec source"""
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"""Execute an existing file"""
if source is None:
with tokenize.open(filename) as fp:
source = fp.read()
try:
code = compile(source, filename, 'exec')
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
            print('*** Error in script or command!\n'
                  'Traceback (most recent call last):',
                  file=self.tkconsole.stderr)
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"""Extend base class method: Stuff the source in the line cache first"""
filename = self.stuffsource(source)
self.more = 0
self.save_warnings_filters = warnings.filters[:]
warnings.filterwarnings(action='error', category=SyntaxWarning)
assert isinstance(source, str)
try:
return InteractiveInterpreter.runsource(self, source, filename)
finally:
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
def stuffsource(self, source):
"""Stuff source in the filename cache"""
filename = '<pyshell#%d>' % self.gid
self.gid = self.gid + 1
lines = source.split('\n')
linecache.cache[filename] = len(source) + 1, 0, lines, filename
return filename
def prepend_syspath(self, filename):
"""Prepend sys.path with file's directory if not already included"""
self.runcommand(
"""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
"""
% (filename,))
def showsyntaxerror(self, filename=None):
"""Override Interactive Interpreter method: Use Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
tkconsole = self.tkconsole
text = tkconsole.text
text.tag_remove('ERROR', '1.0', 'end')
type, value, tb = sys.exc_info()
msg = getattr(value, 'msg', '') or value or '<no detail available>'
lineno = getattr(value, 'lineno', '') or 1
offset = getattr(value, 'offset', '') or 0
if offset == 0:
lineno += 1
if lineno == 1:
pos = 'iomark + %d chars' % (offset - 1)
else:
pos = 'iomark linestart + %d lines + %d chars' % (lineno - 1,
offset - 1)
tkconsole.colorize_syntax_error(text, pos)
tkconsole.resetoutput()
self.write('SyntaxError: %s\n' % msg)
tkconsole.showprompt()
def showtraceback(self):
"""Extend base class method to reset output properly"""
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar('<<toggle-jit-stack-viewer>>'):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in list(c.keys()):
if key[:1] + key[-1:] != '<>':
del c[key]
def runcommand(self, code):
"""Run the code without invoking the debugger"""
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue('exec', 'runcode', (code,), {})
else:
exec(code, self.locals)
return 1
def runcode(self, code):
"""Override base class method"""
if self.tkconsole.executing:
            self.restart_subprocess()
self.checklinecache()
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue('exec', 'runcode',
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec(code, self.locals)
except SystemExit:
if not self.tkconsole.closing:
if tkMessageBox.askyesno('Exit?',
'Do you want to exit altogether?', default='yes',
parent=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
                print('IDLE internal error in runcode()',
                      file=self.tkconsole.stderr)
self.showtraceback()
self.tkconsole.endexecuting()
elif self.tkconsole.canceled:
self.tkconsole.canceled = False
print('KeyboardInterrupt', file=self.tkconsole.stderr)
else:
self.showtraceback()
finally:
if not use_subprocess:
try:
self.tkconsole.endexecuting()
except AttributeError:
pass
def write(self, s):
"""Override base class method"""
return self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
        tkMessageBox.showerror('Port Binding Error',
            "IDLE can't bind to a TCP/IP port, which is necessary to "
            'communicate with its Python execution server. This might be '
            'because no networking is installed on this computer. Run IDLE '
            'with the -n command line switch to start without a subprocess '
            "and refer to Help/IDLE Help 'Running without a subprocess' "
            'for further details.',
            parent=self.tkconsole.text)
def display_no_subprocess_error(self):
        tkMessageBox.showerror('Subprocess Startup Error',
            "IDLE's subprocess didn't make connection. Either IDLE can't "
            'start a subprocess or personal firewall software is blocking '
            'the connection.',
            parent=self.tkconsole.text)
def display_executing_dialog(self):
        tkMessageBox.showerror('Already executing',
            'The Python Shell window is already executing a command; '
            'please wait until it is finished.',
            parent=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = 'Python ' + python_version() + ' Shell'
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
menu_specs = [('file', '_File'), ('edit', '_Edit'), ('debug', '_Debug'),
('options', '_Options'), ('windows', '_Window'), ('help', '_Help')]
from idlelib.history import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != 'shell':
ms.insert(2, ('shell', 'She_ll'))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
OutputWindow.__init__(self, flist, None, None)
self.usetabs = True
self.indentwidth = 8
self.context_use_ps1 = True
text = self.text
text.configure(wrap='char')
text.bind('<<newline-and-indent>>', self.enter_callback)
text.bind('<<plain-newline-and-indent>>', self.linefeed_callback)
text.bind('<<interrupt-execution>>', self.cancel_callback)
text.bind('<<end-of-file>>', self.eof_callback)
text.bind('<<open-stack-viewer>>', self.open_stack_viewer)
text.bind('<<toggle-debugger>>', self.toggle_debugger)
text.bind('<<toggle-jit-stack-viewer>>', self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind('<<view-restart>>', self.view_restart_mark)
text.bind('<<restart-shell>>', self.restart_shell)
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
from idlelib import iomenu
self.stdin = PseudoInputFile(self, 'stdin', iomenu.encoding)
self.stdout = PseudoOutputFile(self, 'stdout', iomenu.encoding)
self.stderr = PseudoOutputFile(self, 'stderr', iomenu.encoding)
self.console = PseudoOutputFile(self, 'console', iomenu.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self.stdin
try:
import pydoc
pydoc.pager = pydoc.plainpager
except:
sys.stderr = sys.__stderr__
raise
self.history = self.History(self.text)
self.pollinterval = 50
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
_stop_readline_flag = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
'You can only toggle the debugger when idle', parent=self.text)
self.set_debugger_indicator()
return 'break'
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar('<<toggle-debugger>>', not not db)
def toggle_jit_stack_viewer(self, event=None):
pass
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
debugger_r.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write('[DEBUG OFF]\n')
sys.ps1 = '>>> '
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = debugger_r.start_remote_debugger(self.interp.rpcclt, self
)
else:
dbg_gui = debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
sys.ps1 = '[DEBUG ON]\n>>> '
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"""Helper for ModifiedInterpreter"""
self.resetoutput()
self.executing = 1
def endexecuting(self):
"""Helper for ModifiedInterpreter"""
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"""Extend EditorWindow.close()"""
if self.executing:
            response = tkMessageBox.askokcancel('Kill?',
                'Your program is still running!\n'
                'Do you want to kill it?',
                default='ok', parent=self.text)
if response is False:
return 'cancel'
self.stop_readline()
self.canceled = True
self.closing = True
return EditorWindow.close(self)
def _close(self):
"""Extend EditorWindow._close(), shut down debugger and execution server"""
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"""Override EditorWindow method: never remove the colorizer"""
return True
def short_title(self):
return self.shell_title
COPYRIGHT = (
'Type "copyright", "credits" or "license()" for more information.')
def begin(self):
self.text.mark_set('iomark', 'insert')
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
            nosub = ('==== No Subprocess ====\n\n'
                     'WARNING: Running IDLE without a Subprocess is deprecated\n'
                     'and will be removed in a later version. See Help/IDLE Help\n'
                     'for details.\n\n')
sys.displayhook = rpc.displayhook
self.write('Python %s on %s\n%s\n%s' % (sys.version, sys.platform,
self.COPYRIGHT, nosub))
self.text.focus_force()
self.showprompt()
import tkinter
tkinter._default_root = None
return True
def stop_readline(self):
if not self.reading:
return
self._stop_readline_flag = True
self.top.quit()
def readline(self):
save = self.reading
try:
self.reading = 1
self.top.mainloop()
finally:
self.reading = save
if self._stop_readline_flag:
self._stop_readline_flag = False
return ''
line = self.text.get('iomark', 'end-1c')
if len(line) == 0:
line = '\n'
self.resetoutput()
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = 0
line = ''
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare('sel.first', '!=', 'sel.last'):
return
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write('KeyboardInterrupt\n')
self.showprompt()
return 'break'
self.endoffile = 0
self.canceled = 1
if self.executing and self.interp.rpcclt:
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit()
return 'break'
def eof_callback(self, event):
if self.executing and not self.reading:
return
        if not (self.text.compare('iomark', '==', 'insert') and
                self.text.compare('insert', '==', 'end-1c')):
return
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return 'break'
def linefeed_callback(self, event):
if self.reading:
self.text.insert('insert', '\n')
self.text.see('insert')
else:
self.newline_and_indent_event(event)
return 'break'
def enter_callback(self, event):
if self.executing and not self.reading:
return
try:
sel = self.text.get('sel.first', 'sel.last')
if sel:
if self.text.compare('sel.last', '<=', 'iomark'):
self.recall(sel, event)
return 'break'
except:
pass
if self.text.compare('insert', '<', 'iomark linestart'):
prev = self.text.tag_prevrange('stdin', 'insert')
if prev and self.text.compare('insert', '<', prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return 'break'
next = self.text.tag_nextrange('stdin', 'insert')
if next and self.text.compare('insert lineend', '>=', next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return 'break'
indices = self.text.tag_nextrange('console', 'insert linestart')
if indices and self.text.compare(indices[0], '<=',
'insert linestart'):
self.recall(self.text.get(indices[1], 'insert lineend'), event)
else:
self.recall(self.text.get('insert linestart',
'insert lineend'), event)
return 'break'
if self.text.compare('insert', '<', 'iomark'):
self.text.mark_set('insert', 'iomark')
s = self.text.get('insert', 'end-1c')
if s and not s.strip():
self.text.delete('insert', 'end-1c')
if self.text.compare('insert', '<', 'end-1c linestart'):
self.newline_and_indent_event(event)
return 'break'
self.text.mark_set('insert', 'end-1c')
if self.reading:
self.text.insert('insert', '\n')
self.text.see('insert')
else:
self.newline_and_indent_event(event)
self.text.tag_add('stdin', 'iomark', 'end-1c')
self.text.update_idletasks()
if self.reading:
self.top.quit()
else:
self.runit()
return 'break'
def recall(self, s, event):
s = re.sub('^\\s*\\n', '', s)
s = re.sub('\\n\\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove('sel', '1.0', 'end')
self.text.mark_set('insert', 'end-1c')
prefix = self.text.get('insert linestart', 'insert')
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get('insert linestart', 'insert')
self.text.insert('insert', lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search('^([ \\t]*)', lines[0]).group(0)
new_base_indent = re.search('^([ \\t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n' + line.rstrip())
finally:
self.text.see('insert')
self.text.undo_block_stop()
def runit(self):
line = self.text.get('iomark', 'end-1c')
i = len(line)
while i > 0 and line[i - 1] in ' \t':
i = i - 1
if i > 0 and line[i - 1] == '\n':
i = i - 1
while i > 0 and line[i - 1] in ' \t':
i = i - 1
line = line[:i]
self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
            tkMessageBox.showerror('No stack trace',
                'There is no stack trace yet.\n'
                '(sys.last_traceback is not defined)',
                parent=self.text)
return
from idlelib.stackviewer import StackBrowser
StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see('iomark')
self.text.see('restart')
def restart_shell(self, event=None):
"""Callback for Run/Restart Shell Cntl-F6"""
self.interp.restart_subprocess(with_cwd=True)
def showprompt(self):
self.resetoutput()
try:
s = str(sys.ps1)
except:
s = ''
self.console.write(s)
self.text.mark_set('insert', 'end-1c')
self.set_line_and_column()
self.io.reset_undo()
def resetoutput(self):
source = self.text.get('iomark', 'end-1c')
if self.history:
self.history.store(source)
if self.text.get('end-2c') != '\n':
self.text.insert('end-1c', '\n')
self.text.mark_set('iomark', 'end-1c')
self.set_line_and_column()
def write(self, s, tags=()):
if isinstance(s, str) and len(s) and max(s) > '\uffff':
for start, char in enumerate(s):
if char > '\uffff':
break
raise UnicodeEncodeError('UCS-2', char, start, start + 1,
'Non-BMP character not supported in Tk')
try:
self.text.mark_gravity('iomark', 'right')
count = OutputWindow.write(self, s, tags, 'iomark')
self.text.mark_gravity('iomark', 'left')
except:
raise
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
return count
def rmenu_check_cut(self):
try:
if self.text.compare('sel.first', '<', 'iomark'):
return 'disabled'
except TclError:
return 'disabled'
return super().rmenu_check_cut()
def rmenu_check_paste(self):
if self.text.compare('insert', '<', 'iomark'):
return 'disabled'
return super().rmenu_check_paste()
def fix_x11_paste(root):
"""Make paste replace selection on x11. See issue #5124."""
if root._windowingsystem == 'x11':
for cls in ('Text', 'Entry', 'Spinbox'):
            root.bind_class(
                cls, '<<Paste>>',
                'catch {%W delete sel.first sel.last}\n' +
                root.bind_class(cls, '<<Paste>>'))
usage_msg = """
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (DEPRECATED,
see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print(sys.argv)" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print(sys.argv)" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
import getopt
from platform import system
from idlelib import testing
from idlelib import macosx
global flist, root, use_subprocess
capture_warnings(True)
use_subprocess = True
enable_shell = False
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], 'c:deihnr:st:')
except getopt.error as msg:
print('Error: %s\n%s' % (msg, usage_msg), file=sys.stderr)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
print(' Warning: running IDLE without a subprocess is deprecated.',
file=sys.stderr)
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print('No script file: ', script)
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if not dir in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if dir not in sys.path:
sys.path.insert(0, dir)
edit_start = idleConf.GetOption('main', 'General', 'editor-on-startup',
type='bool')
enable_edit = enable_edit or edit_start
enable_shell = enable_shell or not enable_edit
if use_subprocess and not testing:
NoDefaultRoot()
root = Tk(className='Idle')
root.withdraw()
icondir = os.path.join(os.path.dirname(__file__), 'Icons')
if system() == 'Windows':
iconfile = os.path.join(icondir, 'idle.ico')
root.wm_iconbitmap(default=iconfile)
else:
ext = '.png' if TkVersion >= 8.6 else '.gif'
        iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext))
                     for size in (16, 32, 48)]
        icons = [PhotoImage(master=root, file=iconfile)
                 for iconfile in iconfiles]
root.wm_iconphoto(True, *icons)
fixwordbreaks(root)
fix_x11_paste(root)
flist = PyShellFileList(root)
macosx.setupApp(root, flist)
if enable_edit:
if not (cmd or script):
for filename in args[:]:
if flist.open(filename) is None:
args.remove(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return
if macosx.isAquaTk() and flist.dict:
shell.top.lower()
else:
shell = flist.pyshell
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get('IDLESTARTUP') or os.environ.get(
'PYTHONSTARTUP')
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
if cmd or script:
shell.interp.runcommand(
"""if 1:
import sys as _sys
_sys.argv = %r
del _sys
"""
% (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
elif shell:
tkversionwarning = macosx.tkVersionWarning(root)
if tkversionwarning:
shell.interp.runcommand("print('%s')" % tkversionwarning)
while flist.inversedict:
root.mainloop()
root.destroy()
capture_warnings(False)
if __name__ == '__main__':
sys.modules['pyshell'] = sys.modules['__main__']
main()
capture_warnings(False)
|
webcamvideostream.py
|
# import the necessary packages
from threading import Thread
import cv2
class WebcamVideoStream:
def __init__(self, src=0, name="WebcamVideoStream"):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
# initialize the thread name
self.name = name
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
t = Thread(target=self.update, name=self.name, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
            if self.stopped:
                # release the camera from the reader thread so the release
                # never races a concurrent stream.read()
                self.stream.release()
                return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
    def stop(self):
        # indicate that the thread should be stopped; the update thread
        # releases the camera stream on its way out
        self.stopped = True
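# Minimal usage sketch (not part of the original module): assumes a
# camera at index 0 and a display available to OpenCV's imshow.
#
#   stream = WebcamVideoStream(src=0).start()
#   while True:
#       frame = stream.read()
#       cv2.imshow("Frame", frame)
#       if cv2.waitKey(1) & 0xFF == ord("q"):
#           break
#   stream.stop()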
|
kb_MetricsServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from kb_Metrics.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'kb_Metrics'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from kb_Metrics.kb_MetricsImpl import kb_Metrics # noqa @IgnorePep8
impl_kb_Metrics = kb_Metrics(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
                if (not self._vargs(method) and
                        len(params) > self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
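        # Symbolic debug levels pass straight through; numeric levels 1-3
        # are mapped onto syslog-style levels 7-9 below.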
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
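
    # The callback exchange above is a plain JSON-RPC 1.1 POST; a sketch
    # of the request body (the id is random at runtime):
    #
    #   {"method": "CallbackServer.get_provenance",
    #    "params": [], "version": "1.1", "id": "123456789"}
    #
    # On success the provenance list is taken from resp['result'][0]; a
    # 500 response with a JSON 'error' member is re-raised as ServerError.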


class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
        super(ServerError, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
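
# Illustrative rendering of ServerError.__str__ (hypothetical values):
#   str(ServerError('JSONRPCError', -32601, 'Method not found'))
#   -> 'JSONRPCError: -32601. Method not found\n'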


def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
    if trustXHeaders:
        if xFF:
            return xFF.split(',')[0].strip()
        if realIP:
            return realIP.strip()
return environ.get('REMOTE_ADDR')
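
# Example of the header precedence above (hypothetical addresses): with
#   HTTP_X_FORWARDED_FOR = '203.0.113.5, 10.0.0.1'
# getIPAddress returns '203.0.113.5', the first (client-most) hop; with
# dont_trust_x_ip_headers set to 'true' in the config, both X- headers
# are ignored and REMOTE_ADDR is returned instead.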


class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'kb_Metrics'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_kb_Metrics.get_app_metrics,
name='kb_Metrics.get_app_metrics',
types=[dict])
self.method_authentication['kb_Metrics.get_app_metrics'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.map_ws_narrative_names,
name='kb_Metrics.map_ws_narrative_names',
types=[list])
self.method_authentication['kb_Metrics.map_ws_narrative_names'] = 'optional' # noqa
self.rpc_service.add(impl_kb_Metrics.update_metrics,
name='kb_Metrics.update_metrics',
types=[dict])
self.method_authentication['kb_Metrics.update_metrics'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_user_details,
name='kb_Metrics.get_user_details',
types=[dict])
self.method_authentication['kb_Metrics.get_user_details'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_nonkbuser_details,
name='kb_Metrics.get_nonkbuser_details',
types=[dict])
self.method_authentication['kb_Metrics.get_nonkbuser_details'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_signup_returning_users,
name='kb_Metrics.get_signup_returning_users',
types=[dict])
self.method_authentication['kb_Metrics.get_signup_returning_users'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_signup_returning_nonkbusers,
name='kb_Metrics.get_signup_returning_nonkbusers',
types=[dict])
self.method_authentication['kb_Metrics.get_signup_returning_nonkbusers'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_user_counts_per_day,
name='kb_Metrics.get_user_counts_per_day',
types=[dict])
self.method_authentication['kb_Metrics.get_user_counts_per_day'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_total_logins,
name='kb_Metrics.get_total_logins',
types=[dict])
self.method_authentication['kb_Metrics.get_total_logins'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_nonkb_total_logins,
name='kb_Metrics.get_nonkb_total_logins',
types=[dict])
self.method_authentication['kb_Metrics.get_nonkb_total_logins'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_user_logins,
name='kb_Metrics.get_user_logins',
types=[dict])
self.method_authentication['kb_Metrics.get_user_logins'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_user_numObjs,
name='kb_Metrics.get_user_numObjs',
types=[dict])
self.method_authentication['kb_Metrics.get_user_numObjs'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_narrative_stats,
name='kb_Metrics.get_narrative_stats',
types=[dict])
self.method_authentication['kb_Metrics.get_narrative_stats'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_all_narrative_stats,
name='kb_Metrics.get_all_narrative_stats',
types=[dict])
self.method_authentication['kb_Metrics.get_all_narrative_stats'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.get_user_ws_stats,
name='kb_Metrics.get_user_ws_stats',
types=[dict])
self.method_authentication['kb_Metrics.get_user_ws_stats'] = 'required' # noqa
self.rpc_service.add(impl_kb_Metrics.status,
name='kb_Metrics.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
        except ValueError:
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'kb_Metrics ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
                    if environ.get('HTTP_X_FORWARDED_FOR'):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
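
    # A minimal request against this handler (illustrative; the
    # Authorization header is only needed for methods registered as
    # 'required' in method_authentication):
    #
    #   curl -X POST http://localhost:9999 \
    #        -d '{"version": "1.1", "id": "1",
    #             "method": "kb_Metrics.status", "params": [{}]}'
    #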
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)


application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP server listening
# on port 9999 by default, execute this file directly
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass


_proc = None


def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, start the server on localhost on a system-assigned port
    in the main thread. Execution of the main thread will stay in the
    server main loop until interrupted. To run the server in a separate
    process, and thus allow the stop_server method to be called, set
    newprocess=True. This also allows the assigned port number to be
    returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port


def stop_server():
    global _proc
    if not _proc:
        raise RuntimeError('server is not running')
    _proc.terminate()
    _proc = None
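
# Example round trip (illustrative): start in a child process so the
# port is returned and the server can be stopped again:
#
#   port = start_server(newprocess=True)
#   # ... issue requests against http://localhost:<port> ...
#   stop_server()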


def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
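
# process_async_cli reads one JSON-RPC request from the input file and
# writes the response to the output file. A minimal input sketch
# ('version' and 'id' are filled in when missing):
#
#   {"method": "kb_Metrics.status", "params": [{}]}
#
# The exit code is 0 on success and 500 when the response carries an
# 'error' member.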


if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()